query: string (lengths 9 to 3.4k)
document: string (lengths 9 to 87.4k)
metadata: dict
negatives: sequence (lengths 4 to 101)
negative_scores: sequence (lengths 4 to 101)
document_score: string (lengths 3 to 10)
document_rank: string (102 distinct values)
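Together these fields describe a retrieval-style dataset: each row pairs a natural-language query with the code document it describes, plus a list of negative (non-matching) code snippets, per-negative similarity scores, and the positive document's own score and rank among those negatives. A minimal loading sketch with the Hugging Face datasets library is shown below; the repository path and split name are placeholders, not this dataset's actual identifiers, and the field access simply mirrors the schema above.

# Minimal sketch of loading one row of this schema; "user/code-search-triplets"
# and the "train" split are placeholders, not the real repository or split names.
from datasets import load_dataset

ds = load_dataset("user/code-search-triplets", split="train")
row = ds[0]

print(row["query"])                # natural-language description of the code
print(row["document"])             # the matching (positive) code snippet
print(len(row["negatives"]))       # between 4 and 101 non-matching snippets
print(row["negative_scores"][:5])  # similarity scores, one per negative
print(row["document_score"], row["document_rank"])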
takes clean str from self.text and creates dict of word length freq.
def makeWordLengths(self):
    clean_s = self.cleanString(self.text)
    LoW = clean_s.split()
    for x in LoW:
        if len(x) not in self.wordlengths:
            self.wordlengths[len(x)] = 1
        else:
            self.wordlengths[len(x)] += 1
    return self.wordlengths
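A short, self-contained usage sketch of makeWordLengths follows. The wrapper class, the cleanString implementation, and the sample text are assumptions added only so the method can run, since the row contains the method alone.

import string

# Illustrative wrapper class: the real class name and cleanString logic are not
# part of this row, so a simple lowercase, punctuation-stripping cleaner is assumed.
class TextModel:
    def __init__(self, text):
        self.text = text
        self.wordlengths = {}

    def cleanString(self, s):
        return s.translate(str.maketrans("", "", string.punctuation)).lower()

    def makeWordLengths(self):
        clean_s = self.cleanString(self.text)
        LoW = clean_s.split()
        for x in LoW:
            if len(x) not in self.wordlengths:
                self.wordlengths[len(x)] = 1
            else:
                self.wordlengths[len(x)] += 1
        return self.wordlengths

print(TextModel("To be, or not to be.").makeWordLengths())
# {2: 5, 3: 1} -- five two-letter words and one three-letter word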
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wordFreq(parseThis):\n \n freq = {}\n nono = ('\"', \"'\", '%', '$', '!', '.', '?', '-', ','\n , '\\n', '\\t', '\\r', ':', ';')\n\n for c in nono:\n parseThis = parseThis.replace(c, \" \")\n \n words = parseThis.split()\n \n for word in words:\n temp = word.lower()\n freq[temp] = freq.get(temp, 0) + 1\n\n return freq", "def make_word_to_freq(self):\n\t\tword_to_freq = {}\n\t\tdocuments = self.tokenized_documents[\"train\"]\n\t\tfor document in documents:\n\t\t\tfor word in document:\n\t\t\t\tif not word in self.worddict: # make sure we have not found one of the pre-defined words\n\t\t\t\t\tword_to_freq[word] = word_to_freq.get(word, 0) + 1\n\t\t\n\t\treturn word_to_freq", "def build_frequency_dict(text: bytes) -> Dict[int, int]:\n freq_dic = {}\n for elem in text:\n if elem not in freq_dic:\n freq_dic[elem] = 1\n else:\n freq_dic[elem] += 1\n return freq_dic", "def make_frequency_dict(self, text):\n\t\t\tfrequency = {}\n\t\t\t#tomamos los numeros como caracteres entonces el diccionario solo tendra un rango (0,9) las ',' y '\\n'\n\t\t\tfor character in text:#O(len(row)*columns) \n\t\t\t\tif not character in frequency:#como frequency es un diccionario es de O(1)\n\t\t\t\t\tfrequency[character] = 0\n\t\t\t\tfrequency[character] += 1\n\t\t\t\n\t\t\treturn frequency", "def create_freq_dict(corpus, doc_info):\n for idx, content in enumerate(corpus):\n word_freq_table = {}\n splitted_sentence = content.split()\n for word in splitted_sentence:\n word = word.lower()\n if word not in word_freq_table:\n word_freq_table[word] = 1\n else:\n word_freq_table[word] += 1\n doc_info[idx]['freq_dict'] = word_freq_table", "def word_frequencies(word_list: TextIO) -> dict:\n words = word_list.read().split(' ')\n amount_of_words = len(set(words))\n frequencies = {}\n for index, word in enumerate(words):\n clean_word = remove_punctuation(word)\n if clean_word not in frequencies:\n frequencies[clean_word] = (index + 1) / amount_of_words\n del frequencies[\"\"]\n return frequencies", "def make_freq_dict(text):\n freq_dict = {}\n for i in text:\n if i not in freq_dict:\n freq_dict[i] = 1\n else:\n freq_dict[i] += 1\n return freq_dict", "def _count_word_frequency(self, data):\n _dict = {}\n for _docs in data:\n for _word in _docs:\n if _word in _dict:\n _dict[_word] += 1\n else:\n _dict[_word] = 1\n return _dict", "def word_frequency(a_string):\n\n for char in \"\"\".$#,:\"'?!)(\"\"\":\n a_string = a_string.replace(char, \"\")\n for char in \"\"\"-\"\"\":\n a_string = a_string.replace(char, \" \")\n\n cleanstring = a_string.lower()\n a_list = cleanstring.split()\n a_dict = {}\n for item in a_list:\n if item in a_dict:\n a_dict[item]+= 1\n else:\n a_dict[item] = 1\n return a_dict", "def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END", "def frequency(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n list_of_words = []\n for i in root.iter(root_tag + 'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n for word in i.text.split():\n alphanumeric_filter = filter(str.isalnum, word)\n alphanumeric_string = \"\".join(alphanumeric_filter)\n list_of_words.append(alphanumeric_string)\n dict_of_frequency = collections.Counter(list_of_words)\n return 
dict_of_frequency", "def text2wordfreq(string, lowercase=False):\r\n\r\n\r\n from collections import Counter\r\n lst = Counter(tokenize(string, lowercase)).most_common()\r\n\r\n dictLst = dict(lst)\r\n\r\n return dictLst", "def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d", "def word_count(self, document):\n start_time = time.time()\n dictionary = dict()\n counter = defaultdict(int)\n for line in document.splitlines():\n for word in line.split():\n if word not in PUNCTUATION_MARK:\n counter[word] += 1\n for word, cnt in sorted(counter.items(), key=lambda x: (-x[1], x[0])):\n dictionary[word] = cnt\n self.log.info(\"Duration count dictionary: {duration}\".format(duration=float(time.time() - start_time)))\n return dictionary", "def word_frequency_table(self, text_string):\r\n stopWords = set(stopwords.words(\"english\"))\r\n words = word_tokenize(text_string)\r\n ps = PorterStemmer()\r\n\r\n freqTable = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n if word in stopWords:\r\n continue\r\n if word in freqTable:\r\n freqTable[word] += 1\r\n else:\r\n freqTable[word] = 1\r\n\r\n return freqTable", "def parse_text(self, text, wordcount_dictionary=None):\n if not wordcount_dictionary:\n wordcount_dictionary = {}\n words = self.parse_regexp.findall(text)\n for word in words:\n new_word = stem(word.lower())\n if new_word not in self.stopwords:\n if new_word in wordcount_dictionary:\n wordcount_dictionary[new_word] += 1\n else:\n wordcount_dictionary[new_word] = 1\n return wordcount_dictionary", "def get_wordcount(text):\r\n\r\n characters = len(text)\r\n chars_no_spaces = sum([not x.isspace() for x in text])\r\n asian_chars = sum([is_asian(x) for x in text])\r\n non_asian_words = nonj_len(text)\r\n words = non_asian_words + asian_chars\r\n \r\n return dict(characters=characters,\r\n chars_no_spaces=chars_no_spaces,\r\n asian_chars=asian_chars,\r\n non_asian_words=non_asian_words,\r\n words=words)", "def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq", "def word_lengths(sentence):\n\n word_count_dict = {}\n sentence = sentence.split()\n\n for word in sentence:\n length = len(word)\n if length not in word_count_dict:\n word_count_dict[length] = {word}\n else:\n set = word_count_dict[length]\n set.add(word)\n\n return word_count_dict", "def make_freq_dict(word_list):\n\n\tfreq_dict = {}\n\n\tfor word in word_list: #need to slice each tale into a list of words for this to work\n\t\tif word in freq_dict:\n\t\t\tcurrent_val = freq_dict.get(word)\n\t\t\tval = current_val + 1\n\t\t\tfreq_dict[word] = val #made a dictionary of the string (word, frequnecy)\n\t\telse: #if it isn't in the dictionary\n\t\t\tfreq_dict[word] = 1\n\treturn freq_dict", "def count_words(self, contents):\n wordCounts = {}\n for i in self.ngramCounts:\n if i == 0: # want the default to be the size of the corpus\n total = 0\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove 
nulls\n for word in words:\n if word:\n total += 1\n wordCounts[i] = defaultdict(lambda: total)\n continue\n else:\n counts = defaultdict(lambda: 0)\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for k, word in enumerate(words): \n if k < (i-1) or not word:\n continue\n key = \"\"\n for j in range(k-i+1, k+1):\n key += words[j] + \" \"\n counts[key.strip()] += 1\n wordCounts[i] = counts\n return wordCounts", "def get_word_freq_dict(df_col):\n results = Counter()\n df_col.str.lower().str.split().apply(results.update)\n results = sorted(results.items(), key=lambda item: item[1], reverse=True)\n d = {}\n for word, freq in results:\n d[word] = freq\n return d", "def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings", "def words(text):\n clean = TextBlob(clean(text))\n sentence_count = len(clean.sentences)\n words = clean.tokenize()\n word_count = len(words)\n avg_len = np.mean([len(word) for word in words])\n words_dict = {'sentence_count': sentence_count, 'word_count': word_count,\n 'avg_len': avg_len}\n return words_dict", "def mapWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n return token_map", "def word_frequency_dict(tokens):\n\n\tfdist = FreqDist(tokens) \t\t\t\t\t\t# fdist.keys() fdist.values()\n\treturn dict(fdist)", "def computeWordFrequencies(self, tokens: ['token'], frequencies: {'token': int}):\n # project2: update this method to take existing dict as parameter and modify it\n # additionally, stopwords are not inserted in the dict;\n # words shorter than 3 character or contains all digits are ignored\n for token in tokens:\n # if the key is not in dict, dict.setdefault method initiates the value at 0\n # if token not in stopwords and len(token) >= 3 and not token.isdigit():\n frequencies[token] = frequencies.setdefault(token, 0) + 1", "def dictify(words):\n word_freq = {}\n for word in words:\n if word:\n key = word.lower()\n if key in word_freq:\n word_freq[key] += 1\n else:\n word_freq[key] = 1\n else:\n pass\n return word_freq", "def create_word_score(self):\n word_freq = {}\n word_deg = {}\n for zettel in self.lemma_tokens:\n for word in zettel:\n word_list = re.split(\" \", word[0])\n word_list_deg = len(word_list) - 1\n for new_word in word_list:\n word_freq.setdefault(new_word, 0)\n word_freq[new_word] = word_freq[new_word] + 1\n word_deg.setdefault(new_word, 0)\n word_deg[new_word] = word_deg[new_word] + word_list_deg\n word_score = {}\n for word in word_freq:\n word_deg[word] = word_deg[word] + word_freq[word]\n word_score.setdefault(word, 0)\n word_score[word] = word_deg[word] / (word_freq[word] * 1.0)\n return word_score", "def calc_weighted_frequency(words,ps,lem,stopWords,text_string):\r\n \r\n\r\n word_frequencies = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n word = lem.lemmatize(word)\r\n print(word)\r\n if word not in stopWords:\r\n if word not in word_frequencies:\r\n word_frequencies[word] = 1\r\n else:\r\n word_frequencies[word] += 1\r\n \r\n maximum_frequncy = max(word_frequencies.values())\r\n for word in word_frequencies.keys():\r\n word_frequencies[word] = (word_frequencies[word]/maximum_frequncy) \r\n print(word_frequencies)\r\n return 
word_frequencies", "def word_freq(word, ngram_dict):\n word = word.lower()\n return ngram_dict[word] if word in ngram_dict else 0", "def make_worddict(self):\n\t\tprint(\"Making word dictionary\")\n\t\tword_to_freq = self.make_word_to_freq()\n\t\twords = list(word_to_freq.keys())\n\t\twords.sort() # sort alphabetically first to avoid non-deterministic ordering of words with the same frequency\n\t\twords.sort(key = lambda x:word_to_freq[x], reverse = True)\n\n\t\tfor word in words[:self.FREQCAP-len(self.worddict)]:\n\t\t\tself.worddict[word] = len(self.worddict)\n\t\t\n\t\tprint(\"Word dictionary size:\", len(self.worddict))", "def prep_dict(word):\n counts = {}\n for l in word.lower():\n if l!=\" \":\n counts[l] = counts.get(l,0) + 1\n return counts", "def word_freq(self, word_list):\n hist = {}\n for word in word_list:\n hist[word] = hist.get(word, 0) + 1\n return hist", "def frequency(w: str) -> float:\n return frequency_list.get(remove_punctuation(w), 0)", "def words(phrase):\n\twordlist = phrase.split()\n\tunique_wordlist = []\n\tword_freq = []\n\n \n\twhile wordlist:\n\t\tword_freq.append(wordlist.count(wordlist[0])) #count the instances of a word and add it to the frequencies list\n\t\tunique_wordlist.append(wordlist[0]) #add the word into a unique words list\n\t\twordlist = list(filter((wordlist[0]).__ne__, wordlist)) #remove all other similar words from the wordlist\n\n\n\tn = len(word_freq)\n\toutput = {}\n\n\tfor i in range(n):\n\t\tif unique_wordlist[i].isdigit(): #convert sting digits into int\n\t\t\tunique_wordlist[i] = int(unique_wordlist[i])\n\t\toutput[unique_wordlist[i]] = word_freq[i] #add the unique words with their corresponding frequencies into the output dict\n\t\n\treturn output", "def frequencies(self):\n dic = {}\n for word in self.words():\n dic[word] = dic.get(word, 0) + 1\n return dic", "def word_count(input_file, word_freq=None):\n if word_freq is None:\n word_freq = collections.defaultdict(int)\n\n for l in input_file:\n for w in l.strip().split():\n word_freq[w] += 1\n\n return word_freq", "def analyze_word(s):\n\n a = {}\n a['word'] = s\n a['n_letters'] = len(s)\n a['n_vowels'] = count_vowels(s)\n \n return a", "def freq(word, document):\n return document.split(None).count(word)", "def get_word_frequencies(topic_description):\n frequencies = {w:f for w,f in topic_description}\n return frequencies", "def word_count(self):\n\n # split words on default word boundaries for words list\n words = self.phrase.split() \n\n # translate removes punctuation only, normalizes to lower case\n normalized_words = [self.normalize_word(w) for w in words]\n\n # removes empty strings after stripping punctuation\n filtered_words = [w for w in normalized_words if w]\n\n # sets up default dictionary, so all entries are 0\n word_counts = collections.defaultdict(int) #{}\n\n # define word counting function for use in reduce\n def count_word(dictionary, word):\n dictionary[word] = dictionary[word] + 1\n return dictionary\n\n # count words into dictionary from word list\n reduce(count_word, filtered_words, word_counts)\n\n return word_counts", "def word_frequency( tokenized, dic ):\n print( 'computing word frequencies' )\n start = time.time()\n for i, text in enumerate( tokenized ):\n for token in text:\n if token not in dic:\n dic[ token ] = 1\n else:\n dic[ token ] += 1\n if i % 10000 == 0:\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s'.format( i, NO_REVIEWS, time.time() - start ) )\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s\\n'.format( i, NO_REVIEWS, 
time.time() - start ) )", "def initialize_document_frequencies():\n global document_frequency\n for term in dictionary:\n document_frequency[term] = len(postings[term])", "def computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)", "def word_count(input_str):\n counts = dict()\n words = input_str.split()\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts", "def word_frequency(words):\n freq = {}\n for w in words:\n cur_word = w.lower().strip(punctuation)\n freq[cur_word] = freq.get(cur_word, 0) + 1\n return freq", "def _word_counter(input_string: str) -> Dict[str, int]:\n # @todo Create a data type that can counts keys as they are added\n _current_word = ''\n parsed_words = {}\n\n for character in input_string.lower():\n if character in MaximalTextAnalyzer._letters:\n _current_word += character\n elif len(_current_word):\n parsed_words = MaximalTextAnalyzer._insert_into_dict(\n words_dict=parsed_words, key=_current_word)\n\n _current_word = ''\n\n # What if it does not end with a separator?\n if _current_word:\n parsed_words = MaximalTextAnalyzer._insert_into_dict(\n words_dict=parsed_words, key=_current_word)\n\n return parsed_words", "def getFrequencies(tweets):\n total_words = 0\n word_freq = {}\n for tweet in tweets:\n twext = tweet['clean_text']\n for word in twext.split(' '):\n word = word.strip()\n if word:\n total_words += 1\n if word not in word_freq:\n word_freq[word] = float(1)\n else:\n word_freq[word] += 1\n for key in word_freq:\n word_freq[key] = word_freq[key]/total_words\n return word_freq", "def find_frequency(text, n=1):\n freqs = {}\n length = len(text)\n for i in xrange(0, length):\n upper = i+n\n if upper > length:\n break\n gram = text[i:upper]\n dict_operate(freqs, gram, 1, operator.add)\n return freqs", "def calculate_frequencies(cipher_text: str) -> dict:\n cipher_frequencies = dict()\n for character in cipher_text:\n try:\n cipher_frequencies[character] += 1\n except KeyError:\n cipher_frequencies[character] = 1\n \n return cipher_frequencies", "def word_frequency(seq):\n\n # Initializes an emtpy hash map from HashMap class\n hash_map = HashMap()\n\n # For each word (not unique) in sequence\n for word in seq:\n\n # if that word is already in hash map\n if word in hash_map:\n\n # Increment value for that word\n hash_map[word] += 1\n\n # if word not yet in hash map\n else:\n\n # set count value for word equal to one\n hash_map[word] = 1\n\n # return filled hash map from sequence, words and words counts\n return hash_map", "def _compute_frequencies( word_sent):\n\t\tfreq = defaultdict(int)\n\t\tfor s in word_sent:\n\t\t\tfor word in s:\n\t\t\t\tif word not in _stopwords:\n\t\t\t\t\tfreq[word] += 1\n\t\t\t\t# frequencies normalization and fitering\n\t\treturn freq", "def clean(doc, word_count={}):\n doc = doc.lower()\n tokens = wt(doc)\n\n filterWord = []\n for w in tokens:\n if w not in dots and w not in stopWord:\n if w in slangs:\n w = slangs[w]\n filterWord.append(w)\n\n sents = \" \".join(filterWord)\n filterWord = re.findall('\\w+', sents)\n\n ps = PorterStemmer()\n\n for w in filterWord:\n fword = ps.stem(w)\n\n word_count[fword] = word_count.get(fword, 1.0)\n word_count[fword] += 1\n\n return word_count", "def convert_word_to_count(counter={}, doc=[]):\n for sentence in doc:\n for word in sentence.split():\n if word not in counter:\n 
counter[word] = 1\n else:\n counter[word] += 1\n return counter", "def letter_freq(txt):\n frequencies = {}\n txt_lower = txt.lower()\n\n for i in txt_lower:\n keys = frequencies.keys()\n if i in keys:\n frequencies[i] += 1\n else:\n frequencies[i] = 1\n return frequencies", "def preprocessing():\n english_dictionary = nltk.corpus.brown.words()\n slang_vocab = pickle.load(open('vocab_pattern_match_with_freq.pkl', 'rb'))\n\n normalize_english_dict = len(english_dictionary)\n normalize_slang_vocab = 0\n for w, n in slang_vocab.items():\n normalize_slang_vocab += n\n\n words = {}\n for w, n in Counter(english_dictionary).items():\n words[w] = n/normalize_english_dict\n \n for w, n in slang_vocab.items():\n if w not in words:\n words[w] = 0.\n words[w] += n/normalize_slang_vocab\n\n words_by_freq = [w for w,_ in sorted(words.items(), key=lambda x: x[1], reverse=True)]\n\n # Build a cost dictionary, assuming Zipf's law and cost = -math.log(probability).\n #words = open(\"words_by_frequency.txt\").read().split()\n wordcost = dict((k, log((i+1)*log(len(words_by_freq)))) for i,k in enumerate(words_by_freq))\n maxword = max(len(x) for x in words_by_freq)\n return wordcost,maxword", "def create_freq_dict(sents, lang):\n ix = 0\n freq_dict_all = []\n stop_words = set(stopwords.words(lang))\n\n for sent in sents:\n ix += 1\n freq_dict = {}\n words = word_tokenize(sent)\n\n for word in words:\n word = word.lower()\n if word not in stop_words:\n if word in freq_dict:\n freq_dict[word] += 1\n else:\n freq_dict[word] = 1\n\n temp = {\n 'doc_id': ix,\n 'freq_dict': freq_dict\n }\n\n freq_dict_all.append(temp)\n\n return freq_dict_all", "def dfc(text: str):\n #Splitting the text into a list\n wordlist = text.split()\n worddictionary = {}\n\n #Creating the wordlist dictionary\n for word in wordlist:\n if word in worddictionary:\n #Increase\n worddictionary[word] += 1\n else:\n #add to the dictionary\n worddictionary[word] = 1\n\n #Converting worddictionary into a dataframe\n df = pd.DataFrame.from_dict(worddictionary, orient='index')\n #Resetting index to a numerical one for ease of use\n df = df.reset_index()\n #Renaming the old string-valued index\n df = df.rename(columns={'index':'word'})\n #Defining two functions (over empty variables) to replace commas and dots\n remover = lambda x: x.replace(',','')\n remover2 = lambda x: x.replace('.','')\n #Using ( too many lines) to apply the functions\n df['word'] = df['word'].apply(remover)\n df['word'] = df['word'].apply(remover2)\n #Row-wise Subselection and assignment to remove words with a frequency smaller than 2\n df = df[df[0] > 2]\n #Renaming word frequncy\n df = df.rename(columns={0:'Frequency'})\n\n return df", "def word_count(s):\n # Your code here\n\n stop_char = r\"\"\":;\",.-+=/|[]{|}()*^\\&\"\"\"\n\n # Make sure special characters arent in string\n s_clean = \"\".join([x for x in s if x not in stop_char])\n\n # Lower case and remove trailing space\n word_list = s_clean.lower().split()\n\n # use cache to hold memory\n word_count = {}\n\n for x in word_list:\n\n if x not in word_count:\n # if not there, start it at 0\n word_count[x] = 0\n\n # if seen again, increase count\n word_count[x] += 1\n\n return word_count", "def unique_words(data: np.ndarray) -> dict:\n words = {}\n for doc in data:\n docwords_list = [w for w in doc.split(' ') if w != '']\n docwords_set = set(docwords_list)\n for word in docwords_list:\n try:\n words[word]['freq'] += 1\n except KeyError:\n words[word] = {'freq': 1, 'doccount': 0}\n for word in docwords_set:\n 
words[word]['doccount'] += 1\n return words", "def letter_freq( text ):\n\tchars = string.ascii_uppercase\n\ttext = text.upper()\n\tresult = get_letter_dict()\n\ttotal = 0\n\tfor char in chars:\n\t\tcount = text.count(char)\n\t\tresult[char] = count\n\t\ttotal += count\n\tif total != 0:\n\t\tfor char in chars:\n\t\t\tresult[char] = (result[char]*10000 / total) / float(100)\n\treturn result", "def calculate_word_counts(text : Text)->Counter:\n return Counter(tokenized_text(text))", "def preprocess(text, freq=5):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n\n words_raw = text.strip().lower().split()\n word_counts = Counter(words_raw)\n words = [w for w in words_raw if word_counts[w] > freq]\n vocab = set(words)\n vocab2index = {w: idx for idx, w in enumerate(vocab)}\n index2vocab = {idx: w for idx, w in enumerate(vocab)}\n words_int = [vocab2index[w] for w in words]\n return words_int, vocab2index, index2vocab", "def __init__(self, input_string):\n self.words_to_counts = {}\n self.split_and_populate_words(input_string)", "def analyze_words(self):\n\t\t\n\t\tword_analysis = {}\n\t\tfor word in self.word_list:\n\t\t\tif word not in word_analysis:\n\t\t\t\tacademic = (word in LEMMA_DICT)\n\t\t\t\tlength = len(word)\n\t\t\t\tfrequency = len(self.word_list[word])\n\t\t\t\tstem = word\t\n\t\t\t\tword_location_index = len(self.sentence_index)-1 #first set it as the last index\n\t\t\t\t\n\t\t\t\tfor index, sentence in self.sentence_index.items():\n\t\t\t\t\tif word in sentence.split():#need to be individual words, not parts of a word\n\t\t\t\t\t\tword_location_index = index \n\t\t\t\t\t\tbreak\n\t\t\t\t\tif self.word_list[word][0] in sentence.split():#accounts for words with upper cases\n\t\t\t\t\t\tword_location_index = index\n\t\t\t\t\t\n\t\t\t\t#selection critera\n\t\t\t\tif academic:\n\t\t\t\t\tselection_criteria = 'academic word'\n\t\t\t\telif frequency > 1: \n\t\t\t\t\tselection_criteria = 'high frequency'\n\t\t\t\telse:\n\t\t\t\t\tselection_criteria = 'word length'\n\n\t\t\t\tword_analysis[word] = (academic, length, frequency, stem, word_location_index, selection_criteria)\n\t\t\n\t\tself.word_analysis = word_analysis\n\t\t\n\t\treturn self.word_analysis", "def lyrics_to_frequencies(lyrics):\n lyricsDictionary = dict()\n for each_word in lyrics:\n if each_word in lyricsDictionary:\n lyricsDictionary[each_word] += 1\n else:\n lyricsDictionary[each_word] = 1\n return lyricsDictionary", "def generate_frequency_map(data: str) -> Dict[str, int]:\n seperators: Final[List[str]] = [\n ',', '.', '\\n', ' ', '\\t', '?', '<', '>', '!', ':', ';'\n ]\n tokens: List[str] = tokenize(data, seperators)\n\n frequency_map: Dict[str, int] = {}\n for token in tokens:\n if token in frequency_map.keys():\n frequency_map[token] += 1\n else:\n frequency_map[token] = 1\n return frequency_map", "def process_dict(text, frequency_threshold):\n\n # Trying to load previous unique_words (pickle file)\n UNIQUE_WORDS_PICKLE = \"unique_words_with_frequency_\" + str(frequency_threshold) + 
\".pickle\"\n \n unique_words = None\n if os.path.isfile(UNIQUE_WORDS_PICKLE):\n try:\n with open(UNIQUE_WORDS_PICKLE, 'r') as f:\n unique_words = pickle.load(f)\n except:\n os.remove(UNIQUE_WORDS_PICKLE)\n unique_words = None\n\n if (type(unique_words) == list):\n return unique_words\n\n\n WORD_COUNT_PICKLE = \"word_count.pickle\"\n WORD_COUNT = 253855\n\n print(\"Processing dictionary. This will take a while.\")\n\n # Trying to load previous word_count (pickle file)\n word_count = None\n if os.path.isfile(WORD_COUNT_PICKLE):\n try:\n with open(WORD_COUNT_PICKLE, 'r') as f:\n word_count = pickle.load(f)\n if len(word_count) != WORD_COUNT:\n os.remove(WORD_COUNT_PICKLE)\n word_count = None\n except:\n raise\n os.remove(WORD_COUNT_PICKLE)\n word_count = None\n\n # count words\n if word_count == None:\n print(\"Pickle file not found. Counting word occurence...\")\n\n # grab all the words\n words = text.split(\" \")\n\n # counting word occurence\n word_count = dict(Counter(words).most_common())\n \n # saving word count for future reuse\n with open(WORD_COUNT_PICKLE, 'w') as f:\n pickle.dump(word_count, f)\n print(\"Word count saved for future reuse.\")\n \n # making sure we have the correct count loaded\n assert(type(word_count) == dict)\n assert(len(word_count) == WORD_COUNT)\n\n # remove the duplicates and single-character words.\n unique_words = [w for w in word_count.keys() if len(w) > 1]\n vocab_size = len(unique_words)\n print(\"Vocab size:\", vocab_size)\n\n # remove words with frequency lower than 1%\n unique_words = [word for word in unique_words if float(word_count[word]) / vocab_size > frequency_threshold]\n print(\"Vocab size (>%.3f%% frequency): %d\" % ((frequency_threshold * 100), len(unique_words)))\n\n unique_words.sort(key=lambda word: len(word), reverse=True)\n unique_words.append('a')\n unique_words.append('i')\n\n # save unique words for future reuse\n with open(UNIQUE_WORDS_PICKLE, 'w') as f:\n pickle.dump(unique_words, f)\n print(\"unique_words saved for future reuse.\")\n\n return unique_words", "def get_freqs(self):\n dictionary = {}\n for word in self.word_list:\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n letter_sorted = sorted(dictionary.items(), key=lambda entry: entry[0]) #sorts dictionary into alphabetized tuples\n count_sorted = sorted(letter_sorted, key=lambda seq: seq[1], reverse=True) #sorts alphabetical tuples into count order\n return count_sorted", "def print_word_freq(file):\n with open(file) as text:\n text = text.read().lower()\n text = text.replace(\"\\n\", \" \")\n text = text.replace(\"’\", \"\")\n # text = \" \".join(text.split())\n # print(text)\n for character in string.punctuation:\n text = text.replace(character, \"\")\n word_list = text.split()\n clean_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n clean_list.append(word)\n \n\n # for stop_word in STOP_WORDS:\n # if stop_word in word_list:\n # word_list.remove(stop_word)\n\n\n new_dict = {}\n for word in clean_list:\n new_dict[word] = clean_list.count(word)\n sorted_dict = sorted(new_dict.items())\n print(sorted_dict)\n\n # print(f\"{key} | {value} {'*' * value}\")\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n\n # for word in word_list:\n # if word in string.punctuation:\n # #do something\n # if word in STOP_WORDS:\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n # print(text)", "def word_count(phrase):\n words = phrase.split()\n deDupedWords = set(words)\n wordCount = 
{}\n\n for element in deDupedWords:\n wordCount.update({element: words.count(element)})\n\n return wordCount", "def makeWords(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split() \r\n for x in LoW: \r\n if x not in self.words: \r\n self.words[x] = 1\r\n else: \r\n self.words[x] += 1\r\n return self.words", "def word_frequency(words):\r\n frequency = {}\r\n for w in words:\r\n frequency[w] = frequency.get(w, 0) + 1\r\n return frequency", "def word_frequency(self, document):\n freq_table = {}\n words = nltk.word_tokenize(document)\n for word in words:\n if word in freq_table:\n freq_table[word] = freq_table.get(word) + 1\n else:\n freq_table[word] = 1\n # cut down the frequency table so that only common words are scored for\n freq_table = sorted(freq_table.items(), key=lambda x: x[1], reverse=True)\n scorable_words = []\n for word, occ in freq_table:\n # set threshold as words appearing x times or more - set to optimal valeue = 0\n # in hindsight this can just be deleted\n if int(occ) > 0:\n scorable_words.append(word)\n else:\n break\n self.sent_pos = self.sent_pos + 1 \n return scorable_words", "def get_wordcount_obj(text):\r\n return dict2obj(get_wordcount(text))", "def print_word_freq(file):\n with open(file) as text:\n text_string = str(text.readlines())\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"-\", \"\")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n word_list = text_string.split()\n no_stop_words = []\n for word in word_list:\n if word in STOP_WORDS:\n pass\n else: no_stop_words.append(word)\n clean_list = {}\n for word in no_stop_words:\n clean_list[word] = no_stop_words.count(word) \n print(clean_list)", "def word_count(phrase):\n word_dict = {}\n\n for word in phrase.split():\n word_dict[word] = word_dict.get(word, 0) + 1\n\n return word_dict", "def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n # frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq[w]\n return freq", "def print_word_freq(file):\n # with open(file, 'r') as text the r as the second arguement means that my intentions are to read the file\n with open(file, 'r') as text:\n # this reads the entire file and puts this into text string\n text_string = text.read()\n # returns the string respresentation of text string without removing special characters so you can see what you need to remove\n # print(repr(text_string))\n # this removes the specified characters from the text string\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"—\", \" \")\n text_string = text_string.replace(\"-\", \" \")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"’\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n 
text_string = text_string.replace(\"\\\"\", \"\")\n # takes the text string and makes all the characters lower case\n text_string = text_string.lower()\n # takes the text string and splits all the words into a list this splits from space to space\n words_list = text_string.split()\n # a dictionary is a key and a value\n no_stop_words = {}\n # for loop that will cycle through the words list\n for word in words_list:\n # checking to see if the word is stop words\n if word not in STOP_WORDS:\n # if the word is already in the dictionary no stop words increment the value by 1\n if word in no_stop_words:\n no_stop_words[word] += 1\n # if the word is not in the dictionary no stop words add this to the dictionary and give it a value of 1\n else:\n no_stop_words[word] = 1\n \n sorted_dict = {}\n sorted_keys = sorted(no_stop_words, key=no_stop_words.get, reverse=True)\n \n for w in sorted_keys:\n sorted_dict[w] = no_stop_words[w]\n \n for key in sorted_dict:\n print(f\"{key:>15} | {sorted_dict[key]:2} {'*' * sorted_dict[key]}\")\n \n # good practice to ensure that we are properly closing the file in use at the end of the function\n text.close()", "def term_frequency(ngrams,lang):\n token_dictionary = {}\n for ng in ngrams:\n try:\n token_dictionary[ng] = token_dictionary[ng] + 1\n except KeyError:\n token_dictionary[ng] = 1\n return token_dictionary", "def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n # frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq[w]\n return freq", "def __init__ (self):\n self.lengths = {}\n self.lower_counts = {}\n self.upper_counts = {}\n self.digit_counts = {}\n self.symbol_counts = {}\n self.class_counts = {}\n self.word_counts = {}", "def get_word_count(my_str):\n my_list = my_str.split(\" \")\n my_map = {}\n for word in my_list:\n # Strip the word from any character\n word = word.strip(\".\")\n word = word.strip(\",\")\n # Convert word to all lowercase\n word = word.lower()\n if word not in my_map:\n my_map[word] = 1\n else:\n my_map[word] += 1\n\n return my_map", "def createDict(self):\n data = d.Dictionary.dictionary\n while True:\n filtered = [line.strip() for line in data if len(line) == self.wordLen]\n if len(filtered) == 0:\n self.setNewLen()\n else:\n break\n return filtered", "def calculate_frequency_table(word):\n\n frequency_table = {}\n if word is None:\n return frequency_table\n\n for char in word:\n if char not in frequency_table.keys():\n frequency_table[char] = 1\n else:\n frequency_table[char] += 1\n\n return frequency_table", "def preparation(self):\n self.word_freq = defaultdict(int)\n\n for sentence in self.corpus:\n for word in sentence:\n self.word_freq[word] += 1\n\n # self.words decide the index of all the words\n self.words = list(self.word_freq.keys())\n self.T = len(self.words)\n\n # word_index will give index for a given word and vice versa for index_word\n self.word_index = dict([[word, i] for i, word in enumerate(self.words)])\n self.index_word = dict([[i, word] for i, word in enumerate(self.words)])", "def letterFreq(words):\n dict = {}\n total = 0\n for word in words:#Iterate through words\n for letter in word:#Increment by letter\n count = 0\n for yearCount in words[word]:\n count += yearCount.count#Increment total instances of word\n total += count#Count total letters\n if letter in dict:\n 
dict[letter] += count#Add to existing entry\n else:\n dict[letter] = count#Create new entry\n \"\"\"CODE FOR THE WHOLE ALPHABET\"\"\"\n list = []\n for letter in ascii_lowercase:\n if letter in dict and dict[letter] != 0:\n list.append(dict[letter] / total)#Convert to relative\n else:\n list.append(0.0)#Fill alphabet\n return list", "def freqWords(self, words):\n return nltk.FreqDist(words)", "def buildDict(self, dict):\n for word in dict:\n self.s.add(word)\n self.length_set = set([len(word) for word in dict])", "def build_dict(fname):\n\t\n\twith open(fname) as file:\n\n\t\tword_count_dict = {}\n\n\t\tfor line in file:\n\t\t\tline = line.rstrip()\n\t\t\tline =line.split(' ')\n\t\t\tfor word in line:\n\t\t\t\tword = word.strip('\"!.,?_;():')\n\t\t\t\tword = word.lower()\n\t\t\t\tword_count_dict[word] = word_count_dict.get(word, 0) + 1\n\t\t#return word_count_dict\n\n\t\tfor each in word_count_dict:\n\t\t\tcount = word_count_dict[each]\n\t\t\tprint(each, count)\n\n\t\treturn", "def word_frequency(text):\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens = tokenizer.tokenize(text)\n\n stop = set(stopwords.words('english'))\n tokens_without_stop = list(filter(lambda word: word.lower() not in stop, tokens))\n\n counts = Counter(tokens_without_stop)\n return counts", "def process_text(self, text):\n\n flags = (re.UNICODE if sys.version < '3' and type(text) is unicode # noqa: F821\n else 0)\n pattern = r\"\\w[\\w']*\" if self.min_word_length <= 1 else r\"\\w[\\w']+\"\n regexp = self.regexp if self.regexp is not None else pattern\n\n words = re.findall(regexp, text, flags)\n # remove 's\n words = [word[:-2] if word.lower().endswith(\"'s\") else word\n for word in words]\n # remove numbers\n if not self.include_numbers:\n words = [word for word in words if not word.isdigit()]\n # remove short words\n if self.min_word_length:\n words = [word for word in words if len(word) >= self.min_word_length]\n\n stopwords = set([i.lower() for i in self.stopwords])\n if self.collocations:\n word_counts = unigrams_and_bigrams(words, stopwords, self.normalize_plurals, self.collocation_threshold)\n else:\n # remove stopwords\n words = [word for word in words if word.lower() not in stopwords]\n word_counts, _ = process_tokens(words, self.normalize_plurals)\n\n return word_counts", "def count_words(phrase):\n # split the input string at spaces\n phrase_split = phrase.split()\n\n # initiate empty dictionary\n word_count = {}\n\n # iterate over words in the phrase\n for word in phrase_split:\n if word in word_count:\n\n # if the word is already a key in the dictionary, increase the value by 1\n word_count[word] += 1\n\n else:\n # if the word is not a key in the dictionary, set its value to 1\n word_count[word] = 1\n\n return word_count", "def histogram(text):\n hist = {}\n\n for char in text.lower():\n if char.isalpha():\n hist[char] = hist.get(char, 0) + 1\n else:\n hist['others'] = hist.get('others', 0) + 1\n return hist", "def frequencyAnalysis(article, dictionary):\r\n string = article #Sets the string to the article\r\n string = re.sub(\"[^a-zA-Z0-9’\\s]+\",'', string) #Takes the articles and removes all characters appart from apostrophes, spaces, digits, and leters\r\n string = re.sub(\"’\", \"'\", string) #Replaces ’ with '\r\n string = string.lower() #Ensures that all the charcters are lower case\r\n stringList = string.split() #Takes the article and turns it into a list\r\n \r\n print(\"\\nConverted article to list\\n\")\r\n \r\n print(\"Starting frequency analysis\\n\")\r\n\r\n #Started the frequency 
anaylsis\r\n for word in stringList:\r\n if \"'s\" in word: #Done to remove extra keys in the dictionary, removes the possessive such that it counts \"Trump\" and \"Trump's\" as one word\r\n word = word[0:-2]\r\n elif \"s'\" in word:\r\n word = word[0:-1]\r\n if word != \"advertisement\":\r\n if word in dictionary:\r\n dictionary[word] +=1 #If it finds the word in the dictionary, the frequency has to increase by one\r\n else:\r\n dictionary[word] = 1 #If it finds a new word, it needs to add the word so the frequency is one\r", "def __init__(self):\n\t\tself.word_count_dict = {}\n\t\tself.num_comments = 0\n\t\tself.num_words = 0", "def countwords(txt):\n\twords = {}\n\n\tpattern = re.compile(\"[a-zA-Z][a-zA-Z0-9]*\")\t\n\tfor word in pattern.findall(txt):\n\t\twords[word.lower()] = words.get(word,0)+1\t \n\t\n\t# i'd rather do this in the prior step\n\t# but i need to be able to eliminate dupes\n\t# which may or may not be more expensive than\n\t# going this route. need to benchmark it.\n\tfor key,word in words.items():\n\t\tapcount.setdefault(key,0)\n\t\tapcount[key]+=1\n\t\n\treturn words", "def _create_word_count_dict(self):\n word_counts = dict()\n for wc in self.word_counts.all():\n word_counts[wc.word.name] = wc.count\n return word_counts", "def create_keyword_score(self):\n keywords_score = {}\n for zettel in self.lemma_tokens:\n for word in zettel:\n if zettel.count(word) >= self.min_keyword_freq:\n keywords_score.setdefault(word[0], 0)\n word_list = re.split(\" \", word[0])\n score = 0\n for new_word in word_list:\n score += self.word_scores[new_word]\n keywords_score[word[0]] = score\n return keywords_score" ]
[ "0.72095066", "0.71612656", "0.71218807", "0.711714", "0.7092993", "0.7040645", "0.7019579", "0.6963053", "0.6958046", "0.6943951", "0.6904577", "0.68902737", "0.6876883", "0.6857476", "0.68253565", "0.68212354", "0.6773053", "0.6735119", "0.67350656", "0.668967", "0.6683868", "0.66797227", "0.66776216", "0.6659774", "0.6630346", "0.65813136", "0.6566585", "0.6544648", "0.65338224", "0.6515932", "0.65131736", "0.6503238", "0.64956355", "0.6446325", "0.6435425", "0.6420242", "0.64129305", "0.64119995", "0.6400492", "0.63919485", "0.6388033", "0.63770765", "0.6369209", "0.63515854", "0.63356155", "0.6324697", "0.63214123", "0.63105905", "0.6303879", "0.62988496", "0.6294543", "0.62905025", "0.62833345", "0.62832886", "0.62831646", "0.62827784", "0.6275354", "0.62678003", "0.6265031", "0.62580776", "0.6242819", "0.62419885", "0.6231074", "0.6230537", "0.62301993", "0.6217521", "0.6216382", "0.621543", "0.62130773", "0.6211767", "0.62062776", "0.62027836", "0.6187261", "0.6174777", "0.61714613", "0.61682606", "0.6167545", "0.61618984", "0.61591005", "0.6152285", "0.61259013", "0.61196125", "0.61096805", "0.61077076", "0.61076796", "0.6103573", "0.6102776", "0.6101906", "0.60905474", "0.6083724", "0.608345", "0.60785335", "0.6076151", "0.60717505", "0.606563", "0.6062543", "0.6054047", "0.6043834", "0.6019203", "0.6018892" ]
0.6272309
57
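The last two fields of the row relate in a checkable way: with document_score 0.6272309, exactly 57 of the listed negative_scores are higher, which matches document_rank 57. That suggests the rank counts how many negatives outscore the positive document (a zero-based rank); this interpretation is inferred from the sample rather than documented. A small sketch for recomputing it:

# Recompute the positive document's rank among its negatives for one row.
# Interpretation inferred from the sample above (not documented): the rank is
# the number of negatives whose score exceeds document_score.
def recompute_rank(row):
    doc_score = float(row["document_score"])
    return sum(float(s) > doc_score for s in row["negative_scores"])

# For the sample row shown here, recompute_rank(row) == 57 == int(row["document_rank"])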
takes clean str from self.text and creates dict of word freq.
def makeWords(self):
    clean_s = self.cleanString(self.text)
    LoW = clean_s.split()
    for x in LoW:
        if x not in self.words:
            self.words[x] = 1
        else:
            self.words[x] += 1
    return self.words
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wordFreq(parseThis):\n \n freq = {}\n nono = ('\"', \"'\", '%', '$', '!', '.', '?', '-', ','\n , '\\n', '\\t', '\\r', ':', ';')\n\n for c in nono:\n parseThis = parseThis.replace(c, \" \")\n \n words = parseThis.split()\n \n for word in words:\n temp = word.lower()\n freq[temp] = freq.get(temp, 0) + 1\n\n return freq", "def make_word_to_freq(self):\n\t\tword_to_freq = {}\n\t\tdocuments = self.tokenized_documents[\"train\"]\n\t\tfor document in documents:\n\t\t\tfor word in document:\n\t\t\t\tif not word in self.worddict: # make sure we have not found one of the pre-defined words\n\t\t\t\t\tword_to_freq[word] = word_to_freq.get(word, 0) + 1\n\t\t\n\t\treturn word_to_freq", "def create_freq_dict(corpus, doc_info):\n for idx, content in enumerate(corpus):\n word_freq_table = {}\n splitted_sentence = content.split()\n for word in splitted_sentence:\n word = word.lower()\n if word not in word_freq_table:\n word_freq_table[word] = 1\n else:\n word_freq_table[word] += 1\n doc_info[idx]['freq_dict'] = word_freq_table", "def build_frequency_dict(text: bytes) -> Dict[int, int]:\n freq_dic = {}\n for elem in text:\n if elem not in freq_dic:\n freq_dic[elem] = 1\n else:\n freq_dic[elem] += 1\n return freq_dic", "def make_freq_dict(text):\n freq_dict = {}\n for i in text:\n if i not in freq_dict:\n freq_dict[i] = 1\n else:\n freq_dict[i] += 1\n return freq_dict", "def make_frequency_dict(self, text):\n\t\t\tfrequency = {}\n\t\t\t#tomamos los numeros como caracteres entonces el diccionario solo tendra un rango (0,9) las ',' y '\\n'\n\t\t\tfor character in text:#O(len(row)*columns) \n\t\t\t\tif not character in frequency:#como frequency es un diccionario es de O(1)\n\t\t\t\t\tfrequency[character] = 0\n\t\t\t\tfrequency[character] += 1\n\t\t\t\n\t\t\treturn frequency", "def _count_word_frequency(self, data):\n _dict = {}\n for _docs in data:\n for _word in _docs:\n if _word in _dict:\n _dict[_word] += 1\n else:\n _dict[_word] = 1\n return _dict", "def word_frequency(a_string):\n\n for char in \"\"\".$#,:\"'?!)(\"\"\":\n a_string = a_string.replace(char, \"\")\n for char in \"\"\"-\"\"\":\n a_string = a_string.replace(char, \" \")\n\n cleanstring = a_string.lower()\n a_list = cleanstring.split()\n a_dict = {}\n for item in a_list:\n if item in a_dict:\n a_dict[item]+= 1\n else:\n a_dict[item] = 1\n return a_dict", "def text2wordfreq(string, lowercase=False):\r\n\r\n\r\n from collections import Counter\r\n lst = Counter(tokenize(string, lowercase)).most_common()\r\n\r\n dictLst = dict(lst)\r\n\r\n return dictLst", "def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings", "def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END", "def parse_text(self, text, wordcount_dictionary=None):\n if not wordcount_dictionary:\n wordcount_dictionary = {}\n words = self.parse_regexp.findall(text)\n for word in words:\n new_word = stem(word.lower())\n if new_word not in self.stopwords:\n if new_word in wordcount_dictionary:\n wordcount_dictionary[new_word] += 1\n else:\n 
wordcount_dictionary[new_word] = 1\n return wordcount_dictionary", "def frequency(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n list_of_words = []\n for i in root.iter(root_tag + 'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n for word in i.text.split():\n alphanumeric_filter = filter(str.isalnum, word)\n alphanumeric_string = \"\".join(alphanumeric_filter)\n list_of_words.append(alphanumeric_string)\n dict_of_frequency = collections.Counter(list_of_words)\n return dict_of_frequency", "def word_frequencies(word_list: TextIO) -> dict:\n words = word_list.read().split(' ')\n amount_of_words = len(set(words))\n frequencies = {}\n for index, word in enumerate(words):\n clean_word = remove_punctuation(word)\n if clean_word not in frequencies:\n frequencies[clean_word] = (index + 1) / amount_of_words\n del frequencies[\"\"]\n return frequencies", "def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d", "def word_frequency_table(self, text_string):\r\n stopWords = set(stopwords.words(\"english\"))\r\n words = word_tokenize(text_string)\r\n ps = PorterStemmer()\r\n\r\n freqTable = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n if word in stopWords:\r\n continue\r\n if word in freqTable:\r\n freqTable[word] += 1\r\n else:\r\n freqTable[word] = 1\r\n\r\n return freqTable", "def word_count(self, document):\n start_time = time.time()\n dictionary = dict()\n counter = defaultdict(int)\n for line in document.splitlines():\n for word in line.split():\n if word not in PUNCTUATION_MARK:\n counter[word] += 1\n for word, cnt in sorted(counter.items(), key=lambda x: (-x[1], x[0])):\n dictionary[word] = cnt\n self.log.info(\"Duration count dictionary: {duration}\".format(duration=float(time.time() - start_time)))\n return dictionary", "def make_freq_dict(word_list):\n\n\tfreq_dict = {}\n\n\tfor word in word_list: #need to slice each tale into a list of words for this to work\n\t\tif word in freq_dict:\n\t\t\tcurrent_val = freq_dict.get(word)\n\t\t\tval = current_val + 1\n\t\t\tfreq_dict[word] = val #made a dictionary of the string (word, frequnecy)\n\t\telse: #if it isn't in the dictionary\n\t\t\tfreq_dict[word] = 1\n\treturn freq_dict", "def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq", "def get_word_freq_dict(df_col):\n results = Counter()\n df_col.str.lower().str.split().apply(results.update)\n results = sorted(results.items(), key=lambda item: item[1], reverse=True)\n d = {}\n for word, freq in results:\n d[word] = freq\n return d", "def words(phrase):\n\twordlist = phrase.split()\n\tunique_wordlist = []\n\tword_freq = []\n\n \n\twhile wordlist:\n\t\tword_freq.append(wordlist.count(wordlist[0])) #count the instances of a word and add it to the frequencies list\n\t\tunique_wordlist.append(wordlist[0]) #add the word into a unique words list\n\t\twordlist = list(filter((wordlist[0]).__ne__, wordlist)) #remove all 
other similar words from the wordlist\n\n\n\tn = len(word_freq)\n\toutput = {}\n\n\tfor i in range(n):\n\t\tif unique_wordlist[i].isdigit(): #convert sting digits into int\n\t\t\tunique_wordlist[i] = int(unique_wordlist[i])\n\t\toutput[unique_wordlist[i]] = word_freq[i] #add the unique words with their corresponding frequencies into the output dict\n\t\n\treturn output", "def word_frequency_dict(tokens):\n\n\tfdist = FreqDist(tokens) \t\t\t\t\t\t# fdist.keys() fdist.values()\n\treturn dict(fdist)", "def dictify(words):\n word_freq = {}\n for word in words:\n if word:\n key = word.lower()\n if key in word_freq:\n word_freq[key] += 1\n else:\n word_freq[key] = 1\n else:\n pass\n return word_freq", "def word_frequency( tokenized, dic ):\n print( 'computing word frequencies' )\n start = time.time()\n for i, text in enumerate( tokenized ):\n for token in text:\n if token not in dic:\n dic[ token ] = 1\n else:\n dic[ token ] += 1\n if i % 10000 == 0:\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s'.format( i, NO_REVIEWS, time.time() - start ) )\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s\\n'.format( i, NO_REVIEWS, time.time() - start ) )", "def prep_dict(word):\n counts = {}\n for l in word.lower():\n if l!=\" \":\n counts[l] = counts.get(l,0) + 1\n return counts", "def create_word_score(self):\n word_freq = {}\n word_deg = {}\n for zettel in self.lemma_tokens:\n for word in zettel:\n word_list = re.split(\" \", word[0])\n word_list_deg = len(word_list) - 1\n for new_word in word_list:\n word_freq.setdefault(new_word, 0)\n word_freq[new_word] = word_freq[new_word] + 1\n word_deg.setdefault(new_word, 0)\n word_deg[new_word] = word_deg[new_word] + word_list_deg\n word_score = {}\n for word in word_freq:\n word_deg[word] = word_deg[word] + word_freq[word]\n word_score.setdefault(word, 0)\n word_score[word] = word_deg[word] / (word_freq[word] * 1.0)\n return word_score", "def word_frequency(words):\n freq = {}\n for w in words:\n cur_word = w.lower().strip(punctuation)\n freq[cur_word] = freq.get(cur_word, 0) + 1\n return freq", "def words(text):\n clean = TextBlob(clean(text))\n sentence_count = len(clean.sentences)\n words = clean.tokenize()\n word_count = len(words)\n avg_len = np.mean([len(word) for word in words])\n words_dict = {'sentence_count': sentence_count, 'word_count': word_count,\n 'avg_len': avg_len}\n return words_dict", "def word_freq(self, word_list):\n hist = {}\n for word in word_list:\n hist[word] = hist.get(word, 0) + 1\n return hist", "def get_wordcount(text):\r\n\r\n characters = len(text)\r\n chars_no_spaces = sum([not x.isspace() for x in text])\r\n asian_chars = sum([is_asian(x) for x in text])\r\n non_asian_words = nonj_len(text)\r\n words = non_asian_words + asian_chars\r\n \r\n return dict(characters=characters,\r\n chars_no_spaces=chars_no_spaces,\r\n asian_chars=asian_chars,\r\n non_asian_words=non_asian_words,\r\n words=words)", "def word_freq(word, ngram_dict):\n word = word.lower()\n return ngram_dict[word] if word in ngram_dict else 0", "def count_words(self, contents):\n wordCounts = {}\n for i in self.ngramCounts:\n if i == 0: # want the default to be the size of the corpus\n total = 0\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for word in words:\n if word:\n total += 1\n wordCounts[i] = defaultdict(lambda: total)\n continue\n else:\n counts = defaultdict(lambda: 0)\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove 
nulls\n for k, word in enumerate(words): \n if k < (i-1) or not word:\n continue\n key = \"\"\n for j in range(k-i+1, k+1):\n key += words[j] + \" \"\n counts[key.strip()] += 1\n wordCounts[i] = counts\n return wordCounts", "def frequencies(self):\n dic = {}\n for word in self.words():\n dic[word] = dic.get(word, 0) + 1\n return dic", "def mapWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n return token_map", "def frequency(w: str) -> float:\n return frequency_list.get(remove_punctuation(w), 0)", "def computeWordFrequencies(self, tokens: ['token'], frequencies: {'token': int}):\n # project2: update this method to take existing dict as parameter and modify it\n # additionally, stopwords are not inserted in the dict;\n # words shorter than 3 character or contains all digits are ignored\n for token in tokens:\n # if the key is not in dict, dict.setdefault method initiates the value at 0\n # if token not in stopwords and len(token) >= 3 and not token.isdigit():\n frequencies[token] = frequencies.setdefault(token, 0) + 1", "def convert_word_to_count(counter={}, doc=[]):\n for sentence in doc:\n for word in sentence.split():\n if word not in counter:\n counter[word] = 1\n else:\n counter[word] += 1\n return counter", "def freq(word, document):\n return document.split(None).count(word)", "def clean(doc, word_count={}):\n doc = doc.lower()\n tokens = wt(doc)\n\n filterWord = []\n for w in tokens:\n if w not in dots and w not in stopWord:\n if w in slangs:\n w = slangs[w]\n filterWord.append(w)\n\n sents = \" \".join(filterWord)\n filterWord = re.findall('\\w+', sents)\n\n ps = PorterStemmer()\n\n for w in filterWord:\n fword = ps.stem(w)\n\n word_count[fword] = word_count.get(fword, 1.0)\n word_count[fword] += 1\n\n return word_count", "def unique_words(data: np.ndarray) -> dict:\n words = {}\n for doc in data:\n docwords_list = [w for w in doc.split(' ') if w != '']\n docwords_set = set(docwords_list)\n for word in docwords_list:\n try:\n words[word]['freq'] += 1\n except KeyError:\n words[word] = {'freq': 1, 'doccount': 0}\n for word in docwords_set:\n words[word]['doccount'] += 1\n return words", "def getFrequencies(tweets):\n total_words = 0\n word_freq = {}\n for tweet in tweets:\n twext = tweet['clean_text']\n for word in twext.split(' '):\n word = word.strip()\n if word:\n total_words += 1\n if word not in word_freq:\n word_freq[word] = float(1)\n else:\n word_freq[word] += 1\n for key in word_freq:\n word_freq[key] = word_freq[key]/total_words\n return word_freq", "def word_count(input_str):\n counts = dict()\n words = input_str.split()\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts", "def preprocess(text, freq=5):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? 
\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n\n words_raw = text.strip().lower().split()\n word_counts = Counter(words_raw)\n words = [w for w in words_raw if word_counts[w] > freq]\n vocab = set(words)\n vocab2index = {w: idx for idx, w in enumerate(vocab)}\n index2vocab = {idx: w for idx, w in enumerate(vocab)}\n words_int = [vocab2index[w] for w in words]\n return words_int, vocab2index, index2vocab", "def letter_freq(txt):\n frequencies = {}\n txt_lower = txt.lower()\n\n for i in txt_lower:\n keys = frequencies.keys()\n if i in keys:\n frequencies[i] += 1\n else:\n frequencies[i] = 1\n return frequencies", "def word_count(input_file, word_freq=None):\n if word_freq is None:\n word_freq = collections.defaultdict(int)\n\n for l in input_file:\n for w in l.strip().split():\n word_freq[w] += 1\n\n return word_freq", "def calc_weighted_frequency(words,ps,lem,stopWords,text_string):\r\n \r\n\r\n word_frequencies = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n word = lem.lemmatize(word)\r\n print(word)\r\n if word not in stopWords:\r\n if word not in word_frequencies:\r\n word_frequencies[word] = 1\r\n else:\r\n word_frequencies[word] += 1\r\n \r\n maximum_frequncy = max(word_frequencies.values())\r\n for word in word_frequencies.keys():\r\n word_frequencies[word] = (word_frequencies[word]/maximum_frequncy) \r\n print(word_frequencies)\r\n return word_frequencies", "def analyze_word(s):\n\n a = {}\n a['word'] = s\n a['n_letters'] = len(s)\n a['n_vowels'] = count_vowels(s)\n \n return a", "def _word_counter(input_string: str) -> Dict[str, int]:\n # @todo Create a data type that can counts keys as they are added\n _current_word = ''\n parsed_words = {}\n\n for character in input_string.lower():\n if character in MaximalTextAnalyzer._letters:\n _current_word += character\n elif len(_current_word):\n parsed_words = MaximalTextAnalyzer._insert_into_dict(\n words_dict=parsed_words, key=_current_word)\n\n _current_word = ''\n\n # What if it does not end with a separator?\n if _current_word:\n parsed_words = MaximalTextAnalyzer._insert_into_dict(\n words_dict=parsed_words, key=_current_word)\n\n return parsed_words", "def word_frequency(words):\r\n frequency = {}\r\n for w in words:\r\n frequency[w] = frequency.get(w, 0) + 1\r\n return frequency", "def generate_frequency_map(data: str) -> Dict[str, int]:\n seperators: Final[List[str]] = [\n ',', '.', '\\n', ' ', '\\t', '?', '<', '>', '!', ':', ';'\n ]\n tokens: List[str] = tokenize(data, seperators)\n\n frequency_map: Dict[str, int] = {}\n for token in tokens:\n if token in frequency_map.keys():\n frequency_map[token] += 1\n else:\n frequency_map[token] = 1\n return frequency_map", "def countwords(txt):\n\twords = {}\n\n\tpattern = re.compile(\"[a-zA-Z][a-zA-Z0-9]*\")\t\n\tfor word in pattern.findall(txt):\n\t\twords[word.lower()] = words.get(word,0)+1\t \n\t\n\t# i'd rather do this in the prior step\n\t# but i need to be able to eliminate dupes\n\t# which may or may not be more expensive than\n\t# going this route. 
need to benchmark it.\n\tfor key,word in words.items():\n\t\tapcount.setdefault(key,0)\n\t\tapcount[key]+=1\n\t\n\treturn words", "def word_count(self):\n\n # split words on default word boundaries for words list\n words = self.phrase.split() \n\n # translate removes punctuation only, normalizes to lower case\n normalized_words = [self.normalize_word(w) for w in words]\n\n # removes empty strings after stripping punctuation\n filtered_words = [w for w in normalized_words if w]\n\n # sets up default dictionary, so all entries are 0\n word_counts = collections.defaultdict(int) #{}\n\n # define word counting function for use in reduce\n def count_word(dictionary, word):\n dictionary[word] = dictionary[word] + 1\n return dictionary\n\n # count words into dictionary from word list\n reduce(count_word, filtered_words, word_counts)\n\n return word_counts", "def initialize_document_frequencies():\n global document_frequency\n for term in dictionary:\n document_frequency[term] = len(postings[term])", "def create_freq_dict(sents, lang):\n ix = 0\n freq_dict_all = []\n stop_words = set(stopwords.words(lang))\n\n for sent in sents:\n ix += 1\n freq_dict = {}\n words = word_tokenize(sent)\n\n for word in words:\n word = word.lower()\n if word not in stop_words:\n if word in freq_dict:\n freq_dict[word] += 1\n else:\n freq_dict[word] = 1\n\n temp = {\n 'doc_id': ix,\n 'freq_dict': freq_dict\n }\n\n freq_dict_all.append(temp)\n\n return freq_dict_all", "def _compute_frequencies( word_sent):\n\t\tfreq = defaultdict(int)\n\t\tfor s in word_sent:\n\t\t\tfor word in s:\n\t\t\t\tif word not in _stopwords:\n\t\t\t\t\tfreq[word] += 1\n\t\t\t\t# frequencies normalization and fitering\n\t\treturn freq", "def calculate_word_counts(text : Text)->Counter:\n return Counter(tokenized_text(text))", "def preparation(self):\n self.word_freq = defaultdict(int)\n\n for sentence in self.corpus:\n for word in sentence:\n self.word_freq[word] += 1\n\n # self.words decide the index of all the words\n self.words = list(self.word_freq.keys())\n self.T = len(self.words)\n\n # word_index will give index for a given word and vice versa for index_word\n self.word_index = dict([[word, i] for i, word in enumerate(self.words)])\n self.index_word = dict([[i, word] for i, word in enumerate(self.words)])", "def __init__(self, input_string):\n self.words_to_counts = {}\n self.split_and_populate_words(input_string)", "def calculate_frequency_table(word):\n\n frequency_table = {}\n if word is None:\n return frequency_table\n\n for char in word:\n if char not in frequency_table.keys():\n frequency_table[char] = 1\n else:\n frequency_table[char] += 1\n\n return frequency_table", "def word_count(phrase):\n words = phrase.split()\n deDupedWords = set(words)\n wordCount = {}\n\n for element in deDupedWords:\n wordCount.update({element: words.count(element)})\n\n return wordCount", "def get_wordcount_obj(text):\r\n return dict2obj(get_wordcount(text))", "def print_word_freq(file):\n with open(file) as text:\n text = text.read().lower()\n text = text.replace(\"\\n\", \" \")\n text = text.replace(\"’\", \"\")\n # text = \" \".join(text.split())\n # print(text)\n for character in string.punctuation:\n text = text.replace(character, \"\")\n word_list = text.split()\n clean_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n clean_list.append(word)\n \n\n # for stop_word in STOP_WORDS:\n # if stop_word in word_list:\n # word_list.remove(stop_word)\n\n\n new_dict = {}\n for word in clean_list:\n new_dict[word] = clean_list.count(word)\n 
sorted_dict = sorted(new_dict.items())\n print(sorted_dict)\n\n # print(f\"{key} | {value} {'*' * value}\")\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n\n # for word in word_list:\n # if word in string.punctuation:\n # #do something\n # if word in STOP_WORDS:\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n # print(text)", "def get_freqs(self):\n dictionary = {}\n for word in self.word_list:\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n letter_sorted = sorted(dictionary.items(), key=lambda entry: entry[0]) #sorts dictionary into alphabetized tuples\n count_sorted = sorted(letter_sorted, key=lambda seq: seq[1], reverse=True) #sorts alphabetical tuples into count order\n return count_sorted", "def make_worddict(self):\n\t\tprint(\"Making word dictionary\")\n\t\tword_to_freq = self.make_word_to_freq()\n\t\twords = list(word_to_freq.keys())\n\t\twords.sort() # sort alphabetically first to avoid non-deterministic ordering of words with the same frequency\n\t\twords.sort(key = lambda x:word_to_freq[x], reverse = True)\n\n\t\tfor word in words[:self.FREQCAP-len(self.worddict)]:\n\t\t\tself.worddict[word] = len(self.worddict)\n\t\t\n\t\tprint(\"Word dictionary size:\", len(self.worddict))", "def word_frequency(self, document):\n freq_table = {}\n words = nltk.word_tokenize(document)\n for word in words:\n if word in freq_table:\n freq_table[word] = freq_table.get(word) + 1\n else:\n freq_table[word] = 1\n # cut down the frequency table so that only common words are scored for\n freq_table = sorted(freq_table.items(), key=lambda x: x[1], reverse=True)\n scorable_words = []\n for word, occ in freq_table:\n # set threshold as words appearing x times or more - set to optimal valeue = 0\n # in hindsight this can just be deleted\n if int(occ) > 0:\n scorable_words.append(word)\n else:\n break\n self.sent_pos = self.sent_pos + 1 \n return scorable_words", "def process_dict(text, frequency_threshold):\n\n # Trying to load previous unique_words (pickle file)\n UNIQUE_WORDS_PICKLE = \"unique_words_with_frequency_\" + str(frequency_threshold) + \".pickle\"\n \n unique_words = None\n if os.path.isfile(UNIQUE_WORDS_PICKLE):\n try:\n with open(UNIQUE_WORDS_PICKLE, 'r') as f:\n unique_words = pickle.load(f)\n except:\n os.remove(UNIQUE_WORDS_PICKLE)\n unique_words = None\n\n if (type(unique_words) == list):\n return unique_words\n\n\n WORD_COUNT_PICKLE = \"word_count.pickle\"\n WORD_COUNT = 253855\n\n print(\"Processing dictionary. This will take a while.\")\n\n # Trying to load previous word_count (pickle file)\n word_count = None\n if os.path.isfile(WORD_COUNT_PICKLE):\n try:\n with open(WORD_COUNT_PICKLE, 'r') as f:\n word_count = pickle.load(f)\n if len(word_count) != WORD_COUNT:\n os.remove(WORD_COUNT_PICKLE)\n word_count = None\n except:\n raise\n os.remove(WORD_COUNT_PICKLE)\n word_count = None\n\n # count words\n if word_count == None:\n print(\"Pickle file not found. 
Counting word occurence...\")\n\n # grab all the words\n words = text.split(\" \")\n\n # counting word occurence\n word_count = dict(Counter(words).most_common())\n \n # saving word count for future reuse\n with open(WORD_COUNT_PICKLE, 'w') as f:\n pickle.dump(word_count, f)\n print(\"Word count saved for future reuse.\")\n \n # making sure we have the correct count loaded\n assert(type(word_count) == dict)\n assert(len(word_count) == WORD_COUNT)\n\n # remove the duplicates and single-character words.\n unique_words = [w for w in word_count.keys() if len(w) > 1]\n vocab_size = len(unique_words)\n print(\"Vocab size:\", vocab_size)\n\n # remove words with frequency lower than 1%\n unique_words = [word for word in unique_words if float(word_count[word]) / vocab_size > frequency_threshold]\n print(\"Vocab size (>%.3f%% frequency): %d\" % ((frequency_threshold * 100), len(unique_words)))\n\n unique_words.sort(key=lambda word: len(word), reverse=True)\n unique_words.append('a')\n unique_words.append('i')\n\n # save unique words for future reuse\n with open(UNIQUE_WORDS_PICKLE, 'w') as f:\n pickle.dump(unique_words, f)\n print(\"unique_words saved for future reuse.\")\n\n return unique_words", "def create_dict(text):\n #On/Off case sensitivity\n text = text.lower() \n\n #handy one liner that splits words apart via whitespace, and \n #removes punctuation. Results in list of words.\n word_list = [s.strip(string.punctuation) for s in text.split()]\n \n d = dict()\n for word in word_list:\n d[word] = d.get(word, 0) +1\n return d", "def frequencyAnalysis(article, dictionary):\r\n string = article #Sets the string to the article\r\n string = re.sub(\"[^a-zA-Z0-9’\\s]+\",'', string) #Takes the articles and removes all characters appart from apostrophes, spaces, digits, and leters\r\n string = re.sub(\"’\", \"'\", string) #Replaces ’ with '\r\n string = string.lower() #Ensures that all the charcters are lower case\r\n stringList = string.split() #Takes the article and turns it into a list\r\n \r\n print(\"\\nConverted article to list\\n\")\r\n \r\n print(\"Starting frequency analysis\\n\")\r\n\r\n #Started the frequency anaylsis\r\n for word in stringList:\r\n if \"'s\" in word: #Done to remove extra keys in the dictionary, removes the possessive such that it counts \"Trump\" and \"Trump's\" as one word\r\n word = word[0:-2]\r\n elif \"s'\" in word:\r\n word = word[0:-1]\r\n if word != \"advertisement\":\r\n if word in dictionary:\r\n dictionary[word] +=1 #If it finds the word in the dictionary, the frequency has to increase by one\r\n else:\r\n dictionary[word] = 1 #If it finds a new word, it needs to add the word so the frequency is one\r", "def make_frequency_dict(path):\n filepath = path#'germanWordList_03.csv'\n wordDict = {}\n f = open(filepath,'r')\n for line in f:\n split = line.split(\",\")\n word = (split[0]).lower()\n temp = float(split[1])\n if word in wordDict:\n val = float(wordDict[word])\n if val > 0:\n num = (temp * val) / (val + temp)\n else:\n num = temp\n else:\n num = temp\n wordDict[word]= num\n return wordDict", "def histogram(text):\n hist = {}\n\n for char in text.lower():\n if char.isalpha():\n hist[char] = hist.get(char, 0) + 1\n else:\n hist['others'] = hist.get('others', 0) + 1\n return hist", "def word_count(phrase):\n word_dict = {}\n\n for word in phrase.split():\n word_dict[word] = word_dict.get(word, 0) + 1\n\n return word_dict", "def word_frequency(seq):\n\n # Initializes an emtpy hash map from HashMap class\n hash_map = HashMap()\n\n # For each word (not unique) in 
sequence\n for word in seq:\n\n # if that word is already in hash map\n if word in hash_map:\n\n # Increment value for that word\n hash_map[word] += 1\n\n # if word not yet in hash map\n else:\n\n # set count value for word equal to one\n hash_map[word] = 1\n\n # return filled hash map from sequence, words and words counts\n return hash_map", "def word_frequency(text):\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens = tokenizer.tokenize(text)\n\n stop = set(stopwords.words('english'))\n tokens_without_stop = list(filter(lambda word: word.lower() not in stop, tokens))\n\n counts = Counter(tokens_without_stop)\n return counts", "def dfc(text: str):\n #Splitting the text into a list\n wordlist = text.split()\n worddictionary = {}\n\n #Creating the wordlist dictionary\n for word in wordlist:\n if word in worddictionary:\n #Increase\n worddictionary[word] += 1\n else:\n #add to the dictionary\n worddictionary[word] = 1\n\n #Converting worddictionary into a dataframe\n df = pd.DataFrame.from_dict(worddictionary, orient='index')\n #Resetting index to a numerical one for ease of use\n df = df.reset_index()\n #Renaming the old string-valued index\n df = df.rename(columns={'index':'word'})\n #Defining two functions (over empty variables) to replace commas and dots\n remover = lambda x: x.replace(',','')\n remover2 = lambda x: x.replace('.','')\n #Using ( too many lines) to apply the functions\n df['word'] = df['word'].apply(remover)\n df['word'] = df['word'].apply(remover2)\n #Row-wise Subselection and assignment to remove words with a frequency smaller than 2\n df = df[df[0] > 2]\n #Renaming word frequncy\n df = df.rename(columns={0:'Frequency'})\n\n return df", "def preprocessing():\n english_dictionary = nltk.corpus.brown.words()\n slang_vocab = pickle.load(open('vocab_pattern_match_with_freq.pkl', 'rb'))\n\n normalize_english_dict = len(english_dictionary)\n normalize_slang_vocab = 0\n for w, n in slang_vocab.items():\n normalize_slang_vocab += n\n\n words = {}\n for w, n in Counter(english_dictionary).items():\n words[w] = n/normalize_english_dict\n \n for w, n in slang_vocab.items():\n if w not in words:\n words[w] = 0.\n words[w] += n/normalize_slang_vocab\n\n words_by_freq = [w for w,_ in sorted(words.items(), key=lambda x: x[1], reverse=True)]\n\n # Build a cost dictionary, assuming Zipf's law and cost = -math.log(probability).\n #words = open(\"words_by_frequency.txt\").read().split()\n wordcost = dict((k, log((i+1)*log(len(words_by_freq)))) for i,k in enumerate(words_by_freq))\n maxword = max(len(x) for x in words_by_freq)\n return wordcost,maxword", "def get_word_frequencies(topic_description):\n frequencies = {w:f for w,f in topic_description}\n return frequencies", "def print_word_freq(file):\n with open(file) as text:\n text_string = str(text.readlines())\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"-\", \"\")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n word_list = text_string.split()\n no_stop_words = []\n for word in word_list:\n if word in STOP_WORDS:\n pass\n else: no_stop_words.append(word)\n clean_list = {}\n for word in no_stop_words:\n clean_list[word] = no_stop_words.count(word) \n print(clean_list)", "def 
computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)", "def get_word_count(my_str):\n my_list = my_str.split(\" \")\n my_map = {}\n for word in my_list:\n # Strip the word from any character\n word = word.strip(\".\")\n word = word.strip(\",\")\n # Convert word to all lowercase\n word = word.lower()\n if word not in my_map:\n my_map[word] = 1\n else:\n my_map[word] += 1\n\n return my_map", "def print_word_freq(file):\n # with open(file, 'r') as text the r as the second arguement means that my intentions are to read the file\n with open(file, 'r') as text:\n # this reads the entire file and puts this into text string\n text_string = text.read()\n # returns the string respresentation of text string without removing special characters so you can see what you need to remove\n # print(repr(text_string))\n # this removes the specified characters from the text string\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"—\", \" \")\n text_string = text_string.replace(\"-\", \" \")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"’\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"\\\"\", \"\")\n # takes the text string and makes all the characters lower case\n text_string = text_string.lower()\n # takes the text string and splits all the words into a list this splits from space to space\n words_list = text_string.split()\n # a dictionary is a key and a value\n no_stop_words = {}\n # for loop that will cycle through the words list\n for word in words_list:\n # checking to see if the word is stop words\n if word not in STOP_WORDS:\n # if the word is already in the dictionary no stop words increment the value by 1\n if word in no_stop_words:\n no_stop_words[word] += 1\n # if the word is not in the dictionary no stop words add this to the dictionary and give it a value of 1\n else:\n no_stop_words[word] = 1\n \n sorted_dict = {}\n sorted_keys = sorted(no_stop_words, key=no_stop_words.get, reverse=True)\n \n for w in sorted_keys:\n sorted_dict[w] = no_stop_words[w]\n \n for key in sorted_dict:\n print(f\"{key:>15} | {sorted_dict[key]:2} {'*' * sorted_dict[key]}\")\n \n # good practice to ensure that we are properly closing the file in use at the end of the function\n text.close()", "def analyze_words(self):\n\t\t\n\t\tword_analysis = {}\n\t\tfor word in self.word_list:\n\t\t\tif word not in word_analysis:\n\t\t\t\tacademic = (word in LEMMA_DICT)\n\t\t\t\tlength = len(word)\n\t\t\t\tfrequency = len(self.word_list[word])\n\t\t\t\tstem = word\t\n\t\t\t\tword_location_index = len(self.sentence_index)-1 #first set it as the last index\n\t\t\t\t\n\t\t\t\tfor index, sentence in self.sentence_index.items():\n\t\t\t\t\tif word in sentence.split():#need to be individual words, not parts of a word\n\t\t\t\t\t\tword_location_index = index \n\t\t\t\t\t\tbreak\n\t\t\t\t\tif self.word_list[word][0] in sentence.split():#accounts for words with upper cases\n\t\t\t\t\t\tword_location_index = index\n\t\t\t\t\t\n\t\t\t\t#selection critera\n\t\t\t\tif academic:\n\t\t\t\t\tselection_criteria = 
'academic word'\n\t\t\t\telif frequency > 1: \n\t\t\t\t\tselection_criteria = 'high frequency'\n\t\t\t\telse:\n\t\t\t\t\tselection_criteria = 'word length'\n\n\t\t\t\tword_analysis[word] = (academic, length, frequency, stem, word_location_index, selection_criteria)\n\t\t\n\t\tself.word_analysis = word_analysis\n\t\t\n\t\treturn self.word_analysis", "def frequency(text):\n # TODO: change function input to a textfile?\n import collections\n freq = collections.Counter(text)\n # print freq\n return freq", "def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n # frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq[w]\n return freq", "def word_count(s):\n # Your code here\n\n stop_char = r\"\"\":;\",.-+=/|[]{|}()*^\\&\"\"\"\n\n # Make sure special characters arent in string\n s_clean = \"\".join([x for x in s if x not in stop_char])\n\n # Lower case and remove trailing space\n word_list = s_clean.lower().split()\n\n # use cache to hold memory\n word_count = {}\n\n for x in word_list:\n\n if x not in word_count:\n # if not there, start it at 0\n word_count[x] = 0\n\n # if seen again, increase count\n word_count[x] += 1\n\n return word_count", "def gen_words(self, doc):\n pattern = re.compile(u'[\\\\s\\\\d,.<>/?:;\\'\\\"[\\\\]{}()\\\\|~!@#$%^&*\\\\-_=+a-zA-Z,。《》、?:;“”‘’{}【】()…¥!—┄-]+')\n doc = re.sub(pattern, ' ', doc)\n suffix_indexes = index_of_sorted_suffix(doc, self.max_word_len)\n word_cands = {}\n # compute frequency and neighbors\n for suf in suffix_indexes:\n word = doc[suf[0]:suf[1]]\n if word not in word_cands:\n word_cands[word] = WordInfo(word)\n word_cands[word].update(doc[suf[0] - 1:suf[0]], doc[suf[1]:suf[1] + 1])\n # compute probability and entropy\n length = len(doc)\n for k in word_cands:\n word_cands[k].compute(length)\n word_cands[k].compute_pp(self.pos_prop)\n # compute aggregation of words whose length > 1\n values = sorted(word_cands.values(), key=lambda x: len(x.text))\n for v in values:\n if len(v.text) == 1:\n continue\n v.compute_cohesion(word_cands)\n\n return sorted(values, key=lambda v: v.freq, reverse=True)", "def word_lengths(sentence):\n\n word_count_dict = {}\n sentence = sentence.split()\n\n for word in sentence:\n length = len(word)\n if length not in word_count_dict:\n word_count_dict[length] = {word}\n else:\n set = word_count_dict[length]\n set.add(word)\n\n return word_count_dict", "def letter_freq( text ):\n\tchars = string.ascii_uppercase\n\ttext = text.upper()\n\tresult = get_letter_dict()\n\ttotal = 0\n\tfor char in chars:\n\t\tcount = text.count(char)\n\t\tresult[char] = count\n\t\ttotal += count\n\tif total != 0:\n\t\tfor char in chars:\n\t\t\tresult[char] = (result[char]*10000 / total) / float(100)\n\treturn result", "def lyrics_to_frequencies(lyrics):\n lyricsDictionary = dict()\n for each_word in lyrics:\n if each_word in lyricsDictionary:\n lyricsDictionary[each_word] += 1\n else:\n lyricsDictionary[each_word] = 1\n return lyricsDictionary", "def process_text(self, text):\n\n flags = (re.UNICODE if sys.version < '3' and type(text) is unicode # noqa: F821\n else 0)\n pattern = r\"\\w[\\w']*\" if self.min_word_length <= 1 else r\"\\w[\\w']+\"\n regexp = self.regexp if self.regexp is not None else pattern\n\n words = re.findall(regexp, text, flags)\n # remove 's\n words = [word[:-2] if 
word.lower().endswith(\"'s\") else word\n for word in words]\n # remove numbers\n if not self.include_numbers:\n words = [word for word in words if not word.isdigit()]\n # remove short words\n if self.min_word_length:\n words = [word for word in words if len(word) >= self.min_word_length]\n\n stopwords = set([i.lower() for i in self.stopwords])\n if self.collocations:\n word_counts = unigrams_and_bigrams(words, stopwords, self.normalize_plurals, self.collocation_threshold)\n else:\n # remove stopwords\n words = [word for word in words if word.lower() not in stopwords]\n word_counts, _ = process_tokens(words, self.normalize_plurals)\n\n return word_counts", "def find_frequency(text, n=1):\n freqs = {}\n length = len(text)\n for i in xrange(0, length):\n upper = i+n\n if upper > length:\n break\n gram = text[i:upper]\n dict_operate(freqs, gram, 1, operator.add)\n return freqs", "def count_words(phrase):\n # split the input string at spaces\n phrase_split = phrase.split()\n\n # initiate empty dictionary\n word_count = {}\n\n # iterate over words in the phrase\n for word in phrase_split:\n if word in word_count:\n\n # if the word is already a key in the dictionary, increase the value by 1\n word_count[word] += 1\n\n else:\n # if the word is not a key in the dictionary, set its value to 1\n word_count[word] = 1\n\n return word_count", "def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n # frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq[w]\n return freq", "def create_keyword_score(self):\n keywords_score = {}\n for zettel in self.lemma_tokens:\n for word in zettel:\n if zettel.count(word) >= self.min_keyword_freq:\n keywords_score.setdefault(word[0], 0)\n word_list = re.split(\" \", word[0])\n score = 0\n for new_word in word_list:\n score += self.word_scores[new_word]\n keywords_score[word[0]] = score\n return keywords_score", "def post(self):\n input_text = self.get_argument('input_text', '')\n self.write(json_encode(extract_word_frequencies(input_text)))", "def get_frequencies(tokens):\n cnt = {}\n\n for word in tokens:\n if word not in cnt:\n cnt[word] = 0\n\n cnt[word] += 1\n\n return cnt", "def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict", "def build_dict(fname):\n\t\n\twith open(fname) as file:\n\n\t\tword_count_dict = {}\n\n\t\tfor line in file:\n\t\t\tline = line.rstrip()\n\t\t\tline =line.split(' ')\n\t\t\tfor word in line:\n\t\t\t\tword = word.strip('\"!.,?_;():')\n\t\t\t\tword = word.lower()\n\t\t\t\tword_count_dict[word] = word_count_dict.get(word, 0) + 1\n\t\t#return word_count_dict\n\n\t\tfor each in word_count_dict:\n\t\t\tcount = word_count_dict[each]\n\t\t\tprint(each, count)\n\n\t\treturn", "def task1(sentence):\n split_sentence = sentence.split()\n dictionary = dict()\n for word in split_sentence:\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n for item in dictionary:\n print(\"Word \" + item + \" used \" + str(dictionary[item]) + \" times\")\n return dictionary", "def frequency(lst):\n\n count = dict()\n for word in lst:\n if word in count:\n count[word] += 
1\n else:\n count[word] = 1\n return count", "def _create_word_count_dict(self):\n word_counts = dict()\n for wc in self.word_counts.all():\n word_counts[wc.word.name] = wc.count\n return word_counts" ]
[ "0.74459994", "0.74220663", "0.7391444", "0.7324509", "0.7309391", "0.7289624", "0.7283719", "0.7226567", "0.722053", "0.7219711", "0.72076803", "0.7172384", "0.7164771", "0.7158287", "0.7152727", "0.7031354", "0.6992306", "0.69521797", "0.691885", "0.6871911", "0.68676853", "0.6859454", "0.68534315", "0.6745747", "0.6740641", "0.6725147", "0.66993594", "0.66962415", "0.6693471", "0.6675164", "0.6669777", "0.6659404", "0.6654962", "0.664969", "0.66479856", "0.66455925", "0.65943843", "0.6592497", "0.6578193", "0.6567506", "0.6535927", "0.65332997", "0.65188456", "0.6517215", "0.6507352", "0.650685", "0.6497039", "0.64894134", "0.64856184", "0.6474933", "0.6474854", "0.6474608", "0.64569646", "0.64547837", "0.6446805", "0.64458996", "0.644528", "0.6442004", "0.6427516", "0.6427329", "0.6425747", "0.6425517", "0.6423607", "0.6408584", "0.64008045", "0.6397894", "0.63925827", "0.63925314", "0.6383784", "0.6380176", "0.6374139", "0.6371749", "0.63703465", "0.63679266", "0.63657326", "0.6363823", "0.63474035", "0.6346347", "0.6343593", "0.63385713", "0.63382727", "0.63319343", "0.6329965", "0.63283527", "0.63265723", "0.63218755", "0.6313483", "0.6310146", "0.6307117", "0.6305917", "0.6298263", "0.6289032", "0.62831306", "0.62629735", "0.6258844", "0.62578106", "0.6252437", "0.62484926", "0.6243618", "0.6232469" ]
0.6263762
93
takes clean str from self.text and creates dict of stem freq.
def makeStems(self):
    clean_s = self.cleanString(self.text)
    LoW = clean_s.split()
    for x in LoW:
        if create_stem(x) not in self.stems:
            self.stems[create_stem(x)] = 1
        else:
            self.stems[create_stem(x)] += 1
    return self.stems
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_text(self, text, wordcount_dictionary=None):\n if not wordcount_dictionary:\n wordcount_dictionary = {}\n words = self.parse_regexp.findall(text)\n for word in words:\n new_word = stem(word.lower())\n if new_word not in self.stopwords:\n if new_word in wordcount_dictionary:\n wordcount_dictionary[new_word] += 1\n else:\n wordcount_dictionary[new_word] = 1\n return wordcount_dictionary", "def word_frequency_table(self, text_string):\r\n stopWords = set(stopwords.words(\"english\"))\r\n words = word_tokenize(text_string)\r\n ps = PorterStemmer()\r\n\r\n freqTable = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n if word in stopWords:\r\n continue\r\n if word in freqTable:\r\n freqTable[word] += 1\r\n else:\r\n freqTable[word] = 1\r\n\r\n return freqTable", "def stemming(self,sentence):", "def make_frequency_dict(self, text):\n\t\t\tfrequency = {}\n\t\t\t#tomamos los numeros como caracteres entonces el diccionario solo tendra un rango (0,9) las ',' y '\\n'\n\t\t\tfor character in text:#O(len(row)*columns) \n\t\t\t\tif not character in frequency:#como frequency es un diccionario es de O(1)\n\t\t\t\t\tfrequency[character] = 0\n\t\t\t\tfrequency[character] += 1\n\t\t\t\n\t\t\treturn frequency", "def create_freq_dict(corpus, doc_info):\n for idx, content in enumerate(corpus):\n word_freq_table = {}\n splitted_sentence = content.split()\n for word in splitted_sentence:\n word = word.lower()\n if word not in word_freq_table:\n word_freq_table[word] = 1\n else:\n word_freq_table[word] += 1\n doc_info[idx]['freq_dict'] = word_freq_table", "def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings", "def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d", "def do(text):\n return freeling_stemming(text)", "def getTextStatsFeat(text, stemmRequired = True,\r\n excludeStopwordsRequired = True):\r\n #length = len(text)\r\n sentenceCount = len(re.findall(\"[.?!]\", text))\r\n exclamationMarkCount = len(re.findall(\"[!]\", text))\r\n questionMarkCount = len(re.findall(\"[?]\", text))\r\n digitsCount = len(re.findall(\"[0-9]+\", text))\r\n text = text.replace(\",\", \" \").replace(\".\", \" \")\r\n cleanText = re.sub('[^a-zа-я0-9]', ' ', text.lower())\r\n wordCount = 0.0\r\n charCount = 0.0\r\n rusCharCount = 0.0\r\n engCharCount = 0.0\r\n if excludeStopwordsRequired:\r\n for w in cleanText.split():\r\n if len(w)>1 and w not in stopwords:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n else:\r\n for w in cleanText.split():\r\n if len(w)>1:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = 
getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n # per sentence\r\n wordPerSentence = tryDivide(wordCount, sentenceCount)\r\n charPerSentence = tryDivide(charCount, sentenceCount)\r\n rusCharPerSentence = tryDivide(rusCharCount, sentenceCount)\r\n engCharPerSentence = tryDivide(engCharCount, sentenceCount)\r\n # per word\r\n charPerWord = tryDivide(charCount, wordCount)\r\n rusCharPerWord = tryDivide(rusCharCount, wordCount)\r\n engCharPerWord = tryDivide(engCharCount, wordCount)\r\n # ratio\r\n rusCharRatio = tryDivide(rusCharCount, charCount)\r\n engCharRatio = tryDivide(engCharCount, charCount)\r\n rusCharVsEngChar = tryDivide(rusCharCount, engCharCount)\r\n engCharVsRusChar = tryDivide(engCharCount, rusCharCount)\r\n \r\n stats = [\r\n sentenceCount,\r\n wordCount,\r\n charCount,\r\n rusCharCount,\r\n engCharCount,\r\n digitsCount,\r\n exclamationMarkCount,\r\n questionMarkCount,\r\n wordPerSentence,\r\n charPerSentence,\r\n rusCharPerSentence,\r\n engCharPerSentence,\r\n charPerWord,\r\n rusCharPerWord,\r\n engCharPerWord,\r\n rusCharRatio,\r\n engCharRatio,\r\n rusCharVsEngChar,\r\n engCharVsRusChar,\r\n ]\r\n statsFeat = \"\"\r\n for i,f in enumerate(stats):\r\n if f != 0:\r\n statsFeat += \"%s:%s \" % (i+1, f)\r\n statsFeat = statsFeat[:-1] \r\n return statsFeat", "def clean(doc, word_count={}):\n doc = doc.lower()\n tokens = wt(doc)\n\n filterWord = []\n for w in tokens:\n if w not in dots and w not in stopWord:\n if w in slangs:\n w = slangs[w]\n filterWord.append(w)\n\n sents = \" \".join(filterWord)\n filterWord = re.findall('\\w+', sents)\n\n ps = PorterStemmer()\n\n for w in filterWord:\n fword = ps.stem(w)\n\n word_count[fword] = word_count.get(fword, 1.0)\n word_count[fword] += 1\n\n return word_count", "def wordFreq(parseThis):\n \n freq = {}\n nono = ('\"', \"'\", '%', '$', '!', '.', '?', '-', ','\n , '\\n', '\\t', '\\r', ':', ';')\n\n for c in nono:\n parseThis = parseThis.replace(c, \" \")\n \n words = parseThis.split()\n \n for word in words:\n temp = word.lower()\n freq[temp] = freq.get(temp, 0) + 1\n\n return freq", "def make_freq_dict(text):\n freq_dict = {}\n for i in text:\n if i not in freq_dict:\n freq_dict[i] = 1\n else:\n freq_dict[i] += 1\n return freq_dict", "def word_frequencies(word_list: TextIO) -> dict:\n words = word_list.read().split(' ')\n amount_of_words = len(set(words))\n frequencies = {}\n for index, word in enumerate(words):\n clean_word = remove_punctuation(word)\n if clean_word not in frequencies:\n frequencies[clean_word] = (index + 1) / amount_of_words\n del frequencies[\"\"]\n return frequencies", "def stem_text(text):\r\n return ps.stem(unidecode.unidecode(text.translate(TRANSLATION_TABLE)))", "def word_frequency(a_string):\n\n for char in \"\"\".$#,:\"'?!)(\"\"\":\n a_string = a_string.replace(char, \"\")\n for char in \"\"\"-\"\"\":\n a_string = a_string.replace(char, \" \")\n\n cleanstring = a_string.lower()\n a_list = cleanstring.split()\n a_dict = {}\n for item in a_list:\n if item in a_dict:\n a_dict[item]+= 1\n else:\n a_dict[item] = 1\n return a_dict", "def calculate_fdist(text, stem=False):\n list_of_words = remove_and_stemming(text, stem)\n fdist_all = FreqDist(list_of_words)\n return fdist_all", "def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n 
freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END", "def make_word_to_freq(self):\n\t\tword_to_freq = {}\n\t\tdocuments = self.tokenized_documents[\"train\"]\n\t\tfor document in documents:\n\t\t\tfor word in document:\n\t\t\t\tif not word in self.worddict: # make sure we have not found one of the pre-defined words\n\t\t\t\t\tword_to_freq[word] = word_to_freq.get(word, 0) + 1\n\t\t\n\t\treturn word_to_freq", "def build_frequency_dict(text: bytes) -> Dict[int, int]:\n freq_dic = {}\n for elem in text:\n if elem not in freq_dic:\n freq_dic[elem] = 1\n else:\n freq_dic[elem] += 1\n return freq_dic", "def frequency(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n list_of_words = []\n for i in root.iter(root_tag + 'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n for word in i.text.split():\n alphanumeric_filter = filter(str.isalnum, word)\n alphanumeric_string = \"\".join(alphanumeric_filter)\n list_of_words.append(alphanumeric_string)\n dict_of_frequency = collections.Counter(list_of_words)\n return dict_of_frequency", "def frequency(w: str) -> float:\n return frequency_list.get(remove_punctuation(w), 0)", "def analyze_words(self):\n\t\t\n\t\tword_analysis = {}\n\t\tfor word in self.word_list:\n\t\t\tif word not in word_analysis:\n\t\t\t\tacademic = (word in LEMMA_DICT)\n\t\t\t\tlength = len(word)\n\t\t\t\tfrequency = len(self.word_list[word])\n\t\t\t\tstem = word\t\n\t\t\t\tword_location_index = len(self.sentence_index)-1 #first set it as the last index\n\t\t\t\t\n\t\t\t\tfor index, sentence in self.sentence_index.items():\n\t\t\t\t\tif word in sentence.split():#need to be individual words, not parts of a word\n\t\t\t\t\t\tword_location_index = index \n\t\t\t\t\t\tbreak\n\t\t\t\t\tif self.word_list[word][0] in sentence.split():#accounts for words with upper cases\n\t\t\t\t\t\tword_location_index = index\n\t\t\t\t\t\n\t\t\t\t#selection critera\n\t\t\t\tif academic:\n\t\t\t\t\tselection_criteria = 'academic word'\n\t\t\t\telif frequency > 1: \n\t\t\t\t\tselection_criteria = 'high frequency'\n\t\t\t\telse:\n\t\t\t\t\tselection_criteria = 'word length'\n\n\t\t\t\tword_analysis[word] = (academic, length, frequency, stem, word_location_index, selection_criteria)\n\t\t\n\t\tself.word_analysis = word_analysis\n\t\t\n\t\treturn self.word_analysis", "def text2wordfreq(string, lowercase=False):\r\n\r\n\r\n from collections import Counter\r\n lst = Counter(tokenize(string, lowercase)).most_common()\r\n\r\n dictLst = dict(lst)\r\n\r\n return dictLst", "def getFrequencies(tweets):\n total_words = 0\n word_freq = {}\n for tweet in tweets:\n twext = tweet['clean_text']\n for word in twext.split(' '):\n word = word.strip()\n if word:\n total_words += 1\n if word not in word_freq:\n word_freq[word] = float(1)\n else:\n word_freq[word] += 1\n for key in word_freq:\n word_freq[key] = word_freq[key]/total_words\n return word_freq", "def word_frequency_dict(tokens):\n\n\tfdist = FreqDist(tokens) \t\t\t\t\t\t# fdist.keys() fdist.values()\n\treturn dict(fdist)", "def add_string(self, s):\n\n #sent_lengths\n sent_len = sentence_length(s)\n for sentences in sent_len:\n if sentences not in self.sentence_lengths:\n self.sentence_lengths[sentences] = 1\n elif sentences in self.sentence_lengths:\n self.sentence_lengths[sentences] += 1\n \n s = clean_text(s)\n word_list = s.split(' ')\n\n for w in word_list:\n self.numwords += 1\n # frequency of words\n if w not in self.words:\n self.words[w] = 1\n elif w in self.words:\n 
self.words[w] += 1\n # freqency of length of words\n if len(w) not in self.word_lengths:\n self.word_lengths[len(w)] = 1\n elif len(w) in self.word_lengths:\n self.word_lengths[len(w)] += 1\n #word stemming\n word_stem = stem(w)\n if word_stem not in self.stems:\n self.stems[word_stem] = 1\n elif word_stem in self.stems:\n self.stems[word_stem] += 1\n\n # ten most common words\n a = list(self.words)\n maximum_count = self.words[a[0]] \n for word in a:\n if self.words[word] > maximum_count:\n maximum_count = self.words[word]\n count = 1\n cw_list = []\n while count <= maximum_count:\n for word in a:\n if self.words[word] == count:\n cw_list = [word] + cw_list\n count += 1\n\n self.common_word = cw_list[:10]\n \n #simplify stemlist\n a = list(self.stems)\n for x in range(len(a)):\n for y in a[x+1:]:\n if y[:4] == a[x][:4]:\n self.stems[a[x]] += self.stems[y]\n del self.stems[y]\n a.remove(y)", "def find_frequency(text, n=1):\n freqs = {}\n length = len(text)\n for i in xrange(0, length):\n upper = i+n\n if upper > length:\n break\n gram = text[i:upper]\n dict_operate(freqs, gram, 1, operator.add)\n return freqs", "def fit(self, text):\n\n if self.lowercase:\n text = text.lower()\n\n print(\"Tokenize sentences...\")\n tokens = word_tokenize(text)\n\n self.words_set_size = len(set(tokens))\n\n print(\"Collecting of ngram counters...\")\n\n self.unigram_counts = Counter(tokens)\n self.bigram_counts = Counter(bigrams(tokens))\n\n return self", "def _count_word_frequency(self, data):\n _dict = {}\n for _docs in data:\n for _word in _docs:\n if _word in _dict:\n _dict[_word] += 1\n else:\n _dict[_word] = 1\n return _dict", "def frequencyAnalysis(article, dictionary):\r\n string = article #Sets the string to the article\r\n string = re.sub(\"[^a-zA-Z0-9’\\s]+\",'', string) #Takes the articles and removes all characters appart from apostrophes, spaces, digits, and leters\r\n string = re.sub(\"’\", \"'\", string) #Replaces ’ with '\r\n string = string.lower() #Ensures that all the charcters are lower case\r\n stringList = string.split() #Takes the article and turns it into a list\r\n \r\n print(\"\\nConverted article to list\\n\")\r\n \r\n print(\"Starting frequency analysis\\n\")\r\n\r\n #Started the frequency anaylsis\r\n for word in stringList:\r\n if \"'s\" in word: #Done to remove extra keys in the dictionary, removes the possessive such that it counts \"Trump\" and \"Trump's\" as one word\r\n word = word[0:-2]\r\n elif \"s'\" in word:\r\n word = word[0:-1]\r\n if word != \"advertisement\":\r\n if word in dictionary:\r\n dictionary[word] +=1 #If it finds the word in the dictionary, the frequency has to increase by one\r\n else:\r\n dictionary[word] = 1 #If it finds a new word, it needs to add the word so the frequency is one\r", "def apply_stemming(document):\n return [BasicNL.stemmer.stem(x) for x in document]", "def stem(self, df):\r\n word_dict = {}\r\n\r\n # document adalah nama lain dari kumpulan sentence (1 sentence juga == 1 document)\r\n # Loop sentence/ document dari dataframe kolom tweet/ kolom teks\r\n for document in df:\r\n # Loop kalimat menjadi sebuah kata\r\n for word in document: \r\n # Cek apabila kata tersebut tidak masuk dalam variabel dict word_dict\r\n if word not in word_dict: \r\n # If true: kata tersebut akan dijadikan keys dan diisi value kosong (nantinya value kosong akan direplae dengan hasil stemming)\r\n word_dict[word] = ' ' \r\n\r\n # Loop word dict hasil loop sebelumnya\r\n for word in word_dict: \r\n # Setiap kata yang tersimpan dalam word_dict akan di 
stemming\r\n word_dict[word] = stemmer.stem(word) \r\n\r\n # variabel x = list of words dari dataframe\r\n df = df.apply(lambda x: [word_dict[word] for word in x]) \r\n return df", "def process_text(text, stem=True):\n table = str.maketrans(\"\",\"\",string.punctuation) \n text = text.translate(table)\n tokens = word_tokenize(text)\n \n if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def __parse_corpus(self, corpus):\n corpus = self.__handle_corpus_unkwon_words(corpus)\n start_token = ' '.join([NGramModel.START_SENTENCE_TOKEN]*(self.__n-1))\n word_list = corpus.replace(NGramModel.START_SENTENCE_TOKEN, start_token).split()\n \n for n in range(1, self.__n+1): \n self.__ngram_counts[n] = {}\n for ngram, count in Counter(self.__generate_n_grams(word_list, n)).items():\n self.__ngram_counts[n][' '.join(ngram)] = count", "def initialize_document_frequencies():\n global document_frequency\n for term in dictionary:\n document_frequency[term] = len(postings[term])", "def words(phrase):\n\twordlist = phrase.split()\n\tunique_wordlist = []\n\tword_freq = []\n\n \n\twhile wordlist:\n\t\tword_freq.append(wordlist.count(wordlist[0])) #count the instances of a word and add it to the frequencies list\n\t\tunique_wordlist.append(wordlist[0]) #add the word into a unique words list\n\t\twordlist = list(filter((wordlist[0]).__ne__, wordlist)) #remove all other similar words from the wordlist\n\n\n\tn = len(word_freq)\n\toutput = {}\n\n\tfor i in range(n):\n\t\tif unique_wordlist[i].isdigit(): #convert sting digits into int\n\t\t\tunique_wordlist[i] = int(unique_wordlist[i])\n\t\toutput[unique_wordlist[i]] = word_freq[i] #add the unique words with their corresponding frequencies into the output dict\n\t\n\treturn output", "def process_text(text, stem=True):\n table = string.maketrans(\"\",\"\")\n text = text.translate(table, string.punctuation)\n tokens = word_tokenize(text)\n \n if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def generate_frequency_map(data: str) -> Dict[str, int]:\n seperators: Final[List[str]] = [\n ',', '.', '\\n', ' ', '\\t', '?', '<', '>', '!', ':', ';'\n ]\n tokens: List[str] = tokenize(data, seperators)\n\n frequency_map: Dict[str, int] = {}\n for token in tokens:\n if token in frequency_map.keys():\n frequency_map[token] += 1\n else:\n frequency_map[token] = 1\n return frequency_map", "def stemmed_words(doc):\n doc = process_text(doc)\n stemmer = PorterStemmer()\n analyzer = CountVectorizer(decode_error='ignore').build_analyzer()\n return (stemmer.stem(w) for w in analyzer(doc))", "def process_text(text, stemmer=SnowballStemmer(\"english\"), min_length=3):\n text = text.lower()\n text = re.sub('dictated.*', '', text, flags=re.MULTILINE|re.DOTALL)\n text = re.sub('.*:\\s+', '', text)\n text = re.sub('\\n', ' ', text)\n text = re.sub('\\[.*?\\]', '', text)\n text = re.sub('\\s\\s+', ' ', text)\n text = re.sub('[,.]', '', text)\n text = re.sub('[/-]', ' ', text)\n tokens = word_tokenize(text)\n return \" \".join([stemmer.stem(t) for t in tokens if t not in stop_words\n and len(t) >= min_length])", "def create_word_score(self):\n word_freq = {}\n word_deg = {}\n for zettel in self.lemma_tokens:\n for word in zettel:\n word_list = re.split(\" \", word[0])\n word_list_deg = len(word_list) - 1\n for new_word in word_list:\n word_freq.setdefault(new_word, 0)\n word_freq[new_word] = word_freq[new_word] + 1\n word_deg.setdefault(new_word, 0)\n word_deg[new_word] = word_deg[new_word] + 
word_list_deg\n word_score = {}\n for word in word_freq:\n word_deg[word] = word_deg[word] + word_freq[word]\n word_score.setdefault(word, 0)\n word_score[word] = word_deg[word] / (word_freq[word] * 1.0)\n return word_score", "def stem_text(self):\n\n stemmer = None\n\n if self.lang == 'ru':\n stemmer = RussianStemmer()\n\n elif self.lang == 'en':\n stemmer = EnglishStemmer()\n\n for i in range(len(self.__corpora)):\n words = self.__corpora[i].split()\n\n if self.lang == 'uk':\n self.__corpora[i] = ' '.join([UkrainianStemmer(word).stem_word() for word in words])\n\n else:\n self.__corpora[i] = ' '.join([stemmer.stem(word) for word in words])", "def __init__(self, text=None, bare=False, stem='gap', pos=False, roman = False, stopwords=False, punct=False, conjunction=False, article=False, demonstrative=False, preposition=False, question=False, pronoun=False, quantifier=False, date=False, number=False, ssn=False, telephone=False, name=False, address=False, sentiment=False, gender=False, age = False, dob=False, unit=False, standard=False, metric=False, spell=None ):\n self._text = text # raw text\n self._words = None # list of words\n self._punct = punct # keep/remove punctuation\n self._stemming = stem # on/off stemming\n self._pos = pos # on/off parts of speech\n self._roman = roman # on/off romanization \n self._porter = stopwords # keep/remove stopwords\n self._bare = bare # on/off bare tokenizing\n self._standard = standard # convert metric to standard units\n self._metric = metric # convert standard to metric units\n self._spell = None # spell checking\n self._bow = None # bag of words\n self._freq = None # word count frequency\n self._tf = None # term frequency\n \n # More than just bare tokenizing\n if self._bare == False:\n self._spell = spell # do (not) spell checking\n \n # Keep Stopwords\n if stopwords is True:\n self._quantifier = True # keep words indicating a size\n self._preposition = True # keep prepositions\n self._article = True # keep articles\n self._conjunction = True # keep conjunctions\n self._demonstrative = True # keep demonstratives\n self._question = True # keep question words\n self._pronoun = True # keep pronouns \n self._sentiment = True # keep sentiment words\n self._number = True # keep numbers \n self._date = True # keep date\n self._ssn = True # keep social security number\n self._telephone = True # keep telephone numbers\n self._address = True # keep street addresses\n self._name = True # keep proper names\n self._gender = True # keep gender words\n self._age = True # keep age\n self._dob = True # keep date of birth words\n self._unit = True # keep unit of measurement\n # Remove Stopwords\n else:\n self._quantifier = quantifier # keep/remove words indicating a size\n self._preposition = preposition # keep/remove prepositions\n self._article = article # keep/remove articles\n self._conjunction = conjunction # keep/remove conjunctions\n self._demonstrative = demonstrative # keep/remove demonstratives\n self._question = question # keep/remove question words\n self._pronoun = pronoun # keep/remove pronouns\n self._sentiment = sentiment # keep/remove sentiment words\n self._number = number # keep/remove numbers\n self._date = date # keep/remove date\n self._ssn = ssn # keep/remove social security number\n self._telephone = telephone # keep/remove telephone numbers\n self._address = address # keep/remove street addresses\n self._name = name # keep/remove proper names\n self._gender = gender # keep/remove gender words\n self._age = age # keep/remove age\n self._dob 
= dob # keep/remove date of birth words\n self._unit = unit # keep/remove unit of measurement words\n \n if isinstance(stopwords, bool) is False:\n raise TypeError(\"Stopwords must be a boolean\")\n if isinstance(bare, bool) is False:\n raise TypeError(\"Bare must be a boolean\")\n if isinstance(quantifier, bool) is False:\n raise TypeError(\"Quantifier must be a boolean\")\n if isinstance(preposition, bool) is False:\n raise TypeError(\"Preposition must be a boolean\")\n if isinstance(conjunction, bool) is False:\n raise TypeError(\"Conjunction must be a boolean\")\n if isinstance(article, bool) is False:\n raise TypeError(\"Article must be a boolean\")\n if isinstance(demonstrative, bool) is False:\n raise TypeError(\"Demonstrative must be a boolean\")\n if isinstance(question, bool) is False:\n raise TypeError(\"Question must be a boolean\")\n if isinstance(pronoun, bool) is False:\n raise TypeError(\"Pronoun must be a boolean\")\n if isinstance(number, bool) is False:\n raise TypeError(\"Number must be a boolean\")\n if isinstance(date, bool) is False:\n raise TypeError(\"Date must be a boolean\")\n if isinstance(ssn, bool) is False:\n raise TypeError(\"SSN must be a boolean\")\n if isinstance(telephone, bool) is False:\n raise TypeError(\"Telephone must be a boolean\")\n if isinstance(name, bool) is False:\n raise TypeError(\"Name must be a boolean\")\n if isinstance(address, bool) is False:\n raise TypeError(\"Address must be a boolean\")\n if isinstance(sentiment, bool) is False:\n raise TypeError(\"Sentiment must be a boolean\")\n if isinstance(gender, bool) is False:\n raise TypeError(\"Gender must be a boolean\")\n if isinstance(dob, bool) is False:\n raise TypeError(\"Gender must be a boolean\")\n if isinstance(age, bool) is False:\n raise TypeError(\"Age must be a boolean\")\n if isinstance(punct, bool) is False:\n raise TypeError(\"Punct must be a boolean\")\n if isinstance(unit, bool) is False:\n raise TypeError(\"Unit must be a boolean\")\n if isinstance(standard, bool) is False:\n raise TypeError(\"Standard must be a boolean\")\n if isinstance(metric, bool) is False:\n raise TypeError(\"Metric must be a boolean\")\n if text is not None:\n if isinstance(text, str) is False:\n raise TypeError(\"String expected for text\")\n if spell is not None:\n if spell not in ['en', 'fr', 'es', 'it', 'de']:\n raise ValueError(\"Wrong value for spell: en, es, fr, it or de\")\n \n if text is not None:\n self._split()\n if self._bare == False:\n # preprocess the tokens\n self._preprocess()\n # word stemming\n if self._stemming == 'gap':\n self._stem()\n elif self._stemming == 'porter':\n self._nltkStemmer('porter')\n elif self._stemming == 'snowball':\n self._nltkStemmer('snowball')\n elif self._stemming == 'lancaster':\n self._nltkStemmer('lancaster')\n elif self._stemming == 'lemma':\n self._lemma()\n # remove stop words\n self._stopwords()\n # Do unit conversions\n self._conversion()\n # Do POS tagging\n if self._pos == True:\n self._partsofspeech()", "def preparation(self):\n self.word_freq = defaultdict(int)\n\n for sentence in self.corpus:\n for word in sentence:\n self.word_freq[word] += 1\n\n # self.words decide the index of all the words\n self.words = list(self.word_freq.keys())\n self.T = len(self.words)\n\n # word_index will give index for a given word and vice versa for index_word\n self.word_index = dict([[word, i] for i, word in enumerate(self.words)])\n self.index_word = dict([[i, word] for i, word in enumerate(self.words)])", "def create_freq_dict(sents, lang):\n ix = 0\n 
freq_dict_all = []\n stop_words = set(stopwords.words(lang))\n\n for sent in sents:\n ix += 1\n freq_dict = {}\n words = word_tokenize(sent)\n\n for word in words:\n word = word.lower()\n if word not in stop_words:\n if word in freq_dict:\n freq_dict[word] += 1\n else:\n freq_dict[word] = 1\n\n temp = {\n 'doc_id': ix,\n 'freq_dict': freq_dict\n }\n\n freq_dict_all.append(temp)\n\n return freq_dict_all", "def word_diff(self):\n \n stmr = ps()\n word_dict = {stmr.stem(word):[] for word in self.words}\n\n for word in self.words:\n stemmed = stmr.stem(word)\n st_temp = stmr.stem(word)\n while word.startswith(st_temp)==False and len(st_temp)!=0:\n splitted_str = list(st_temp)\n splitted_str.pop()\n st_temp = ''.join(splitted_str)\n\n word_dict[stemmed].append(re.sub(st_temp , '' , word))\n return word_dict", "def frequencies(corpus, index, to_lower=False):\n freq = {}\n for sentence in corpus.get_sentences():\n for word in sentence:\n key = word[index]\n if to_lower:\n key = key.lower()\n if key in freq:\n freq[key] += 1\n else:\n freq[key] = 1\n\n return freq", "def get_wordcount(text):\r\n\r\n characters = len(text)\r\n chars_no_spaces = sum([not x.isspace() for x in text])\r\n asian_chars = sum([is_asian(x) for x in text])\r\n non_asian_words = nonj_len(text)\r\n words = non_asian_words + asian_chars\r\n \r\n return dict(characters=characters,\r\n chars_no_spaces=chars_no_spaces,\r\n asian_chars=asian_chars,\r\n non_asian_words=non_asian_words,\r\n words=words)", "def _compute_frequencies( word_sent):\n\t\tfreq = defaultdict(int)\n\t\tfor s in word_sent:\n\t\t\tfor word in s:\n\t\t\t\tif word not in _stopwords:\n\t\t\t\t\tfreq[word] += 1\n\t\t\t\t# frequencies normalization and fitering\n\t\treturn freq", "def parse_sentence(self, text):\n\n if text is None:\n return []\n text_tokens = word_tokenize(text)\n text_tokens_without_stopwords = []\n # text_lower_tokens_without_stopwords = [w.lower() for w in text_tokens if w not in self.stop_words]\n\n # remove stopwords\n for w in text_tokens:\n if w.lower() not in self.stop_words_dict:\n text_tokens_without_stopwords.append(w)\n\n # parsing\n doc_length = len(text_tokens_without_stopwords)\n num_dict = {\"thousand\": \"K\", \"million\": \"M\", \"billion\": \"B\", \"dollar\": \"$\", \"dollars\": \"$\", \"bucks\":\"$\", \"percent\": \"%\",\n \"$\": \"$\", \"%\": \"%\",\n \"percentage\": \"%\"}\n\n similar_words_dict = {\"corona\":\"covid\", \"covid19\":\"covid\", \"coronavirus\":\"covid\", \"covid-19\":\"covid\", \"covid\": \"covid\",\"#covid\": \"covid\", \"#covid19\": \"covid\"}\n\n new_tokenized_text = []\n i = -1\n # for i in range(doc_length):\n while i < doc_length - 1:\n # please note: when we do i += 1 it is because next_term(old_token[i + 1]) is used already so we skip over it next iteration\n # so we dont go over it twice\n\n i += 1\n term = text_tokens_without_stopwords[i]\n\n term = term.encode(\"ascii\", \"ignore\").decode() # remove ascii\n if term.lower() in similar_words_dict:\n new_tokenized_text.append(similar_words_dict[term.lower()])\n continue\n next_term = None\n if term.startswith(\"//t\") or (term.isalpha() and len(term) == 1): # remove short urls and terms that are single letters\n continue\n if term.__contains__(\"-\"):\n new_tokenized_text.extend(term.split(\"-\"))\n if i + 1 < doc_length:\n next_term = text_tokens_without_stopwords[i + 1]\n if term is \"@\" and next_term is not None:\n i += 2 # removing @ and name\n continue\n if term is \"#\" and next_term is not None:\n 
new_tokenized_text.extend(self.handle_hashtag(next_term))\n i += 1\n elif term is \"$\" and next_term is not None and str.isdigit(\n next_term.replace(\",\", \"\")): # $100 thousand / $75 --> 100K$ / 75$\n num = self.handle_numbers(next_term)\n if i + 2 < doc_length and text_tokens_without_stopwords[i + 2] in num_dict:\n num = num + num_dict[text_tokens_without_stopwords[i + 2]]\n i += 1\n new_tokenized_text.append(num + \"$\")\n i += 1\n elif str.isdigit(term.replace(\",\", \"\")): # if term is a number\n # deal with decimal number like 10.1234567 -> 10.123\n num = self.handle_numbers(term)\n if next_term is not None and next_term.lower() in num_dict:\n new_tokenized_text.append(num + num_dict[next_term.lower()])\n i += 1\n else:\n new_tokenized_text.append(num)\n elif not term.isidentifier(): # identifier: (a-z) and (0-9), or underscores (_)\n emojis_removed = self.remove_emojis(term)\n if emojis_removed is not \"\":\n new_tokenized_text.append(emojis_removed)\n else:\n new_tokenized_text.append(term.lower())\n\n return new_tokenized_text", "def add_string(self, s):\r\n space = 0\r\n count = 0\r\n word_list = clean_text(s)\r\n for w in word_list:\r\n # Update self.words to reflect w\r\n if w not in self.words:\r\n self.words[w] = 0\r\n self.words[w] += 1\r\n #self.word_lengths\r\n for w in word_list:\r\n if len(w) not in self.word_lengths:\r\n self.word_lengths[len(w)] = 0\r\n self.word_lengths[len(w)] += 1\r\n #self.stem\r\n for w in word_list:\r\n if stem(w) not in self.stems:\r\n self.stems[stem(w)] = 0 \r\n self.stems[stem(w)] += 1 \r\n #self.sentence_lengths\r\n for w in s:\r\n if w == ' ':\r\n space += 1\r\n if w in '.?!' and count == 0:\r\n if space not in self.sentence_lengths:\r\n self.sentence_lengths[space+1] = 1\r\n space = 0\r\n count += 1 \r\n elif w in '.?!' 
and count > 0:\r\n if space not in self.sentence_lengths:\r\n self.sentence_lengths[space] = 0\r\n self.sentence_lengths[space] += 1\r\n space = 0\r\n #self.punctuation\r\n for w in s:\r\n if w == '?':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == \"...\":\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == \".\":\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == \"!\":\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == \"-\":\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '/':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == ';':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '[':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '\\\"':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '(':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '—':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == ':':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '\\'':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1", "def stem(s):\n special = {'appall', 'kill', 'stroll', 'kiss', 'thrill', 'chugg', 'dress', 'err', 'express', 'fall', 'free', 'gall', 'add','cross', 'impress', 'inn', 'call', 'ball', 'bill', 'buzz'} \n ie_words = {'vying', 'lying', 'dying', 'tying'}\n short_ing = {'bring','sling','sping', 'bring', 'sing', 'ring', 'king', 'cling' ,'fling', 'wing', 'ding', 'ping', 'ting'}\n c_k_words = {'kick', 'muck', 'lock','pick', 'back', 'mock', 'peck', 'lock', 'nick'}\n\n if len(s) <= 3:\n return s\n if s[-3:] == 'ing' or s[-4:] == 'ings': \n if s in short_ing:\n return s\n elif s in special:\n return s[:-3]\n elif s[:-3] not in special and s[-4] == s[-5]:\n return s[:-4]\n elif s[:-3] not in c_k_words and s[-4] == 'k':\n return s[:-4]\n elif s == 'everything' or s == 'anything' or s == 'something':\n return s[:-5]\n elif s in ie_words:\n return s[0] + 'ie'\n else:\n return s[:-3]\n elif s[-3:] == 'ers':\n return s[:-3]\n elif s[-2:] == 'es':\n return s[:-2]\n elif s[-2:] == 'en':\n return s[:-2]\n elif s[-2:] == 'er':\n if s[-3] == s[-4]:\n return s[:-3]\n else:\n return s[:-2] \n elif s[-2:] == 'ed':\n if s[-3] == s[-4]:\n return s[:-3]\n else:\n return s[:-2]\n elif s[-3:] == 'ies':\n return s[:-2]\n elif s[-1:] == 's':\n return s[:-1]\n elif s[-1:] == 'e' and s not in ie_words:\n return s[:-1]\n elif s[-3:] == 'ful':\n return s[:-3]\n elif s[:2] == 'de':\n return s[2:]\n elif len(s) > 4 and s[-4:] == 'able' or s[-4] == 'ible':\n return s[:-4]\n elif s[:2] == 'in' or s[:2] == 'il' or s[:2] == 'ir':\n return s[2:]\n elif s[-1:] == 'y':\n return s[:-1] + 'i'\n else:\n return s", "def find_types_of_sents_in_text(text):\r\n return dict(Counter(map(lambda x: x[-1], nltk.sent_tokenize(text))))", "def word_count(self, document):\n start_time = time.time()\n dictionary = dict()\n counter = defaultdict(int)\n for line in document.splitlines():\n for word in line.split():\n if word not in PUNCTUATION_MARK:\n counter[word] += 
1\n for word, cnt in sorted(counter.items(), key=lambda x: (-x[1], x[0])):\n dictionary[word] = cnt\n self.log.info(\"Duration count dictionary: {duration}\".format(duration=float(time.time() - start_time)))\n return dictionary", "def stem(self) -> str:", "def stemming(self, text):\n porter = PorterStemmer()\n words = text.split()\n stemmed_words = [porter.stem(word) for word in words]\n return \" \".join(stemmed_words)", "def computeWordFrequencies(self, tokens: ['token'], frequencies: {'token': int}):\n # project2: update this method to take existing dict as parameter and modify it\n # additionally, stopwords are not inserted in the dict;\n # words shorter than 3 character or contains all digits are ignored\n for token in tokens:\n # if the key is not in dict, dict.setdefault method initiates the value at 0\n # if token not in stopwords and len(token) >= 3 and not token.isdigit():\n frequencies[token] = frequencies.setdefault(token, 0) + 1", "def __init__(self):\n\n\t\tself.b = \"\" # buffer for word to be stemmed\n\t\tself.k = 0\n\t\tself.k0 = 0\n\t\tself.j = 0\t # j is a general offset into the string", "def process_text(text, stem=True):\n text = text.translate(str.maketrans('', '', string.punctuation))\n tokens = word_tokenize(text)\n \n if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def stem_text(text):\n stmr = PorterStemmer() ## LancasterStemmer()\n return ' '.join([stmr.stem(i) for i in text.split()])", "def calc_weighted_frequency(words,ps,lem,stopWords,text_string):\r\n \r\n\r\n word_frequencies = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n word = lem.lemmatize(word)\r\n print(word)\r\n if word not in stopWords:\r\n if word not in word_frequencies:\r\n word_frequencies[word] = 1\r\n else:\r\n word_frequencies[word] += 1\r\n \r\n maximum_frequncy = max(word_frequencies.values())\r\n for word in word_frequencies.keys():\r\n word_frequencies[word] = (word_frequencies[word]/maximum_frequncy) \r\n print(word_frequencies)\r\n return word_frequencies", "def __init__(self, input_string):\n self.words_to_counts = {}\n self.split_and_populate_words(input_string)", "def get_word_freq_dict(df_col):\n results = Counter()\n df_col.str.lower().str.split().apply(results.update)\n results = sorted(results.items(), key=lambda item: item[1], reverse=True)\n d = {}\n for word, freq in results:\n d[word] = freq\n return d", "def stemWord(self,word):\n if(\"stem\" in self._classes):\n return self._stem.stemmingWord(word)", "def term_frequency(ngrams,lang):\n token_dictionary = {}\n for ng in ngrams:\n try:\n token_dictionary[ng] = token_dictionary[ng] + 1\n except KeyError:\n token_dictionary[ng] = 1\n return token_dictionary", "def letter_freq( text ):\n\tchars = string.ascii_uppercase\n\ttext = text.upper()\n\tresult = get_letter_dict()\n\ttotal = 0\n\tfor char in chars:\n\t\tcount = text.count(char)\n\t\tresult[char] = count\n\t\ttotal += count\n\tif total != 0:\n\t\tfor char in chars:\n\t\t\tresult[char] = (result[char]*10000 / total) / float(100)\n\treturn result", "def process_text(self, text):\n\n flags = (re.UNICODE if sys.version < '3' and type(text) is unicode # noqa: F821\n else 0)\n pattern = r\"\\w[\\w']*\" if self.min_word_length <= 1 else r\"\\w[\\w']+\"\n regexp = self.regexp if self.regexp is not None else pattern\n\n words = re.findall(regexp, text, flags)\n # remove 's\n words = [word[:-2] if word.lower().endswith(\"'s\") else word\n for word in words]\n # remove numbers\n if not self.include_numbers:\n words 
= [word for word in words if not word.isdigit()]\n # remove short words\n if self.min_word_length:\n words = [word for word in words if len(word) >= self.min_word_length]\n\n stopwords = set([i.lower() for i in self.stopwords])\n if self.collocations:\n word_counts = unigrams_and_bigrams(words, stopwords, self.normalize_plurals, self.collocation_threshold)\n else:\n # remove stopwords\n words = [word for word in words if word.lower() not in stopwords]\n word_counts, _ = process_tokens(words, self.normalize_plurals)\n\n return word_counts", "def words(text):\n clean = TextBlob(clean(text))\n sentence_count = len(clean.sentences)\n words = clean.tokenize()\n word_count = len(words)\n avg_len = np.mean([len(word) for word in words])\n words_dict = {'sentence_count': sentence_count, 'word_count': word_count,\n 'avg_len': avg_len}\n return words_dict", "def stem_words(self, words):\n return self.stemmer.stemWords(words)", "def extract_terms(document):\n tokens = word_tokenize(document)\n stemmer = PorterStemmer()\n terms = {} # Dictionary {term: appearances}\n word_count = 0 # To return total (meaningful) word count\n for token in tokens:\n token = token.lower() # Lowercase\n token = token.strip(string.punctuation) # Remove punctuation\n if token and token not in stopwords.words(\"english\"): # Remove stopwords\n token = stemmer.stem(token) # Using Porter Stemmer\n if token not in terms:\n terms[token] = 1\n else:\n terms[token] += 1\n word_count += 1\n return terms, word_count", "def preprocess(text, freq=5):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? 
\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n\n words_raw = text.strip().lower().split()\n word_counts = Counter(words_raw)\n words = [w for w in words_raw if word_counts[w] > freq]\n vocab = set(words)\n vocab2index = {w: idx for idx, w in enumerate(vocab)}\n index2vocab = {idx: w for idx, w in enumerate(vocab)}\n words_int = [vocab2index[w] for w in words]\n return words_int, vocab2index, index2vocab", "def letter_freq(txt):\n frequencies = {}\n txt_lower = txt.lower()\n\n for i in txt_lower:\n keys = frequencies.keys()\n if i in keys:\n frequencies[i] += 1\n else:\n frequencies[i] = 1\n return frequencies", "def initialize_terms_and_postings():\n global dictionary, postings\n for id in document_filenames:\n document = getDocumentContent(document_filenames[id])\n if(document_filenames[id].rfind(\".pdf\") == len(document_filenames[id]) - 4):\n terms = tokenize(document.encode('utf-8'))\n if(document_filenames[id].rfind(\".txt\") == len(document_filenames[id]) - 4):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".docx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".pptx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n unique_terms = set(terms)\n dictionary = dictionary.union(unique_terms)\n for term in unique_terms:\n postings[term][id] = terms.count(term) # the value is the\n # frequency of the\n # term in the\n # document", "def tokenize_and_stem(doc):\n punctuation_remover = dict((ord(char), None) for char in string.punctuation)\n tokens = nltk.word_tokenize(doc.lower().translate(punctuation_remover))\n return PlagiarismDetector.stem_tokens(tokens)", "def make_word_trie(text):\n trie = Trie()\n\n sentences = tokenize_sentences(text)\n\n for sentence in sentences:\n words = sentence.split()\n add_freq_trie(words, trie)\n\n return trie", "def _getFrequency(s, line, text):\n if len(s) > 0 and s[0].isalpha(): \n if s.lower() in _frequency:\n return _frequency[s.lower()]\n else:\n raise ValueError(\"invalid note name/frequency '%s' on line %d: %s\" % (s, line, text))\n else:\n return int(float(s))", "def verb_stem(s):\n \n #If the stem is have, its 3s form is has.\n if s == \"has\" :\n return \"have\"\n\n #If the stem ends in y preceded by a vowel, simply add s (pays, buys).\n elif re.match(r\"[A-z]+[aeiou][y]s\\b\", s):\n str = s[:-1]\n\n #If the stem ends in y preceded by a non-vowel and contains at least three letters, change the y to ies (flies, tries, unifies).\n elif re.match(r\"[A-z]+[^aeiou]ies\\b\", s):\n str = s[:-3] + 'y'\n\n #If the stem is of the form Xie where X is a single letter other than a vowel, simply add s (dies, lies, ties note that this doesnt account for unties).\n elif re.match(r\"[^aeiou]ies\\b\", s):\n str = s[:-1]\n\n #If the stem ends in o,x,ch,sh,ss or zz, add es (goes, boxes, attaches, washes, dresses, fizzes).\n elif re.match(r\"[A-z]+([ox]|[cs]h|[s]s|[z]z)es\\b\", s): \n str = s[:-2]\n\n #If the stem ends in se or ze but not in sse or zze, add s (loses, dazes, lapses, analyses).\n elif re.match(r\"[A-z]+([s][^s][e]|[z][^z][e])s\\b\", s):\n str = s[:-1]\n\n #If the stem ends in e not preceded by i,o,s,x,z,ch,sh, just add s (likes, hates, bathes).\n elif re.match(r\"[A-z]+([^iosxz]|[^ch]|[^sh])es\\b\", s):\n str = s[:-1]\n \n #If the stem ends in anything except s,x,y,z,ch,sh or a vowel, add s (eats, tells, shows)\n elif re.match(r\"[A-z]+([^sxyzaeiou]|[^cs]h)s\\b\", s):\n str = s[:-1]\n\n else: \n str = \"\"\n\n\n matches = [(w, t) for (w, t) in vb_list 
if (w == s or w == str)]\n\n tag_s = [(w, t) for (w, t) in matches if w == s and t == 'VBZ']\n\n if tag_s == True:\n return str\n else:\n tag_str = [t for (w, t) in matches if w == str and t == 'VB']\n\n if not (tag_s or tag_str):\n str = \"\"\n\n return str", "def make_freq_dict(word_list):\n\n\tfreq_dict = {}\n\n\tfor word in word_list: #need to slice each tale into a list of words for this to work\n\t\tif word in freq_dict:\n\t\t\tcurrent_val = freq_dict.get(word)\n\t\t\tval = current_val + 1\n\t\t\tfreq_dict[word] = val #made a dictionary of the string (word, frequnecy)\n\t\telse: #if it isn't in the dictionary\n\t\t\tfreq_dict[word] = 1\n\treturn freq_dict", "def stemword(w):\n if not w in stemmer_cache:\n stemmer_cache[w] = stemmer.stem(w)\n return stemmer_cache[w]", "def create_dict(text):\n #On/Off case sensitivity\n text = text.lower() \n\n #handy one liner that splits words apart via whitespace, and \n #removes punctuation. Results in list of words.\n word_list = [s.strip(string.punctuation) for s in text.split()]\n \n d = dict()\n for word in word_list:\n d[word] = d.get(word, 0) +1\n return d", "def mapWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n return token_map", "def stemming(data):\n stemmer = PorterStemmer()\n tokens = word_tokenize(str(data))\n new = \"\"\n for word in tokens:\n new = new + \" \" + stemmer.stem(word)\n return new", "def frequencies(self):\n dic = {}\n for word in self.words():\n dic[word] = dic.get(word, 0) + 1\n return dic", "def process_text(text):\n text = text.translate(translator)\n tokens = word_tokenize(text)\n# if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def stemm_word(word):\n return stem(word)", "def word_count(self):\n\n # split words on default word boundaries for words list\n words = self.phrase.split() \n\n # translate removes punctuation only, normalizes to lower case\n normalized_words = [self.normalize_word(w) for w in words]\n\n # removes empty strings after stripping punctuation\n filtered_words = [w for w in normalized_words if w]\n\n # sets up default dictionary, so all entries are 0\n word_counts = collections.defaultdict(int) #{}\n\n # define word counting function for use in reduce\n def count_word(dictionary, word):\n dictionary[word] = dictionary[word] + 1\n return dictionary\n\n # count words into dictionary from word list\n reduce(count_word, filtered_words, word_counts)\n\n return word_counts", "def build_parser(word_counts, signatures, stems, suffixes):\n \n # Go through signatures and associate each generated word with its parses...\n sig_num = 1\n parses = collections.defaultdict(dict)\n for suffixes in signatures:\n for pair in itertools.product(signatures[suffixes], suffixes):\n word = \"\".join(pair)\n parses[word][pair] = sig_num\n sig_num += 1\n \n # Go through each word in the corpus and increment stem and suffix count...\n stem_count = dict()\n suffix_count = dict()\n total_stem_count = total_suffix_count = 0\n for word, count in word_counts.items():\n if len(parses[word]):\n for stem, suffix in parses[word]:\n stem_count[stem] = stem_count.get(stem, 0) + count\n suffix_count[suffix] = suffix_count.get(suffix, 0) + count\n total_stem_count += count\n total_suffix_count += count\n else:\n parses[word][word, \"\"] = 0\n stem_count[word] = stem_count.get(word, 0) + count\n suffix_count[\"\"] = suffix_count.get(\"\", 0) + count\n total_stem_count += count\n total_suffix_count += count\n\n 
# Compute score of each word parse and store ordered lists of parses...\n parser = collections.defaultdict(list)\n for word in parses:\n scores = dict()\n total = 0\n for stem, suffix in parses[word]:\n score = (stem_count[stem]/total_stem_count) \\\n * (suffix_count[suffix]/total_suffix_count)\n scores[stem, suffix] = score\n total += score\n for stem, suffix in sorted(\n scores, key=scores.get, reverse=True\n ):\n parser[word].append(\n Parse(\n stem=stem,\n suffix=suffix,\n signature=parses[word][stem, suffix],\n score=scores[stem, suffix] / total,\n )\n )\n return parser", "def process_text(text, stem=True):\n tokens = word_tokenize(text.lower())\n stop_words = set(stopwords.words('english')) \n \n tokens_cpy = []\n for t in tokens:\n if t not in stop_words:\n tokens_cpy.append(t)\n tokens = tokens_cpy\n \n if stem:\n tokens_cpy = []\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n for t in tokens:\n if t not in stopword_list:\n tokens_cpy.append(t)\n tokens = tokens_cpy\n return tokens", "def fit(self):\n sentences = ''.join(self.__sentences) # concatenate all sentences\n chars = sorted(list(set(sentences))) # extract unique characters (unigrams)\n bigrams = sorted(list(set(self.ngrams(sentences, 2))))\n all_grams = chars + bigrams + ['unk'] # add unknown character\n\n self.__dictionary = dict((c, i) for i, c in enumerate(all_grams, start=1))\n self.__vocab_size = len(self.__dictionary)\n\n if self.__verbose:\n print('Vocab size:', self.__vocab_size)", "def histogram(text):\n hist = {}\n\n for char in text.lower():\n if char.isalpha():\n hist[char] = hist.get(char, 0) + 1\n else:\n hist['others'] = hist.get('others', 0) + 1\n return hist", "def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n\n ### split off metadata\n content = all_text.split(\"X-FileName:\")\n words = \"\"\n stemmed=\"\"\n if len(content) > 1:\n ### remove punctuation\n text_string = content[1].translate(maketrans(\"\", \"\", string.punctuation))\n \n ### project part 2: comment out the line below\n# words = text_string\n\n ### split the text string into individual words, stem each word, \n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n ps = SnowballStemmer(\"english\") \n \n words = text_string.split() \n for w in words: \n# print(w, \" : \", ps.stem(w)) \n stemmed= stemmed+ \" \"+ ps.stem(w)\n stemmed= stemmed.lstrip()\n \n\n return stemmed", "def stem(s):\n short_words = {'is': 'is', 'the': 'the','he': 'he', 'she': 'she', \\\n 'my': 'my', }\n if s in short_words:\n return s\n if s[-1] == 's':\n s = s[:-1]\n special_cases = {'children': 'child', 'doing': 'do', 'did': 'do', \\\n 'string': 'string', 'spring': 'spring'}\n if s in special_cases:\n return special_cases[s]\n if s[-1] == 'e':\n s = s[:-1]\n if s[-3:] == 'ing' and len(s) > 5:\n if s[-5:-3] == 'mm' or s[-5:-3] == 'tt':\n s = s[-4]\n else:\n s = s[:-3]\n if s[-1] == 'y':\n s = s[:-1] + 'i'\n elif s[-2:] == 'er' and len(s) > 4:\n if s[-4:-2] == 'mm' or s[-4:-2] == 'tt':\n s = s[:-3]\n else:\n s = s[:-2]\n elif s[-2:] == 'ed' and len(s) > 4:\n if s[-4:-2] == 'mm' or s[-4:-2] == 'tt':\n s = s[:-3]\n else:\n s = s[:-2]\n return s", "def post(self):\n input_text = self.get_argument('input_text', '')\n self.write(json_encode(extract_word_frequencies(input_text)))", "def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n 
# frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq[w]\n return freq", "def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n # frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq[w]\n return freq", "def word_freq(word, ngram_dict):\n word = word.lower()\n return ngram_dict[word] if word in ngram_dict else 0", "def dfc(text: str):\n #Splitting the text into a list\n wordlist = text.split()\n worddictionary = {}\n\n #Creating the wordlist dictionary\n for word in wordlist:\n if word in worddictionary:\n #Increase\n worddictionary[word] += 1\n else:\n #add to the dictionary\n worddictionary[word] = 1\n\n #Converting worddictionary into a dataframe\n df = pd.DataFrame.from_dict(worddictionary, orient='index')\n #Resetting index to a numerical one for ease of use\n df = df.reset_index()\n #Renaming the old string-valued index\n df = df.rename(columns={'index':'word'})\n #Defining two functions (over empty variables) to replace commas and dots\n remover = lambda x: x.replace(',','')\n remover2 = lambda x: x.replace('.','')\n #Using ( too many lines) to apply the functions\n df['word'] = df['word'].apply(remover)\n df['word'] = df['word'].apply(remover2)\n #Row-wise Subselection and assignment to remove words with a frequency smaller than 2\n df = df[df[0] > 2]\n #Renaming word frequncy\n df = df.rename(columns={0:'Frequency'})\n\n return df", "def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq", "def apply_all(text):\n return stem_words(remove_stop_words(initial_clean(text)))", "def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict" ]
[ "0.68903923", "0.6869947", "0.68236566", "0.6701824", "0.6560145", "0.6438137", "0.6392169", "0.63749504", "0.635293", "0.6312454", "0.62951475", "0.62908894", "0.6230646", "0.6177849", "0.61489856", "0.6128415", "0.6118843", "0.6098367", "0.6080806", "0.6080696", "0.6066174", "0.6055362", "0.602864", "0.5999254", "0.5993621", "0.59705776", "0.5967573", "0.59063977", "0.589038", "0.5870025", "0.58640707", "0.58636045", "0.5851849", "0.5846045", "0.5836826", "0.58364207", "0.5826567", "0.58135784", "0.58074397", "0.5804268", "0.5799337", "0.5799025", "0.5793016", "0.57878083", "0.5782046", "0.5777437", "0.5771302", "0.5764756", "0.5761061", "0.57550615", "0.5754962", "0.5749969", "0.5743187", "0.57374644", "0.5735284", "0.5735083", "0.5734601", "0.572035", "0.5714934", "0.57132775", "0.57054055", "0.57030547", "0.5672666", "0.56658113", "0.5654747", "0.56501865", "0.56476855", "0.56467", "0.56361794", "0.56318766", "0.5630476", "0.56293714", "0.5627189", "0.5623523", "0.5621878", "0.56217206", "0.56216085", "0.56122094", "0.56100637", "0.5610008", "0.56045306", "0.559345", "0.5585451", "0.55827266", "0.55827045", "0.5567856", "0.55629236", "0.5558797", "0.5556648", "0.55524784", "0.5549833", "0.55487317", "0.5547835", "0.55476207", "0.554646", "0.55437225", "0.5542854", "0.5540191", "0.5535233", "0.553293" ]
0.6041122
22
takes clean str from self.text and creates dict of gerund/present participle freq.
def makeGerund(self):
    clean_s = self.cleanString(self.text)
    LoW = clean_s.split()
    for x in LoW:
        if 'ing' in x and x not in self.gerund:
            self.gerund[x] = 1
        elif 'ing' in x and x in self.gerund:
            self.gerund[x] += 1
    return self.gerund
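For reference, a minimal standalone sketch of the same counting idea, assuming the cleaning step is just lowercasing and stripping non-letter characters (the class above delegates that to self.cleanString, which is not shown here):

import re

def gerund_frequencies(text):
    # Count words containing 'ing' as a rough gerund/present-participle proxy.
    # Assumes cleaning = lowercase + replace non-letter characters with spaces.
    clean_s = re.sub(r"[^a-z\s]", " ", text.lower())
    gerund = {}
    for x in clean_s.split():
        if 'ing' in x:
            gerund[x] = gerund.get(x, 0) + 1
    return gerund

print(gerund_frequencies("Running and singing, while running."))  # {'running': 2, 'singing': 1}

Note that the plain 'ing' substring test also counts non-participles such as 'king' or 'thing', so the result is only an approximation of true gerund frequencies.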
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_frequency_dict(self, text):\n\t\t\tfrequency = {}\n\t\t\t#tomamos los numeros como caracteres entonces el diccionario solo tendra un rango (0,9) las ',' y '\\n'\n\t\t\tfor character in text:#O(len(row)*columns) \n\t\t\t\tif not character in frequency:#como frequency es un diccionario es de O(1)\n\t\t\t\t\tfrequency[character] = 0\n\t\t\t\tfrequency[character] += 1\n\t\t\t\n\t\t\treturn frequency", "def make_freq_dict(text):\n freq_dict = {}\n for i in text:\n if i not in freq_dict:\n freq_dict[i] = 1\n else:\n freq_dict[i] += 1\n return freq_dict", "def build_frequency_dict(text: bytes) -> Dict[int, int]:\n freq_dic = {}\n for elem in text:\n if elem not in freq_dic:\n freq_dic[elem] = 1\n else:\n freq_dic[elem] += 1\n return freq_dic", "def create_freq_dict(corpus, doc_info):\n for idx, content in enumerate(corpus):\n word_freq_table = {}\n splitted_sentence = content.split()\n for word in splitted_sentence:\n word = word.lower()\n if word not in word_freq_table:\n word_freq_table[word] = 1\n else:\n word_freq_table[word] += 1\n doc_info[idx]['freq_dict'] = word_freq_table", "def make_word_to_freq(self):\n\t\tword_to_freq = {}\n\t\tdocuments = self.tokenized_documents[\"train\"]\n\t\tfor document in documents:\n\t\t\tfor word in document:\n\t\t\t\tif not word in self.worddict: # make sure we have not found one of the pre-defined words\n\t\t\t\t\tword_to_freq[word] = word_to_freq.get(word, 0) + 1\n\t\t\n\t\treturn word_to_freq", "def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END", "def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d", "def frequency(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n list_of_words = []\n for i in root.iter(root_tag + 'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n for word in i.text.split():\n alphanumeric_filter = filter(str.isalnum, word)\n alphanumeric_string = \"\".join(alphanumeric_filter)\n list_of_words.append(alphanumeric_string)\n dict_of_frequency = collections.Counter(list_of_words)\n return dict_of_frequency", "def wordFreq(parseThis):\n \n freq = {}\n nono = ('\"', \"'\", '%', '$', '!', '.', '?', '-', ','\n , '\\n', '\\t', '\\r', ':', ';')\n\n for c in nono:\n parseThis = parseThis.replace(c, \" \")\n \n words = parseThis.split()\n \n for word in words:\n temp = word.lower()\n freq[temp] = freq.get(temp, 0) + 1\n\n return freq", "def initialize(eng, fre):\n\t# TODO\n t = {}\n t[\"SENTSTART\"] = {\"SENTSTART\":1}\n t[\"SENTEND\"] = {\"SENTEND\":1}\n num_sentences = len(eng)\n for i in range(num_sentences):\n list_eng = eng[i].split(\" \")\n list_fre = fre[i].split(\" \")\n for word_eng in list_eng:\n if word_eng == 'SENTSTART' or word_eng == 
'SENTEND':\n continue\n if word_eng not in t:\n t[word_eng] = {}\n for word_fre in list_fre:\n if word_fre in t[word_eng]:\n t[word_eng][word_fre] += 1\n else:\n t[word_eng][word_fre] = 1\n for word_eng in t:\n num = 0\n for word_fre in t[word_eng]:\n num += t[word_eng][word_fre]\n for word_fre in t[word_eng]:\n t[word_eng][word_fre] /= num\n return t", "def frequencyAnalysis(article, dictionary):\r\n string = article #Sets the string to the article\r\n string = re.sub(\"[^a-zA-Z0-9’\\s]+\",'', string) #Takes the articles and removes all characters appart from apostrophes, spaces, digits, and leters\r\n string = re.sub(\"’\", \"'\", string) #Replaces ’ with '\r\n string = string.lower() #Ensures that all the charcters are lower case\r\n stringList = string.split() #Takes the article and turns it into a list\r\n \r\n print(\"\\nConverted article to list\\n\")\r\n \r\n print(\"Starting frequency analysis\\n\")\r\n\r\n #Started the frequency anaylsis\r\n for word in stringList:\r\n if \"'s\" in word: #Done to remove extra keys in the dictionary, removes the possessive such that it counts \"Trump\" and \"Trump's\" as one word\r\n word = word[0:-2]\r\n elif \"s'\" in word:\r\n word = word[0:-1]\r\n if word != \"advertisement\":\r\n if word in dictionary:\r\n dictionary[word] +=1 #If it finds the word in the dictionary, the frequency has to increase by one\r\n else:\r\n dictionary[word] = 1 #If it finds a new word, it needs to add the word so the frequency is one\r", "def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings", "def _count_word_frequency(self, data):\n _dict = {}\n for _docs in data:\n for _word in _docs:\n if _word in _dict:\n _dict[_word] += 1\n else:\n _dict[_word] = 1\n return _dict", "def text2wordfreq(string, lowercase=False):\r\n\r\n\r\n from collections import Counter\r\n lst = Counter(tokenize(string, lowercase)).most_common()\r\n\r\n dictLst = dict(lst)\r\n\r\n return dictLst", "def make_frequency_dict(path):\n filepath = path#'germanWordList_03.csv'\n wordDict = {}\n f = open(filepath,'r')\n for line in f:\n split = line.split(\",\")\n word = (split[0]).lower()\n temp = float(split[1])\n if word in wordDict:\n val = float(wordDict[word])\n if val > 0:\n num = (temp * val) / (val + temp)\n else:\n num = temp\n else:\n num = temp\n wordDict[word]= num\n return wordDict", "def make_freq_dict(word_list):\n\n\tfreq_dict = {}\n\n\tfor word in word_list: #need to slice each tale into a list of words for this to work\n\t\tif word in freq_dict:\n\t\t\tcurrent_val = freq_dict.get(word)\n\t\t\tval = current_val + 1\n\t\t\tfreq_dict[word] = val #made a dictionary of the string (word, frequnecy)\n\t\telse: #if it isn't in the dictionary\n\t\t\tfreq_dict[word] = 1\n\treturn freq_dict", "def create_freq_dict(sents, lang):\n ix = 0\n freq_dict_all = []\n stop_words = set(stopwords.words(lang))\n\n for sent in sents:\n ix += 1\n freq_dict = {}\n words = word_tokenize(sent)\n\n for word in words:\n word = word.lower()\n if word not in stop_words:\n if word in freq_dict:\n freq_dict[word] += 1\n else:\n freq_dict[word] = 1\n\n temp = {\n 'doc_id': ix,\n 'freq_dict': freq_dict\n }\n\n freq_dict_all.append(temp)\n\n return freq_dict_all", "def stats_text_en(text):\n text1 = 
text.replace(',','').replace('.','').replace('--','').replace('*','').replace('!','') # Remove the non-English characters in the text.\n text2 = text1.split() # Convert the string type to the list type.\n dict = {x: text2.count(x) for x in text2} # Count the times of each word in the list.\n dict1= sorted(dict.items(), key= lambda d:d[1], reverse = True) # Sort the words in the descending order according to the times of words.\n print(dict1) # Return the result.", "def create_word_score(self):\n word_freq = {}\n word_deg = {}\n for zettel in self.lemma_tokens:\n for word in zettel:\n word_list = re.split(\" \", word[0])\n word_list_deg = len(word_list) - 1\n for new_word in word_list:\n word_freq.setdefault(new_word, 0)\n word_freq[new_word] = word_freq[new_word] + 1\n word_deg.setdefault(new_word, 0)\n word_deg[new_word] = word_deg[new_word] + word_list_deg\n word_score = {}\n for word in word_freq:\n word_deg[word] = word_deg[word] + word_freq[word]\n word_score.setdefault(word, 0)\n word_score[word] = word_deg[word] / (word_freq[word] * 1.0)\n return word_score", "def letter_freq(txt):\n frequencies = {}\n txt_lower = txt.lower()\n\n for i in txt_lower:\n keys = frequencies.keys()\n if i in keys:\n frequencies[i] += 1\n else:\n frequencies[i] = 1\n return frequencies", "def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq", "def frequency(w: str) -> float:\n return frequency_list.get(remove_punctuation(w), 0)", "def words(phrase):\n\twordlist = phrase.split()\n\tunique_wordlist = []\n\tword_freq = []\n\n \n\twhile wordlist:\n\t\tword_freq.append(wordlist.count(wordlist[0])) #count the instances of a word and add it to the frequencies list\n\t\tunique_wordlist.append(wordlist[0]) #add the word into a unique words list\n\t\twordlist = list(filter((wordlist[0]).__ne__, wordlist)) #remove all other similar words from the wordlist\n\n\n\tn = len(word_freq)\n\toutput = {}\n\n\tfor i in range(n):\n\t\tif unique_wordlist[i].isdigit(): #convert sting digits into int\n\t\t\tunique_wordlist[i] = int(unique_wordlist[i])\n\t\toutput[unique_wordlist[i]] = word_freq[i] #add the unique words with their corresponding frequencies into the output dict\n\t\n\treturn output", "def word_frequency(a_string):\n\n for char in \"\"\".$#,:\"'?!)(\"\"\":\n a_string = a_string.replace(char, \"\")\n for char in \"\"\"-\"\"\":\n a_string = a_string.replace(char, \" \")\n\n cleanstring = a_string.lower()\n a_list = cleanstring.split()\n a_dict = {}\n for item in a_list:\n if item in a_dict:\n a_dict[item]+= 1\n else:\n a_dict[item] = 1\n return a_dict", "def get_frequencies(filename):\n freq_dict = {}\n _,long_name = filename.split(\"\\\\\")\n name,_ = long_name.split(\"_gold_\")\n f = os.path.join(PARSED, name + \".fix.xml\")\n #soup = bs(open(f, 'r'))\n soup = bs(codecs.open(f, 'r', encoding='utf-8'))\n for sent in soup.findAll('sentence'):\n for token in sent.findAll('token'):\n try:\n w = token.word.string\n if w in freq_dict:\n freq_dict[w] += 1\n else:\n freq_dict[w] = 1\n except AttributeError:\n pass\n return freq_dict", "def profile_text_stats( doc: BeautifulSoup ):\n text = doc.find('main', {'class': 'core-rail'}).text.strip()\n words = text.split()\n eng_ratio = sum(1 for word in words if word in COMMON_ENGLISH) * 10/ (len(words) + 0.001)\n return { 'length': len( text ),\n 'eng_ratio': np.round( eng_ratio, 2)}\n # %%", "def find_frequency(text, n=1):\n freqs = {}\n length = len(text)\n for i in xrange(0, length):\n upper = 
i+n\n if upper > length:\n break\n gram = text[i:upper]\n dict_operate(freqs, gram, 1, operator.add)\n return freqs", "def frequencies(self):\n dic = {}\n for word in self.words():\n dic[word] = dic.get(word, 0) + 1\n return dic", "def word_frequency_dict(tokens):\n\n\tfdist = FreqDist(tokens) \t\t\t\t\t\t# fdist.keys() fdist.values()\n\treturn dict(fdist)", "def word_count(self, document):\n start_time = time.time()\n dictionary = dict()\n counter = defaultdict(int)\n for line in document.splitlines():\n for word in line.split():\n if word not in PUNCTUATION_MARK:\n counter[word] += 1\n for word, cnt in sorted(counter.items(), key=lambda x: (-x[1], x[0])):\n dictionary[word] = cnt\n self.log.info(\"Duration count dictionary: {duration}\".format(duration=float(time.time() - start_time)))\n return dictionary", "def term_frequency(ngrams,lang):\n token_dictionary = {}\n for ng in ngrams:\n try:\n token_dictionary[ng] = token_dictionary[ng] + 1\n except KeyError:\n token_dictionary[ng] = 1\n return token_dictionary", "def generate_frequency_map(data: str) -> Dict[str, int]:\n seperators: Final[List[str]] = [\n ',', '.', '\\n', ' ', '\\t', '?', '<', '>', '!', ':', ';'\n ]\n tokens: List[str] = tokenize(data, seperators)\n\n frequency_map: Dict[str, int] = {}\n for token in tokens:\n if token in frequency_map.keys():\n frequency_map[token] += 1\n else:\n frequency_map[token] = 1\n return frequency_map", "def main(str_text):\n\n frequencies = count_value(str_text)\n sorted_data = sort_dict(frequencies)\n\n return sorted_data", "def word_frequency_table(self, text_string):\r\n stopWords = set(stopwords.words(\"english\"))\r\n words = word_tokenize(text_string)\r\n ps = PorterStemmer()\r\n\r\n freqTable = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n if word in stopWords:\r\n continue\r\n if word in freqTable:\r\n freqTable[word] += 1\r\n else:\r\n freqTable[word] = 1\r\n\r\n return freqTable", "def word_frequencies(word_list: TextIO) -> dict:\n words = word_list.read().split(' ')\n amount_of_words = len(set(words))\n frequencies = {}\n for index, word in enumerate(words):\n clean_word = remove_punctuation(word)\n if clean_word not in frequencies:\n frequencies[clean_word] = (index + 1) / amount_of_words\n del frequencies[\"\"]\n return frequencies", "def parse_text(self, text, wordcount_dictionary=None):\n if not wordcount_dictionary:\n wordcount_dictionary = {}\n words = self.parse_regexp.findall(text)\n for word in words:\n new_word = stem(word.lower())\n if new_word not in self.stopwords:\n if new_word in wordcount_dictionary:\n wordcount_dictionary[new_word] += 1\n else:\n wordcount_dictionary[new_word] = 1\n return wordcount_dictionary", "def letter_freq( text ):\n\tchars = string.ascii_uppercase\n\ttext = text.upper()\n\tresult = get_letter_dict()\n\ttotal = 0\n\tfor char in chars:\n\t\tcount = text.count(char)\n\t\tresult[char] = count\n\t\ttotal += count\n\tif total != 0:\n\t\tfor char in chars:\n\t\t\tresult[char] = (result[char]*10000 / total) / float(100)\n\treturn result", "def get_word_freq_dict(df_col):\n results = Counter()\n df_col.str.lower().str.split().apply(results.update)\n results = sorted(results.items(), key=lambda item: item[1], reverse=True)\n d = {}\n for word, freq in results:\n d[word] = freq\n return d", "def info(doc):\n\tinfo = {}\n\tinfo['sentences'] = [str(sent) for sent in doc.sents]\n\t#sentences : [sent1, sent2, ...]\n\tinfo['tokens'] = [str(token) for token in doc]\n\t#all tokens in info['tokens']\n\ttoken_vals = {}\n\tfor token in 
info['tokens']:\n\t\tcurrent_word = token\n\t\ti = 0\n\t\tcurrent_sent = info['sentences'][i]\n\t\tfor i in range(len(info['sentences'])): #for each sentence\n\t\t\tval = current_sent.count(str(current_word))\n\t\t\t#value is the number of times the current word is in the current sent\n\t\t\ttoken_vals[str(token)] = val\n\t\t\t#append to dictionary\n\tinfo['token_vals'] = token_vals\n\t#given a word and a sentence, val is how many times it appears in that sentence\n\treturn info", "def histogram(text):\n hist = {}\n\n for char in text.lower():\n if char.isalpha():\n hist[char] = hist.get(char, 0) + 1\n else:\n hist['others'] = hist.get('others', 0) + 1\n return hist", "def initialize(eng, fre):\n # TODO\n # Setup keys of AM\n # For each sentence pair:\n # For each word in english:\n # If eng_word is not in AM:\n # AM[eng_word] = {}\n # For each word in french:\n # AM[eng_word][french_word] = 0\n\n # Setup values of AM\n # For each word in english in AM:\n # AM[eng_word] = dict.fromkeys( AM[eng_word], 1 / len(AM[eng_word]) )\n\n # Force SENTEND and SENTSTART\n\n #============================\n # initialize(eng, fre) version:\n # presumably we return an AM dictionary\n # the algorithm is the same as above we just iterate over lists of words\n # rather than words in a sentence string\n\n AM = {}\n\n num_sentences = min(len(eng), len(fre))\n for i in range(0, num_sentences):\n for e in eng[i]:\n if e not in AM:\n AM[e] = {}\n for f in fre[i]:\n AM[e][f] = 0\n\n for e in AM.keys():\n AM[e] = dict.fromkeys( AM[e], 1 / len(AM[e]) )\n\n if 'SENTSTART' in AM.keys() and 'SENTEND' in AM.keys():\n AM['SENTSTART'] = dict.fromkeys(AM['SENTSTART'], 0.0000001)\n AM['SENTSTART']['SENTSTART'] = 1\n AM['SENTEND'] = dict.fromkeys(AM['SENTEND'], 0.0000001)\n AM['SENTEND']['SENTEND'] = 1\n\n return AM", "def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict", "def calc_weighted_frequency(words,ps,lem,stopWords,text_string):\r\n \r\n\r\n word_frequencies = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n word = lem.lemmatize(word)\r\n print(word)\r\n if word not in stopWords:\r\n if word not in word_frequencies:\r\n word_frequencies[word] = 1\r\n else:\r\n word_frequencies[word] += 1\r\n \r\n maximum_frequncy = max(word_frequencies.values())\r\n for word in word_frequencies.keys():\r\n word_frequencies[word] = (word_frequencies[word]/maximum_frequncy) \r\n print(word_frequencies)\r\n return word_frequencies", "def test_fill_in_dic():\n ngrams = NgramFrequencies()\n word_per_list = [\"time\", \"burton's\", \"corpse\", \"bride\"]\n ngrams.fill_in_dic(word_per_list)\n assert ngrams.unigrams_dic == {\n \"COUNT\": 4,\n \"time\": 1,\n \"burton's\": 1,\n \"corpse\": 1,\n \"bride\": 1\n }\n assert ngrams.bigrams_dic == {\n \"COUNT\": 3,\n \"time_burton's\": 1,\n \"burton's_corpse\": 1,\n \"corpse_bride\": 1\n }\n assert ngrams.trigrams_dic == {\n \"COUNT\": 2,\n \"time_burton's_corpse\": 1,\n \"burton's_corpse_bride\": 1\n }", "def counterFrequency(text):\n dictText = {}\n maxN = 0\n mostFrequent = \"\"\n for item in text:\n if (item not in dictText):\n dictText[item] = 1\n else: \n dictText[item] +=1\n \n if (dictText[item] > maxN):\n mostFrequent = item\n maxN = dictText[item]\n return mostFrequent", "def getTextStatsFeat(text, stemmRequired = True,\r\n 
excludeStopwordsRequired = True):\r\n #length = len(text)\r\n sentenceCount = len(re.findall(\"[.?!]\", text))\r\n exclamationMarkCount = len(re.findall(\"[!]\", text))\r\n questionMarkCount = len(re.findall(\"[?]\", text))\r\n digitsCount = len(re.findall(\"[0-9]+\", text))\r\n text = text.replace(\",\", \" \").replace(\".\", \" \")\r\n cleanText = re.sub('[^a-zа-я0-9]', ' ', text.lower())\r\n wordCount = 0.0\r\n charCount = 0.0\r\n rusCharCount = 0.0\r\n engCharCount = 0.0\r\n if excludeStopwordsRequired:\r\n for w in cleanText.split():\r\n if len(w)>1 and w not in stopwords:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n else:\r\n for w in cleanText.split():\r\n if len(w)>1:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n # per sentence\r\n wordPerSentence = tryDivide(wordCount, sentenceCount)\r\n charPerSentence = tryDivide(charCount, sentenceCount)\r\n rusCharPerSentence = tryDivide(rusCharCount, sentenceCount)\r\n engCharPerSentence = tryDivide(engCharCount, sentenceCount)\r\n # per word\r\n charPerWord = tryDivide(charCount, wordCount)\r\n rusCharPerWord = tryDivide(rusCharCount, wordCount)\r\n engCharPerWord = tryDivide(engCharCount, wordCount)\r\n # ratio\r\n rusCharRatio = tryDivide(rusCharCount, charCount)\r\n engCharRatio = tryDivide(engCharCount, charCount)\r\n rusCharVsEngChar = tryDivide(rusCharCount, engCharCount)\r\n engCharVsRusChar = tryDivide(engCharCount, rusCharCount)\r\n \r\n stats = [\r\n sentenceCount,\r\n wordCount,\r\n charCount,\r\n rusCharCount,\r\n engCharCount,\r\n digitsCount,\r\n exclamationMarkCount,\r\n questionMarkCount,\r\n wordPerSentence,\r\n charPerSentence,\r\n rusCharPerSentence,\r\n engCharPerSentence,\r\n charPerWord,\r\n rusCharPerWord,\r\n engCharPerWord,\r\n rusCharRatio,\r\n engCharRatio,\r\n rusCharVsEngChar,\r\n engCharVsRusChar,\r\n ]\r\n statsFeat = \"\"\r\n for i,f in enumerate(stats):\r\n if f != 0:\r\n statsFeat += \"%s:%s \" % (i+1, f)\r\n statsFeat = statsFeat[:-1] \r\n return statsFeat", "def dictify(words):\n word_freq = {}\n for word in words:\n if word:\n key = word.lower()\n if key in word_freq:\n word_freq[key] += 1\n else:\n word_freq[key] = 1\n else:\n pass\n return word_freq", "def initialize_document_frequencies():\n global document_frequency\n for term in dictionary:\n document_frequency[term] = len(postings[term])", "def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict", "def load_dic():\r\n f = open('../data/phrases-freq.txt')\r\n d = {}\r\n for line in f:\r\n line = line.strip().decode('utf-8')\r\n if line == '' or line[0] == '#':\r\n continue\r\n else:\r\n k, v = line.split()\r\n d[k] = int(v)\r\n return d", "def word_freq(word, ngram_dict):\n word = word.lower()\n return ngram_dict[word] if word in ngram_dict else 0", "def calculate_frequencies(cipher_text: str) -> dict:\n cipher_frequencies = dict()\n for character in cipher_text:\n try:\n cipher_frequencies[character] += 1\n except KeyError:\n cipher_frequencies[character] = 1\n \n return cipher_frequencies", "def _getFrequency(s, line, text):\n if len(s) > 0 and s[0].isalpha(): \n if 
s.lower() in _frequency:\n return _frequency[s.lower()]\n else:\n raise ValueError(\"invalid note name/frequency '%s' on line %d: %s\" % (s, line, text))\n else:\n return int(float(s))", "def feature_dict(sent, i):\n palabra=sent[i] #suponinedo que al menos tiene una palabra\n especiales= [\"á\",\"é\",\"í\",\"ó\",\"ú\", \"ü\"] #solo chequeo minusculas porque pregunto sobre el lower del string\n\n #sobre la anterior\n if i==0: #primera de la oracion\n alower=\"\"\n aistitle=False\n aisupper=False\n aisnumeric=False\n aisplural=False\n #aunder=False\n aislower=False\n aespecial=False\n else:\n alower = sent[i-1].lower()\n aistitle = sent[i-1].istitle()\n aisupper = sent[i-1].isupper()\n aisnumeric = sent[i-1].isnumeric()\n aisplural= (sent[i-1][-1:].lower() == 's')\n #aunder= (sent[i-1].find('_') >= 0)\n aislower = sent[i-1].islower()\n aespecial = (1 in [c in sent[i-1].lower() for c in especiales]),\n\n #sobre la proxima\n if i==len(sent)-1: #si es la ultima\n plower = \"\"\n pistitle = False\n pisupper = False\n pisnumeric = False\n pisplural= False\n #punder=False\n pislower = False\n pespecial = False\n else:\n plower = sent[i + 1].lower()\n pistitle = sent[i + 1].istitle()\n pisupper = sent[i + 1].isupper()\n pisnumeric = sent[i + 1].isnumeric()\n pisplural= (sent[i + 1][-1:].lower() == 's')\n #punder = (sent[i + 1].find('_') >= 0)\n pislower = sent[i + 1].islower()\n pespecial = (1 in [c in sent[i+1].lower() for c in especiales]),\n\n return {\n 'lower': palabra.lower(),\n 'istitle': palabra.istitle(),\n 'isupper': palabra.isupper(),\n 'isnumeric': palabra.isnumeric(),\n 'isplural': (palabra[-1:].lower() == 's'),\n #'under': (palabra.find('_') >= 0),\n 'islower': palabra.islower(),\n 'especial': (1 in [c in palabra.lower() for c in especiales]),\n 'alower': alower,\n 'aistitle': aistitle,\n 'aisupper': aisupper,\n 'aisnumeric': aisnumeric,\n 'aisplural': aisplural,\n #'aunder': aunder,\n 'aespecial': aespecial,\n 'aislower': aislower,\n 'plower': plower,\n 'pistitle': pistitle,\n 'pisupper': pisupper,\n 'pisnumeric': pisnumeric,\n 'pisplural': pisplural,\n #'punder': punder,\n 'pislower': pislower,\n 'pespecial': pespecial,\n }", "def lyrics_to_frequencies(lyrics):\n lyricsDictionary = dict()\n for each_word in lyrics:\n if each_word in lyricsDictionary:\n lyricsDictionary[each_word] += 1\n else:\n lyricsDictionary[each_word] = 1\n return lyricsDictionary", "def prep_dict(word):\n counts = {}\n for l in word.lower():\n if l!=\" \":\n counts[l] = counts.get(l,0) + 1\n return counts", "def preparation(self):\n self.word_freq = defaultdict(int)\n\n for sentence in self.corpus:\n for word in sentence:\n self.word_freq[word] += 1\n\n # self.words decide the index of all the words\n self.words = list(self.word_freq.keys())\n self.T = len(self.words)\n\n # word_index will give index for a given word and vice versa for index_word\n self.word_index = dict([[word, i] for i, word in enumerate(self.words)])\n self.index_word = dict([[i, word] for i, word in enumerate(self.words)])", "def stats_text_cn(text):\n text1 = text.replace(',','').replace('.','').replace('--','').replace('*','').replace('!','') # Remove the non-Chinese characters in the text.\n text2 = list(text1) # Convert the string type to the list type.\n dict = {x: text2.count(x) for x in text2} # Count the times of each character in the list.\n dict1= sorted(dict.items(), key= lambda d:d[1], reverse = True) # Sort the character in the descending order according to the times of characters.\n print(dict1) # Return the result. 
", "def transform_string_to_dictionary(data_text: str) -> Dict[str, float]:\n # TODO: create an empty population dictionary\n # TODO: iterate through each line of the data set\n # TODO: extract the ordered pair on this line\n # the ordered pair has the format:\n # (Date, population count in thousands of persons)\n # TODO: extract the dat and store it as a string\n # TODO: convert the population count to a float and store it\n # TODO: add the new key-value pair to the dictionary where\n # the key is the date and the population is the value\n # TODO: return the population dictionary", "def get_word_frequencies(topic_description):\n frequencies = {w:f for w,f in topic_description}\n return frequencies", "def getFrequencyDict(sequence):\n # freqs: dictionary (element_type -> int)\n \n for x in sequence:\n hand[x] = hand.get(x,0) + 1\n updatehand(hand, word)\n print hand\n print \"freq function\"\n #return hand", "def dfc(text: str):\n #Splitting the text into a list\n wordlist = text.split()\n worddictionary = {}\n\n #Creating the wordlist dictionary\n for word in wordlist:\n if word in worddictionary:\n #Increase\n worddictionary[word] += 1\n else:\n #add to the dictionary\n worddictionary[word] = 1\n\n #Converting worddictionary into a dataframe\n df = pd.DataFrame.from_dict(worddictionary, orient='index')\n #Resetting index to a numerical one for ease of use\n df = df.reset_index()\n #Renaming the old string-valued index\n df = df.rename(columns={'index':'word'})\n #Defining two functions (over empty variables) to replace commas and dots\n remover = lambda x: x.replace(',','')\n remover2 = lambda x: x.replace('.','')\n #Using ( too many lines) to apply the functions\n df['word'] = df['word'].apply(remover)\n df['word'] = df['word'].apply(remover2)\n #Row-wise Subselection and assignment to remove words with a frequency smaller than 2\n df = df[df[0] > 2]\n #Renaming word frequncy\n df = df.rename(columns={0:'Frequency'})\n\n return df", "def get_freqs(self):\n dictionary = {}\n for word in self.word_list:\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n letter_sorted = sorted(dictionary.items(), key=lambda entry: entry[0]) #sorts dictionary into alphabetized tuples\n count_sorted = sorted(letter_sorted, key=lambda seq: seq[1], reverse=True) #sorts alphabetical tuples into count order\n return count_sorted", "def dictionary(cleaned_data,threshold):\n news = []\n for date in cleaned_data:\n for headlines in cleaned_data[date]:\n news.append(headlines)\n\n word_freq = nltk.FreqDist(itertools.chain(*news))\n id_to_word = ['<pad>'] + [word for word, cnt in word_freq.items() if cnt >= threshold] + ['<unk>']\n word_to_id = {word:idx for idx, word in enumerate(id_to_word)}\n \n return id_to_word, word_to_id", "def frequencies(corpus, index, to_lower=False):\n freq = {}\n for sentence in corpus.get_sentences():\n for word in sentence:\n key = word[index]\n if to_lower:\n key = key.lower()\n if key in freq:\n freq[key] += 1\n else:\n freq[key] = 1\n\n return freq", "def get_stock_frequency(entries):\n stock_frequency = {}\n\n # Extract text for processing\n \"\"\"\n raw_text = [] # raw text in sentences\n for post in posts:\n raw_text.append(post['title'])\n raw_text += tokenize.sent_tokenize(post['selftext'])\n for comment in comments:\n raw_text += tokenize.sent_tokenize(comment['body'])\n \"\"\"\n\n raw_text = [] # raw text in sentences\n for entry in entries:\n # Its a post\n if 'title' in entry:\n raw_text.append(entry['title'])\n raw_text += 
tokenize.sent_tokenize(entry['selftext'])\n else:\n raw_text += tokenize.sent_tokenize(entry['body'])\n\n\n # First stage of text preprocessing\n cleaned_text = []\n for text in raw_text:\n text = text.lower() # Convert text to lowercase\n text = re.sub(r'http\\S+', '', text) # Remove URLs\n text = re.sub(r\"\\d+%\", '', text) # Remove percentage and integers before it\n text = re.sub(' +', ' ', text).strip() # Remove multiple spaces\n cleaned_text.append(text)\n\n \"\"\"\n pattern_1 = r\"(\\b|\\$)([a-z])+ (\\d)+/(\\d)+\\b\" # stock month/day\n pattern_2 = r\"((buy)|(sell)|(short)|(long)) \\$?([a-z])+\" # buy/sell/short/long\n pattern_3 = r\"\\$[a-z]+\\b\" # $SPY\n pattern_4 = r\"(\\b|\\$)([a-z])+ \\$?(\\d)+(p|c|\\$)\\b\" # stock price\n \"\"\"\n\n pattern_x_1 = r\"(\\b|\\$)([a-z])+(\\b|\\$)\"\n pattern_x_2 = r\"\\b(\\d)+/(\\d)+\\b\"\n pattern_x_3 = r\"(\\b|\\$)(\\d)+([a-z]|\\$|\\b)*\"\n\n pattern_1_list = []\n for pattern in list(itertools.permutations([pattern_x_1, pattern_x_2])):\n pattern_1 = r'(' + r' '.join(pattern) + r')'\n pattern_1_list.append(pattern_1)\n pattern_1 = r'|'.join(pattern_1_list)\n\n pattern_4_list = []\n for pattern in list(itertools.permutations([pattern_x_1, pattern_x_3])):\n pattern_4 = r'(' + r' '.join(pattern) + r')'\n pattern_4_list.append(pattern_4)\n pattern_4 = r'|'.join(pattern_4_list)\n\n pattern_5_list = []\n for pattern in list(itertools.permutations([pattern_x_1, pattern_x_2, pattern_x_3])):\n pattern_5 = r'(' + r' '.join(pattern) + r')'\n pattern_5_list.append(pattern_5)\n pattern_5 = r'|'.join(pattern_5_list)\n\n verbs = [ 'buy', 'buys', 'bought', 'buying', 'sell', 'sells', 'sold', 'selling', 'short', 'shorts', 'shorted', 'long', 'longed', 'longs', 'hold', 'held' ]\n verbs = [ f'({v})' for v in verbs ]\n verbs_regex = '|'.join(verbs)\n pattern_2 = r\"(\" + verbs_regex + r\") \\$?([a-z])+ ((calls)|(puts)|(call)|(put))?\" # buy/sell/short/long\n\n pattern_3 = r\"\\$[a-z]+\\b\" # $SPY\n\n pattern_6 = r\"(\\b|\\$)([a-z])+(\\b|\\$) ((calls)|(puts)|(call)|(put))\"\n\n # Extract stocks mentioned in specific formats\n for text in cleaned_text:\n candidate_stocks = {}\n to_print = text\n\n # Match pattern 5 and remove it from the sentence\n while True:\n res_5 = re.search(pattern_5, text)\n if not (res_5 is None):\n stock = res_5.group().split(' ')[0]\n text = re.sub(' +', ' ', text.replace(res_5.group(), \"\")).strip()\n candidate_stocks[stock] = 5\n else:\n break\n \n # Match pattern 2 and remove it from the sentence\n while True:\n res_2 = re.search(pattern_2, text)\n if not (res_2 is None):\n stock = res_2.group().split(' ')[1]\n if stock not in STOP_WORDS:\n text = re.sub(' +', ' ', text.replace(res_2.group(), \"\")).strip()\n candidate_stocks[stock] = 2\n else:\n break\n else:\n break\n \n # Pattern 1\n while True:\n res_1 = re.search(pattern_1, text)\n if not (res_1 is None):\n stock = res_1.group().split(' ')[0]\n text = re.sub(' +', ' ', text.replace(res_1.group(), \"\")).strip()\n candidate_stocks[stock] = 1\n else:\n break\n\n # Pattern 6\n while True:\n res_6 = re.search(pattern_6, text)\n if not (res_6 is None):\n stock = res_6.group().split(' ')[0]\n text = re.sub(' +', ' ', text.replace(res_6.group(), \"\")).strip()\n candidate_stocks[stock] = 6\n else:\n break\n\n # Pattern 4\n while True:\n res_4 = re.search(pattern_4, text)\n if not (res_4 is None):\n stock = res_4.group().split(' ')[0]\n text = re.sub(' +', ' ', text.replace(res_4.group(), \"\")).strip()\n candidate_stocks[stock] = 4\n else:\n break\n\n # Match pattern 3 and remove it from 
the sentence\n res_3 = re.findall(pattern_3, text)\n if len(res_3) > 0:\n stocks = dict(Counter(res_3))\n for stock, _ in stocks.items():\n text = re.sub(' +', ' ', text.replace(stock, \"\")).strip()\n candidate_stocks[stock[1:]] = 3\n\n # Add stock frequency\n if len(candidate_stocks) > 0:\n #print(to_print)\n pass\n for stock, p in candidate_stocks.items():\n if stock[0] == '$' or stock[-1] == '$':\n stock = stock[1:]\n if stock in STOCKS and stock not in FORIBIDDEN_WORDS:\n #print(to_print)\n #print(f'({p}) {stock}')\n #print('\\n')\n if stock in stock_frequency:\n stock_frequency[stock] += 1\n else:\n stock_frequency[stock] = 1\n else:\n #print(f'- ({p}) {stock}')\n pass\n #print('\\n')\n\n # Restructure and return\n return [ { 'stock_name' : k.upper(), 'mentions' : v } for k, v in stock_frequency.items() ]", "def correctFrequency(bias, dictionary):\r\n os.rename(\"cleanedData/\"+str(bias)+\"cleaned.txt\", \"cleanedData/\"+str(bias)+\"needToChange.txt\")\r\n tempDict = {}\r\n with open(\"cleanedData/\"+str(bias)+\"needToChange.txt\",'r') as f:\r\n for line in f:\r\n freq = line.split(\": \")\r\n tempDict[freq[0]] = int(freq[1].strip())\r\n for tup in dictionary:\r\n if tup[0] in tempDict:\r\n tempDict[tup[0]]+=tup[1]\r\n else:\r\n tempDict[tup[0]]=tup[1]\r\n sortedDict = sorted(tempDict.items(),key=operator.itemgetter(1),reverse=True)\r\n \r\n url = \"cleanedData/\" + str(bias) + 'cleaned.txt'\r\n with open(url, 'w') as f:\r\n for word in sortedDict:\r\n f.write(word[0] + \": \" +str(word[1])+\"\\n\") \r\n os.remove(\"cleanedData/\"+str(bias)+\"needToChange.txt\")", "def preprocessing():\n english_dictionary = nltk.corpus.brown.words()\n slang_vocab = pickle.load(open('vocab_pattern_match_with_freq.pkl', 'rb'))\n\n normalize_english_dict = len(english_dictionary)\n normalize_slang_vocab = 0\n for w, n in slang_vocab.items():\n normalize_slang_vocab += n\n\n words = {}\n for w, n in Counter(english_dictionary).items():\n words[w] = n/normalize_english_dict\n \n for w, n in slang_vocab.items():\n if w not in words:\n words[w] = 0.\n words[w] += n/normalize_slang_vocab\n\n words_by_freq = [w for w,_ in sorted(words.items(), key=lambda x: x[1], reverse=True)]\n\n # Build a cost dictionary, assuming Zipf's law and cost = -math.log(probability).\n #words = open(\"words_by_frequency.txt\").read().split()\n wordcost = dict((k, log((i+1)*log(len(words_by_freq)))) for i,k in enumerate(words_by_freq))\n maxword = max(len(x) for x in words_by_freq)\n return wordcost,maxword", "def print_word_freq(file):\n with open(file) as text:\n text = text.read().lower()\n text = text.replace(\"\\n\", \" \")\n text = text.replace(\"’\", \"\")\n # text = \" \".join(text.split())\n # print(text)\n for character in string.punctuation:\n text = text.replace(character, \"\")\n word_list = text.split()\n clean_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n clean_list.append(word)\n \n\n # for stop_word in STOP_WORDS:\n # if stop_word in word_list:\n # word_list.remove(stop_word)\n\n\n new_dict = {}\n for word in clean_list:\n new_dict[word] = clean_list.count(word)\n sorted_dict = sorted(new_dict.items())\n print(sorted_dict)\n\n # print(f\"{key} | {value} {'*' * value}\")\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n\n # for word in word_list:\n # if word in string.punctuation:\n # #do something\n # if word in STOP_WORDS:\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n # print(text)", "def unigram_model(list_of_words, 
unigram_count, N=count_token()):\n d = pd.read_csv(unigram_count)\n proba_dict = {list_of_words[i]: (d[el].values[0] / float(N)) if el in d.columns.values else 0.0 for i, el in enumerate(list_of_words) }\n return proba_dict", "def __init__(self):\n self.freq = {}", "def print_word_freq(file):\n # with open(file, 'r') as text the r as the second arguement means that my intentions are to read the file\n with open(file, 'r') as text:\n # this reads the entire file and puts this into text string\n text_string = text.read()\n # returns the string respresentation of text string without removing special characters so you can see what you need to remove\n # print(repr(text_string))\n # this removes the specified characters from the text string\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"—\", \" \")\n text_string = text_string.replace(\"-\", \" \")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"’\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"\\\"\", \"\")\n # takes the text string and makes all the characters lower case\n text_string = text_string.lower()\n # takes the text string and splits all the words into a list this splits from space to space\n words_list = text_string.split()\n # a dictionary is a key and a value\n no_stop_words = {}\n # for loop that will cycle through the words list\n for word in words_list:\n # checking to see if the word is stop words\n if word not in STOP_WORDS:\n # if the word is already in the dictionary no stop words increment the value by 1\n if word in no_stop_words:\n no_stop_words[word] += 1\n # if the word is not in the dictionary no stop words add this to the dictionary and give it a value of 1\n else:\n no_stop_words[word] = 1\n \n sorted_dict = {}\n sorted_keys = sorted(no_stop_words, key=no_stop_words.get, reverse=True)\n \n for w in sorted_keys:\n sorted_dict[w] = no_stop_words[w]\n \n for key in sorted_dict:\n print(f\"{key:>15} | {sorted_dict[key]:2} {'*' * sorted_dict[key]}\")\n \n # good practice to ensure that we are properly closing the file in use at the end of the function\n text.close()", "def get_frequency():\n # Check if value is in json\n if request.is_json:\n content = request.get_json()\n if \"text\" in content:\n string = content[\"text\"]\n if \"sort\" in content:\n to_sort = content[\"sort\"]\n else:\n to_sort = 0\n else:\n return jsonify(message=\"Please supply a json with text as key and text to be analyzed as value\"),400\n client_ip = request.environ.get(\"HTTP_X_FORWARDED_FOR\")\n loadbalancer_ip = request.environ.get(\"REMOTE_ADDR\")\n worker_ip = request.environ.get(\"HTTP_HOST\")\n result = word_counter(string,to_sort)\n return jsonify(result,{\"Client_IP\":client_ip,\"Loadbalancer_IP\":loadbalancer_ip,\"Worker_IP\":worker_ip}),200\n\n else:\n return jsonify(message=\"Please Send Your request in json\"),400", "def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n # frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del 
freq[w]\n return freq", "def create_dict(text):\n #On/Off case sensitivity\n text = text.lower() \n\n #handy one liner that splits words apart via whitespace, and \n #removes punctuation. Results in list of words.\n word_list = [s.strip(string.punctuation) for s in text.split()]\n \n d = dict()\n for word in word_list:\n d[word] = d.get(word, 0) +1\n return d", "def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n # frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq[w]\n return freq", "def word_freq(self, word_list):\n hist = {}\n for word in word_list:\n hist[word] = hist.get(word, 0) + 1\n return hist", "def letterFreq(words):\n dict = {}\n total = 0\n for word in words:#Iterate through words\n for letter in word:#Increment by letter\n count = 0\n for yearCount in words[word]:\n count += yearCount.count#Increment total instances of word\n total += count#Count total letters\n if letter in dict:\n dict[letter] += count#Add to existing entry\n else:\n dict[letter] = count#Create new entry\n \"\"\"CODE FOR THE WHOLE ALPHABET\"\"\"\n list = []\n for letter in ascii_lowercase:\n if letter in dict and dict[letter] != 0:\n list.append(dict[letter] / total)#Convert to relative\n else:\n list.append(0.0)#Fill alphabet\n return list", "def _compute_frequencies( word_sent):\n\t\tfreq = defaultdict(int)\n\t\tfor s in word_sent:\n\t\t\tfor word in s:\n\t\t\t\tif word not in _stopwords:\n\t\t\t\t\tfreq[word] += 1\n\t\t\t\t# frequencies normalization and fitering\n\t\treturn freq", "def building_english_dict():\n english_letters_frequency = dict()\n\n # assigning the percentages of english letters in a sample\n english_letters_frequency['a'] = 8.2\n english_letters_frequency['b'] = 1.5\n english_letters_frequency['c'] = 2.8\n english_letters_frequency['d'] = 4.3\n english_letters_frequency['e'] = 12.7\n english_letters_frequency['f'] = 2.2\n english_letters_frequency['g'] = 2.0\n english_letters_frequency['h'] = 6.1\n english_letters_frequency['i'] = 7.0\n english_letters_frequency['j'] = 0.2\n english_letters_frequency['k'] = 0.8\n english_letters_frequency['l'] = 4.0\n english_letters_frequency['m'] = 2.4\n english_letters_frequency['n'] = 6.7\n english_letters_frequency['o'] = 7.5\n english_letters_frequency['p'] = 1.9\n english_letters_frequency['q'] = 0.1\n english_letters_frequency['r'] = 6.0\n english_letters_frequency['s'] = 6.3\n english_letters_frequency['t'] = 9.1\n english_letters_frequency['u'] = 2.8\n english_letters_frequency['v'] = 1.0\n english_letters_frequency['w'] = 2.4\n english_letters_frequency['x'] = 0.2\n english_letters_frequency['y'] = 2.0\n english_letters_frequency['z'] = 0.1\n\n return english_letters_frequency", "def lemmas_freq_doc(doc):\n lemmas = {}\n morpho = doc.morpho\n for i in morpho:\n # if this is a word\n if 'analysis' in i.keys():\n # if there is few lex\n if len(i['analysis']):\n for l in i.get('analysis', []):\n if l.get('lex', False):\n if (not l['lex'] in stop_lemmas) & (l.get('wt', 0) > 0):\n lemmas[l['lex']] = lemmas.get(l['lex'], 0) + l.get('wt', 1)\n else:\n # english word or number or smth like this\n word = i.get('text', '')\n # take word, don't take number\n if (len(word) > 0) and not word.isdigit():\n lemmas[word] = lemmas.get(word, 0) + 1\n doc.lemmas = lemmas", "def find_types_of_sents_in_text(text):\r\n 
return dict(Counter(map(lambda x: x[-1], nltk.sent_tokenize(text))))", "def frequency(text):\n # TODO: change function input to a textfile?\n import collections\n freq = collections.Counter(text)\n # print freq\n return freq", "def __init__(self):\r\n #\r\n # Create dictionaries for each characteristic\r\n #\r\n self.words = {} # For counting words\r\n self.wordlengths = {} # For counting word lengths\r\n self.stems = {} # For counting stems\r\n self.sentencelengths = {} # For counting sentence lengths\r\n #\r\n # Create another of your own\r\n #\r\n self.gerund = {} # For counting words with ing \r\n self.text = ''", "def scores(self, text: str) -> Dict[str, float]:\n values = extract(text)\n input_fn = _to_func(([values], []))\n prediction = self._classifier.predict_proba(input_fn=input_fn)\n probabilities = next(prediction).tolist()\n sorted_languages = sorted(self.languages)\n return dict(zip(sorted_languages, probabilities))", "def generate_wf(dataset: Dataset) -> Dict[str, int]:\n wf_dict = Counter()\n\n for item in tqdm(dataset, desc=\"Calculating word frequencies\"):\n for w in item[\"review\"].split():\n wf_dict[w] += 1\n\n return wf_dict", "def analyze_words(self):\n\t\t\n\t\tword_analysis = {}\n\t\tfor word in self.word_list:\n\t\t\tif word not in word_analysis:\n\t\t\t\tacademic = (word in LEMMA_DICT)\n\t\t\t\tlength = len(word)\n\t\t\t\tfrequency = len(self.word_list[word])\n\t\t\t\tstem = word\t\n\t\t\t\tword_location_index = len(self.sentence_index)-1 #first set it as the last index\n\t\t\t\t\n\t\t\t\tfor index, sentence in self.sentence_index.items():\n\t\t\t\t\tif word in sentence.split():#need to be individual words, not parts of a word\n\t\t\t\t\t\tword_location_index = index \n\t\t\t\t\t\tbreak\n\t\t\t\t\tif self.word_list[word][0] in sentence.split():#accounts for words with upper cases\n\t\t\t\t\t\tword_location_index = index\n\t\t\t\t\t\n\t\t\t\t#selection critera\n\t\t\t\tif academic:\n\t\t\t\t\tselection_criteria = 'academic word'\n\t\t\t\telif frequency > 1: \n\t\t\t\t\tselection_criteria = 'high frequency'\n\t\t\t\telse:\n\t\t\t\t\tselection_criteria = 'word length'\n\n\t\t\t\tword_analysis[word] = (academic, length, frequency, stem, word_location_index, selection_criteria)\n\t\t\n\t\tself.word_analysis = word_analysis\n\t\t\n\t\treturn self.word_analysis", "def sentiment(self) -> Dict[str, float]:", "def task1(sentence):\n split_sentence = sentence.split()\n dictionary = dict()\n for word in split_sentence:\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n for item in dictionary:\n print(\"Word \" + item + \" used \" + str(dictionary[item]) + \" times\")\n return dictionary", "def create_model_owc(text: str) -> Dict[str, Set[str]]:\n dict_so_far = {}\n list_of_words = str.split(text)\n\n\n for x in range(0, len(list_of_words)):\n \"\"\"\n check if the word is followed by a period and add it to the follow list if it is, then remove the period to \n check if the word is followed by something else\n \"\"\"\n if list_of_words[x][-1] == '.':\n list_of_words[x] = list_of_words[x][0:-1]\n update_follow_set(dict_so_far, list_of_words[x], '.')\n\n else:\n update_follow_set(dict_so_far, list_of_words[x], list_of_words[x + 1].rstrip('.'))\n return dict_so_far", "def dictagfreq(kind, fname):\n\n d = {}\n f = open(fname)\n f.next()\n f.next()\n y = '0'\n for l in f:\n adict = agline(l)\n if y == adict['well']:\n continue \n gen = adict['gen']\n if gen not in d:\n d[gen] = [] \n km = kind + 'm'\n ks = kind + 's'\n \n if kind == 'escd' or kind 
== 'escm':\n if adict[ks] == '':\n d[gen].append(0)\n elif int(adict[ks]) >= 0:\n d[gen].append(100)\n else:\n if adict[ks] == 'x':\n d[gen].append(0)\n elif adict[ks] == '-':\n pass\n elif adict[ks] == '':\n d[gen].append(100)\n elif int(adict[ks]) >= 0:\n d[gen].append(100)\n y = adict['well']\n \n return(d)", "def pos(text):\n\n pos_counter = src.utils.nlp.parts_of_speech(text)\n total_count = sum(pos_counter.values())\n pos_dict = {pos: count / total_count for pos, count in pos_counter.items()}\n return pos_dict", "def freq():", "def frequency_feelings(self):\n feelings = {}\n for response in self.responses:\n if response.question.text == \"In one word, how does this text make you feel?\":\n lower_case_word = response.response.lower()\n if feelings.get(lower_case_word, 0) == 0:\n feelings[lower_case_word] = 1\n else:\n feelings[lower_case_word] += 1\n\n frequent_words = [] # list of tuples in the format (frequency, word)\n for word in feelings:\n if feelings[word] > 1:\n frequent_words.append((word, feelings[word]))\n frequent_words.sort(key=lambda x: x[1], reverse=True)\n return frequent_words", "def _create_dictionary(self, document):\n words = self._normalize_words(document.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def word_frequency( tokenized, dic ):\n print( 'computing word frequencies' )\n start = time.time()\n for i, text in enumerate( tokenized ):\n for token in text:\n if token not in dic:\n dic[ token ] = 1\n else:\n dic[ token ] += 1\n if i % 10000 == 0:\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s'.format( i, NO_REVIEWS, time.time() - start ) )\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s\\n'.format( i, NO_REVIEWS, time.time() - start ) )", "def get_sentence_score(sentences, word_frequencies):\r\n sentence_scores = dict()\r\n for sent in sentences:\r\n word_count_without_stopwords=0\r\n for word in word_tokenize(sent.lower()):\r\n if word in word_frequencies.keys():\r\n word_count_without_stopwords+=1 \r\n if len(sent.split(' ')) < 30:\r\n if sent not in sentence_scores.keys():\r\n sentence_scores[sent] = word_frequencies[word]\r\n else:\r\n sentence_scores[sent] += word_frequencies[word]\r\n \r\n if sent in sentence_scores:\r\n sentence_scores[sent] = sentence_scores[sent]/word_count_without_stopwords\r\n \r\n print(sentence_scores) \r\n return sentence_scores", "def new_counts_dict():\n\n\tIN_FILES = [\"../_semtag_dataset_webanno_tfidf_inimigo.txt\",\"../_semtag_dataset_webanno_tfidf_publico.txt\" ]\n\n\ttxt = []\n\tfor in_file in IN_FILES:\n\t with codecs.open(in_file,\"r\",\"utf-8\") as fid:\n\t txt += fid.readlines()\n\t#words\n\twords = [w for m in txt for w in m.split()]\n\t#unique words\n\twords = list(set(words))\n\t#word index\n\twrd2idx = {w:-1 for w in words}\n\n\tset_trace()\n\t\n\twith open(COUNTS_DIC,\"w\") as fod:\n\t\tcPickle.dump(wrd2idx, fod, cPickle.HIGHEST_PROTOCOL)", "def find_ngrams(self, n):\n\n output = {}\n\n for i in range(len(self.text)-n+1):\n s = ' '.join(self.text[i:i+n])\n # if s is not already in dictionary, set value to 0\n output.setdefault(s, 0)\n output[s] += 1\n return output", "def get_ngramlogprobs(freqdict):\n return" ]
[ "0.73105294", "0.6889312", "0.683538", "0.6775084", "0.6548403", "0.6505949", "0.65024614", "0.6410328", "0.6371125", "0.6328725", "0.6307117", "0.6296894", "0.625435", "0.6206774", "0.6178976", "0.61252326", "0.61038357", "0.6091284", "0.60814357", "0.6076231", "0.6062723", "0.6057911", "0.6008987", "0.5993319", "0.599048", "0.5985016", "0.5976527", "0.5976333", "0.59736633", "0.5965174", "0.59613574", "0.5946538", "0.5941092", "0.5929414", "0.59134746", "0.58947736", "0.58876145", "0.58872664", "0.587917", "0.58655375", "0.5848372", "0.5846768", "0.5838395", "0.5826121", "0.58217365", "0.5813359", "0.5811112", "0.5808967", "0.58023083", "0.57885945", "0.578588", "0.5771855", "0.5761654", "0.57590646", "0.5753431", "0.5736757", "0.57143426", "0.56934065", "0.56929773", "0.56929374", "0.567255", "0.56714445", "0.5663591", "0.56454134", "0.5639419", "0.5638965", "0.5629658", "0.5626072", "0.5625807", "0.5625641", "0.5622428", "0.56212115", "0.5611098", "0.5606395", "0.5601715", "0.56006294", "0.5599373", "0.55962366", "0.55942386", "0.5580941", "0.55806106", "0.5579132", "0.5569975", "0.5569074", "0.556097", "0.5554616", "0.55511874", "0.55511004", "0.5547109", "0.5542751", "0.5542686", "0.5537796", "0.55362225", "0.55295944", "0.5529191", "0.5526877", "0.5525465", "0.55247086", "0.5515517", "0.55131525" ]
0.60706675
20
two dictionaries nd1 and nd2 and return the smallest positive value
def smallestValue(self, nd1, nd2):
    minnd1 = min(nd1.values())
    minnd2 = min(nd2.values())
    totalmin = min(minnd1, minnd2)
    return totalmin
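A minimal standalone sketch of the same idea (hypothetical names; the original is a class method): take two dictionaries of counts and return the smaller of their two minimum values.

def smallest_value(nd1, nd2):
    # Smallest value appearing in either dictionary.
    return min(min(nd1.values()), min(nd2.values()))

# Example: the smallest count across both dictionaries is 1.
print(smallest_value({'a': 3, 'b': 1}, {'a': 2, 'c': 5}))  # -> 1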
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def minInDict(dist):\r\n m = float('inf')\r\n for p in dist:\r\n for q in dist[p]:\r\n if dist[p][q] < m:\r\n m = dist[p][q]\r\n a,b = p,q\r\n return a,b", "def keywithsecondminval(d): \r\n if len(d) == 1:\r\n d = (d.keys())\r\n return d[0]\r\n\r\n v=list(d.values())\r\n k=list(d.keys())\r\n\r\n a = list(v)\r\n\r\n b = list(k)\r\n\r\n k.pop(v.index(min(v)))\r\n\r\n v.pop(v.index(min(v)))\r\n\r\n test = k[v.index(min(v))]\r\n\r\n return test", "def min_dst(tet1, tet2, allow_zero=True):\n dists = ssd.cdist(tet1, tet2)\n if not allow_zero:\n dists[dists == 0] = np.inf\n return dists.min(axis=1)\n\n #dists = np.empty(tet1.shape[0])\n #for i, t1 in enumerate(tet1):\n # min_dist = np.sum((tet2 - t1) ** 2, axis=1)\n # if not allow_zero:\n # dists[i] = np.min(min_dist[min_dist != 0])\n # else:\n # dists[i] = np.min(min_dist)\n #return np.sqrt(dists)", "def _get_min_positive_value(self, a, b):\n if a < 0 and b >= 0:\n return b\n if a >= 0 and b < 0:\n return a\n return min(a, b)", "def argmin(self,d):\n if not d: \n return None\n # import ipdb; ipdb.set_trace()\n min_val = min(d.values())\n return [k for k in d if d[k] == min_val][0]", "def match_min(coords1,coords2,tail1=(),tail2=()):\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n\n dist_min=zeros(np1)*1.\n\n for j in range(np1):\n #dist=sqrt(add.reduce((a1[:,j,NewAxis]-a2[:,:])**2))\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n dist_min[j]=dist[i_min]\n match[j]=i_min\n\n salida=list(a1)\n for i in range(nt1):salida.append(tail1[i])\n \n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n\n salida.append(dist_min)\n return tuple(salida)", "def minEntryAbs(m1, m2, zeros=True):\n\n if zeros:\n return np.min(np.abs(m1-m2))\n else:\n result = [np.abs(x-y) for x, y in\n zip(m1.flatten(), m2.flatten())\n if x > 0 or y >0]\n\n if len(result) > 0:\n return np.min(result)\n else:\n return 0.0", "def compare_min(values, weights):\n return np.min(values.numpy())", "def minimum_inplace(a, b):", "def compareDictionaries(self, d, nd1, nd2): \r\n normnd1 = self.normalizeDictionary(nd1)\r\n normnd2 = self.normalizeDictionary(nd2) \r\n total_log_prob1 = 0.0\r\n total_log_prob2 = 0.0\r\n epsilon = self.smallestValue(normnd1,normnd2)/2\r\n for x in d:\r\n if x not in normnd1:\r\n total_log_prob1 += log(epsilon)\r\n else:\r\n total_log_prob1 += log(normnd1[x])*d[x]\r\n for x in d: \r\n if x not in normnd2:\r\n total_log_prob2 += log(epsilon)\r\n else:\r\n total_log_prob2 += log(normnd2[x])*d[x]\r\n return [total_log_prob1, total_log_prob2]", "def keywithminval(d): \r\n v=list(d.values())\r\n k=list(d.keys())\r\n return k[v.index(min(v))]", "def keep_lesser_x0_y0_zbt0_pair_in_dict(p, p1, p2):\n const_dict = p[3]\n cd1, cd2 = p1[3], p2[3]\n if 'x0' in cd1 and 'x0' in cd2:\n if cd2['x0'] < cd1['x0']:\n const_dict['x0'] = cd2['x0']\n const_dict['y0'] = cd2['y0'] if 'y0' in cd2 else None\n const_dict['zbt0'] = cd2['zbt0'] if 'zbt0' in cd2 else None\n else:\n const_dict['x0'] = cd1['x0']\n const_dict['y0'] = cd1['y0'] if 'y0' in cd1 else None\n const_dict['zbt0'] = cd1['zbt0'] if 'zbt0' in cd1 else None\n p = 
p[0:3] + (const_dict,)\n return p", "def d_min(x, y):\n axis = np.argmax(x.shape)\n return np.min(np.array([x, y]), axis=axis)", "def kth_smallest_alt(arr1, arr2, k):\n pass", "def smallest(*args):\r\n if len(args) == 2:\r\n a, b = args\r\n return switch(a < b, a, b)\r\n else:\r\n return min(stack(*args), axis=0)", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n \n for i in d1:\n total = total + d1[i]\n for i in d2:\n if i in d1:\n if total == 0:\n score = score\n else:\n probablility = (d1[i] / total)\n score = score + (math.log10(probablility) * d2[i])\n else:\n if total == 0:\n score = score\n else:\n score = score + ((0.5 / total) * d2[i])\n return score", "def set_min_dist(S1, S2):\n ret =[]\n if len(S2)>len(S1):\n tmp = S1\n S1=S2\n S2=tmp\n \n for x in S1:\n min_x=((x[0]-S2[0][0])**2+(x[1]-S2[0][1])**2)**0.5\n for y in S2:\n d = ((x[0]-y[0])**2+(x[1]-y[1])**2)**0.5\n if d<min_x:\n min_x = d\n ret.append(min_x)\n\n return ret", "def mini(a, b):\n return min(a, b)", "def min_in_dict(dict_of_ints):\n list_of_vals =[]\n list_of_min_keys = []\n for i in dict_of_ints:\n list_of_vals.append(dict_of_ints[i])\n min_val = min(list_of_vals)\n for i in dict_of_ints:\n if dict_of_ints[i] == min_val :\n list_of_min_keys.append(i)\n if len(list_of_min_keys) == 1:\n return list_of_min_keys[0]\n else:\n return list_of_min_keys", "def profitcal(dict1,dict2):\n for i in dict1:\n dict1[i].append('x')\n\n for i in dict2:\n dict2[i].append('x')\n\n for i in dict1:\n x=(dict1[i][1]-dict2[i][1])\n dict1[i][2]=-x\n dict2[i][2] = x\n return dict1,dict2", "def argmin(self, values: pdarray) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"argmin\")\n return k, cast(pdarray, v)", "def first2(x, y):\n y = np.asarray(y)\n return y[np.argsort(x)][0]", "def find_min_distance():\n return np.argmin(d)", "def compare_dictionaries(d1, d2):\r\n score = 0\r\n gef = 0\r\n for z in d1:\r\n gef += d1[z]\r\n total = gef\r\n \r\n for x in d2:\r\n if x in d1:\r\n score += math.log(d1[x] / total) * d2[x] \r\n else:\r\n score += math.log(0.5/total) * d2[x]\r\n return score", "def compare(dict1, dict2):\n\n\tcompared_data = {}\n\tfor keys in dict1:\n\t\tcompared_data[keys] = abs(dict1[keys] - dict2[keys])\n\t\n\treturn compared_data", "def min_value(self, ipdict):\n min_ip = min(ipdict.keys())\n return min_ip", "def minimum_distance(object_1, object_2):\n\n # package import\n import numpy as np\n\n # main algorithm\n minimum_distance = 100000\n\n for coord_1 in object_1:\n for coord_2 in object_2:\n distance_btwn_coords = np.linalg.norm(coord_1 - coord_2)\n if distance_btwn_coords == 0:\n minimum_distance = distance_btwn_coords\n return float(minimum_distance)\n elif distance_btwn_coords < minimum_distance:\n minimum_distance = distance_btwn_coords\n\n return float(minimum_distance)", "def test_perf_min():\n dict_time = timeit.timeit(\n \"min(keys_dict.keys())\",\n setup=\"keys_dict = {key: key for key in range(-1000, 1000)}\",\n number=1000\n )\n dict_sort_time = timeit.timeit(\n \"sorted(keys_dict.keys())[1]\",\n setup=\"keys_dict = {key: key for key in range(1000, -1000, -1)}\",\n number=1000\n )\n tree_time = timeit.timeit(\n \"keys_tree.min()\",\n setup=\"from amp_trees import OrderedTreeDict;\"\n \"keys_tree = OrderedTreeDict((key, key) for key in range(-1000, 1000))\",\n number=1000\n )\n assert dict_time > tree_time, \"Min method is slow.\"\n assert dict_sort_time > tree_time, \"Max method is slow.\"", "def calculate_l1_distance(dict1, dict2):\n res = 0.0\n for key in dict1.keys():\n d1 = 
dict1[key]\n d2 = dict2[key]\n res += abs(d1-d2)\n return res", "def get_closest_match(indices1, indices2):\n \n if len(indices1) == 1 and len(indices2) == 1:\n return indices1[0], indices2[0]\n \n closest_match = (indices1[0], indices2[0])\n min_dist = np.abs(closest_match[0][0] - closest_match[1][0])\n for pair in itertools.product(indices1, indices2):\n dist = np.abs(pair[0][0] - pair[1][0])\n if dist < min_dist:\n closest_match = pair\n min_dist = dist\n \n return closest_match", "def structured_minimum(x, y):\r\n # see decorator for function body\r", "def _nearest(arrlist_1, arrlist_2):\n tree = KDTree(arrlist_1);\n pts = tree.query(arrlist_2)\n\n return tree.data[pts[1][pts[0].argmin()]]", "def secondSmallest(d_diff_pts):\n tmp_inds = np.arange(len(d_diff_pts))\n tmp_inds_min0 = np.argmin(d_diff_pts)\n tmp_inds = np.delete(tmp_inds, tmp_inds_min0)\n tmp_d_diff_pts =np.delete(d_diff_pts, tmp_inds_min0)\n secondSmallest_value = min(tmp_d_diff_pts)\n secondSmallest_ind = np.argmin(np.abs(d_diff_pts - secondSmallest_value))\n return secondSmallest_value, secondSmallest_ind", "def _min_norm_element_from2(v1v1, v1v2, v2v2):\n if v1v2 >= v1v1:\n # Case: Fig 1, third column\n gamma = 0.999\n cost = v1v1\n return gamma, cost\n if v1v2 >= v2v2:\n # Case: Fig 1, first column\n gamma = 0.001\n cost = v2v2\n return gamma, cost\n # Case: Fig 1, second column\n gamma = -1.0 * ( (v1v2 - v2v2) / (v1v1+v2v2 - 2*v1v2) )\n cost = v2v2 + gamma*(v1v2 - v2v2)\n return gamma, cost", "def smart_min(v1, v2):\n\n if v1 is None:\n return v2\n\n if v2 is None:\n return v1\n\n return min(v1, v2)", "def min_scalar_prod(x, y):\n x = sorted(x) # make copies\n y = sorted(y) # to save arguments\n return sum(x[i] * y[-i - 1] for i in range(len(x)))", "def min(self, include_zero=False):\n for key, value in self.items():\n if value > 0 or include_zero:\n return key", "def channel_sqrdiff(dict1, dict2, position, normalize_constant = 1):\r\n\r\n\tassert len(dict1) == len(dict2), \"lengthes of the dictionaries not same\"\r\n\r\n\tarray1 = np.array([i[position] for i in dict1]) / normalize_constant\r\n\tarray2 = np.array([i[position] for i in dict2]) / normalize_constant\r\n\r\n\treturn (array1 - array2) ** 2", "def match_min2(coords1,coords2,tail1=(),tail2=()):\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n dist_min=zeros(np1)*1.\n x2=zeros(np1)*1.\n y2=zeros(np1)*1.\n for j in range(np1):\n #dist=add.reduce((a1[:,j,NewAxis]-a2[:,:])**2)\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n dist_min[j]=dist[i_min]\n x2[j],y2[j]=a2[0,i_min],a2[1,i_min]\n match[j]=i_min\n \n salida=list(a1)\n salida.append(x2)\n salida.append(y2)\n\n for i in range(nt1):salida.append(tail1[i])\n \n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n\n salida.append(dist_min)\n return tuple(salida)", "def _null_min(a, b):\n if a is None:\n return b\n if b is None:\n return a\n return min(a, b)", "def bhattacharyya(dict1, dict2, inside01=True):\n s = 0\n max_value_possible = 0\n for i in {*dict1, *dict2}:\n s+=(dict1.get(i,0)*dict2.get(i,0))**.5\n if inside01:\n max_value_possible+= 
(max(dict1.get(i,0),dict2.get(i,0))**.5)\n if inside01:\n return -math.log(s)/max_value_possible if s!=0 else -np.inf\n else:\n return -math.log(s) if s!=0 else -np.inf", "def closest_vals(arr1, arr2):\n arr1 = np.array(arr1).T\n arr1 = arr1[:, np.newaxis]\n arr2 = np.array(arr2).T\n\n return np.argmin(abs(arr2 - arr1), axis=1)", "def secondTwoSmallest(d_diff_pts):\n d_diff_pts_saved = d_diff_pts\n #Find the second smallest\n secondSmallest_value, secondSmallest_ind = secondSmallest(d_diff_pts)\n #Delete the second smallest\n d_diff_pts = np.delete(d_diff_pts,secondSmallest_ind)\n\n #Delete the smallest\n tmp_inds = np.arange(len(d_diff_pts))\n tmp_inds_min0 = np.argmin(d_diff_pts)\n tmp_inds = np.delete(tmp_inds, tmp_inds_min0)\n tmp_d_diff_pts =np.delete(d_diff_pts, tmp_inds_min0)\n #Find the third smallest\n thirdSmallest_value = min(tmp_d_diff_pts)\n thirdSmallest_ind = np.argmin(np.abs(d_diff_pts_saved - thirdSmallest_value))\n return secondSmallest_value, secondSmallest_ind, thirdSmallest_value, thirdSmallest_ind", "def assembly_compare(x, y) :\n if x.kinf() < y.kinf() :\n return 1\n elif x.kinf() == y.kinf() :\n return 0\n else : #x.resultType < y.resultType\n return -1", "def find_closest(a, b):\n a = np.atleast_1d(np.array(a))\n b = np.atleast_1d(np.array(b))\n out = [np.argmin(abs(b - a1)) for a1 in a]\n return out", "def extract_minOld2(H):\n minDist = approxInf\n u = None\n i = 0\n for (v, d) in H:\n if d <= minDist:\n minDist = d\n u = v # note that u is unused (instead returned by pop)\n imin = i\n i += i\n return(H.pop(imin)) # return [u, d]", "def mini(a,b):\n\tif a < b: \n\t\treturn a\n\treturn b", "def smallest_diff(a, b):\n b.sort()\n smallest_diff = None\n\n for n in a:\n idx = bisect_left(b, n)\n diff = min(abs(b[idx - 1] - n), abs(b[idx] - n))\n if smallest_diff is None or smallest_diff > diff:\n smallest_diff = diff\n\n return smallest_diff", "def test_perf_min(self):\n dict_time = timeit.timeit(\n \"min(keys_dict.keys())\",\n setup=\"from random import sample;\"\n \"keys_dict = {key: key for key in sample(range(-1000, 1000), 2000)}\",\n number=1000\n )\n dict_sort_time = timeit.timeit(\n \"sorted(keys_dict.keys())[1]\",\n setup=\"from random import sample;\"\n \" keys_dict = {key: key for key in sample(range(-1000, 1000), 2000)}\",\n number=1000\n )\n tree_time = timeit.timeit(\n \"keys_tree.min()\",\n setup=\"from amp_trees import SplayDict;\"\n \"from random import sample;\"\n \"keys_tree = SplayDict((key, key) for key in sample(range(-1000, 1000), 2000))\",\n number=1000\n )\n self.assertGreater(dict_time, tree_time, \"Min method is slow.\")\n self.assertGreater(dict_sort_time, tree_time, \"Max method is slow.\")", "def quick_e_score(self, n1, n2):\n if n1.needs_update:\n n1._update()\n if n2.needs_update:\n n2._update()\n dists = cdist(n1.mat, n2.mat)\n return -np.min(dists)", "def min_distance(s1, s2):\n n = len(s1)\n m = len(s2)\n matrix = [([0]*(m+1)) for i in xrange(n+1)]\n for i in xrange(m+1):\n matrix[0][i] = i\n for i in xrange(n+1):\n matrix[i][0] = i\n for i in xrange(1,n+1):\n for j in xrange(1,m+1):\n temp = min(matrix[i-1][j]+1, matrix[i][j-1]+1)\n d = 0 if s1[i-1]==s2[j-1] else 1\n matrix[i][j] = min(temp, matrix[i-1][j-1]+d)\n return matrix[n][m]", "def nearest_difference(evs1, evs2):\n\n sigma = calc_sigma(evs1)\n nearestDiff = zeros((vecLen-1), dtype='d')\n for j in range(vecLen-1):\n minimum = infty\n for i in range(vecLen2):\n diff = absolute(evs1[j] - evs2[i]) / sigma[j]\n if diff < minimum:\n minimum = diff\n del i\n nearestDiff[j] = 
minimum\n del j\n\n return nearestDiff", "def compare_distance(self, a, b):\n a_dist = int(a['distance'])\n b_dist = int(b['distance'])\n if a_dist < b_dist:\n return -1\n elif a_dist > b_dist:\n return 1\n else:\n return 0", "def min_args(dico: Dict) -> List:\n\n positions = []\n min_value = float(\"inf\")\n for k, v in dico.items():\n\n if v == min_value:\n\n positions.append(k)\n\n if v < min_value:\n\n min_value = v\n positions = [k]\n\n return positions", "def wilcoxon_z(vals1: Union[Dict[Any, float], List[float]],\n vals2: Optional[Union[Dict[Any, float], List[float]]] = None) -> float:\n return wilcoxon(vals1, vals2)[0]", "def min_for_dic_value(dic_values_array, possible_keys_array):\n # initialize with empty array\n all_dic = {}\n for key in possible_keys_array:\n all_dic[key] = []\n\n # loop thorough dictionaries to gather all values for one key into array\n for dic in dic_values_array:\n for key in possible_keys_array:\n all_dic[key].append(dic[key])\n\n # now min_dic for every key has its minimum value\n min_dic = {}\n for key in possible_keys_array:\n min_dic[key] = min(all_dic[key])\n\n return min_dic", "def find_closest_atom(coords1, coords2):\n\n coords1 = np.array(coords1)\n coords2 = np.array(coords2)\n diff = coords2[:, np.newaxis] - coords1[np.newaxis, :]\n dist = np.einsum('ijk->ij', diff**2)**0.5\n index = np.argmin(dist)\n return index", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n for key in d1:\n total += d1[key]\n for item in d2:\n if item in d1:\n score += d2[item] * math.log(d1[item]/total)\n else:\n score += d2[item] * math.log(0.5/total)\n return score", "def __lt__(self, other):\n return self.x ** 2 + self.y ** 2 < other.x ** 2 + other.y ** 2", "def find_min(self):\n if self.is_empty():\n return None\n else:\n p = self.first()\n return (p.key(), p.value())", "def dist(self, elem_a, elem_b):\n\n # Default to empty dict in case the key doesn't exist to simplify\n # return logic\n direct_dist = self.distMap.get(elem_a, {}).get(elem_b)\n if direct_dist is None:\n reverse_dist = self.distMap.get(elem_b, {}).get(elem_a)\n if reverse_dist is None:\n # No entry stored for this pair - error out\n return -1\n\n # Found reverse mapping\n return reverse_dist\n\n # Found direct mapping\n return direct_dist", "def _findMin(p, A):\n\n m=(-1, (0,0))\n for p0 in A:\n dist = np.linalg.norm(p0-np.array(p))\n if m[0]==-1 or m[0]>dist:\n m = (dist, p0)\n \n return tuple(m[1])", "def scalar_min(self, dst, src0, src1):\n return self._scalar_binary_func('min', dst, src0, src1)", "def distance(self, keyOne, keyTwo):", "def get_min_dist(x0, y0, arr):\n dist = np.hypot(arr.T[0] - x0, arr.T[1] - y0)\n min_dist = np.min(dist)\n val = np.argmin(dist)\n return min_dist, arr[val]", "def distance(mass_1: ObjectMass, mass_2: ObjectMass) -> int:\n\n # collect orbit hops\n orbits_1 = mass_1.get_orbit_hops()\n\n orbits_2 = mass_2.get_orbit_hops()\n\n # find common orbit hop with least amount of hops\n common_hops: set = orbits_1.keys() & orbits_2.keys()\n\n hop = common_hops.pop()\n smallest_total_hops = orbits_1[hop] + orbits_2[hop]\n for hop in common_hops:\n total_hops = orbits_1[hop] + orbits_2[hop]\n\n if total_hops < smallest_total_hops:\n smallest_total_hops = total_hops\n\n return smallest_total_hops", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n\n for element in d1:\n total += d1[element]\n\n for item in d2:\n if item in d1:\n score += math.log(d1[item]/total) * (d2[item])\n else:\n score += math.log(0.5/total) * (d2[item])\n return score", "def minDist(l, 
a, b):\n pre = 0\n rt = float('INF')\n for i in range(len(l)):\n if l[i] == a or l[i] == b:\n pre = i\n break\n\n for i in range(pre+1, len(l)):\n if l[i] == a or l[i] == b:\n if l[i] != l[pre] and i - pre < rt:\n rt = i - pre\n pre = i\n return rt", "def linf(x1, x2):\n return np.max(np.abs(x1 - x2))", "def argmin_EMD(d1, d2):\n global ideal_difference\n\n if (distributions_EMD(d1, get_ideal_difference_distribution()) <=\n distributions_EMD(d2, get_ideal_difference_distribution())):\n return d1\n return d2", "def getMinimum(self):\n v1 = Vector(*self.p1)\n v2 = Vector(*self.p2)\n if v1.angle < v2.angle:\n return self.p1\n else:\n return self.p2", "def shortest(self, word1, word2):\n a, b = self.d[word1], self.d[word2]\n m, n, i, j, res = len(a), len(b), 0, 0, float('inf')\n while i < m and j < n:\n res = min(res, abs(a[i] - b[j]))\n if a[i] > b[j]:\n j += 1\n else:\n i += 1\n return res", "def minimum_subset_distance(D, limits1, limits2):\n score = numpy.ones( (limits1[1]) )\n for i in xrange(limits1[1]):\n for j in xrange(limits2[1]-limits2[0]):\n score[i] = min(score[i], D[i,j+limits2[0]-1])\n #print i, j, D[i,j+limits2[0]-1], score[i], min(score[i], D[i,j+limits2[0]-1])\n return score", "def smallest_distance(self, clusters):\n i, j = numpy.unravel_index(numpy.argmin(clusters), clusters.shape)\n return clusters[i, j], i, j", "def minimum(lhs, rhs):\n return _make.minimum(lhs, rhs)", "def distance(p1, p2):\n dist = 0\n for k in set([*p1.keys(), *p2.keys()]):\n dist += (p1.get(k, 0) - p2.get(k, 0))**2\n return math.sqrt(dist)", "def hellSimilarity(topicDict1,topicDict2):\n K = len(topicDict1)\n hellDis = 0\n for key in topicDict1.keys():\n if key not in topicDict2:\n print '%d is not in another dict...' % key\n return\n else:\n if topicDict1[key] < 0:\n topicDict1[key] = 1.0 / 10000000\n if topicDict2[key] < 0:\n topicDict2[key] = 1.0 / 10000000\n hellDis += (math.sqrt(topicDict1[key]) - math.sqrt(topicDict2[key]))**2\n hellDis = math.sqrt(hellDis)\n #distance\n hellDis = hellDis * (1.0/math.sqrt(2))\n if hellDis == 0:\n hellDis = 1.0 / 10000000\n #similarity\n hellSimilarity = 1.0 / hellDis\n return hellSimilarity", "def solve_part_one(wire_one_map, wire_two_map):\n return int(min([manhattan_distance(x, y) for (x, y) in find_intersection(wire_one_map, wire_two_map)]))", "def test_compare(self): \n d1 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.1]])\n )\n d2 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.5]])\n )\n\n # These are very loose bounds\n assert d1 < d2", "def min_distance_to_aligned_shots(shot_id, aligned_shot_ids, gps_points_dict):\n if shot_id in gps_points_dict:\n return 0\n\n distances_dict = get_distance_to_aligned_shots(shot_id, aligned_shot_ids)\n return min(distances_dict.values(), key=abs)", "def test_find_second_smallest(self):\n secondSmallestValue = sorted(self.values)[1]\n valueFound = self.tree.findSecondSmallest(self.tree.root)\n self.assertEquals(secondSmallestValue, valueFound)", "def compare(l1, l2):\n if link_weights[l1] < link_weights[l2]:\n return 1\n elif link_weights[l1] == link_weights[l2]:\n return 0\n else:\n return -1", "def distance_map(xs1, xs2):\n return jax.vmap(lambda x1: jax.vmap(lambda x2: euclidean_distance(x1, x2))(xs2))(xs1)", "def __lt__(self, other):\n return sum([node.h for node in self.nodes]) + self.cost < sum([node.h for node in other.nodes]) + other.cost", "def latvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lat_axis1 = latAxis(mv1)\n lat_axis2 = latAxis(mv2)\n if 
len(lat_axis1)<=len(lat_axis2):\n lat_axis = lat_axis1\n mv = mv1\n else:\n lat_axis = lat_axis2\n mv = mv2\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units} )\n return latmv", "def nearest_min(dist_matrix):\n # much faster than np.where\n i, j = np.unravel_index(\n np.argmin(dist_matrix), \n dims=dist_matrix.shape\n )\n return i, j", "def lowest(t1,t2):\n compare_len = min(len(t1), len(t2))\n for i in range(0,compare_len):\n if t1[i] < t2[i]:\n return t1\n elif t1[i] > t2[i]:\n return t2\n\n # if here, identical to compare_len; just pick one\n return t1", "def sub_vectors(d1,d2):\n #creates a new dictionnary to deep copy the inputs\n empty_dict = {}\n \n #creates a deep copy of d1 into an empty dictionnary\n for key in d1:\n empty_dict[key] = d1[key]\n #checks if any keys if a value of 0\n if empty_dict[key] == 0:\n #if so we would delete that key\n del empty_dict[key]\n \n #iterates through the second vector\n for key in d2:\n #checks if the current key in d2 is in d1\n if key in d1:\n #if so we would substract the value of d1 by d2\n empty_dict[key] = d1[key] - d2[key]\n #checks if the current key has the value of 0\n if empty_dict[key] == 0:\n #if so we would delete that key \n del empty_dict[key]\n \n elif key not in d1:\n #creates a new key in the new dictionnary if current key not found in d1\n empty_dict[key] = -d2[key]\n #checks if the current key has a value of 0\n if empty_dict[key] == 0:\n #deletes the current key has a value of 0\n del empty_dict[key]\n \n #returns new substracted dictionnary \n return empty_dict", "def min(self):\n p = self._find_min()\n item = p.element()\n return (item._key, item._value)", "def prolongation(kv1, kv2):\n g = kv2.greville()\n C1 = collocation(kv1, g).A\n C2 = collocation(kv2, g)\n P = scipy.sparse.linalg.spsolve(C2, C1)\n # prune matrix\n P[np.abs(P) < 1e-15] = 0.0\n return scipy.sparse.csr_matrix(P)", "def l1(x1, x2):\n return np.abs(x1 - x2)", "def _require_positive_targets(y1, y2):\n offset = abs(min(y1.min(), y2.min())) + 1\n y1 += offset\n y2 += offset\n return y1, y2", "def min_or_none(val1, val2):\n return min(val1, val2, key=lambda x: sys.maxint if x is None else x)", "def minimum(self, start, end):\n return self.foldl1(start, end, min)", "def getXmin(self):\n return min(self.p1.x, self.p2.x)", "def minDistBoundingBoxes(boxes1, boxes2):\n\n dist = np.empty((boxes1.shape[0], boxes2.shape[0]))\n for box1 in range(boxes1.shape[0]):\n for box2 in range(boxes2.shape[0]):\n dist[box1, box2] = minDistBoundingBox(boxes1[box1, :],\n boxes2[box2, :])\n return dist", "def lowest(graph, value1, value2):\n path1 = []\n onePath = []\n onePath.append(value1)\n path1 = pathDAG(graph, value1, path1, onePath)\n\n path2 = []\n onePath = []\n onePath.append(value2)\n path2 = pathDAG(graph, value2, path2, onePath)\n\n count = 0\n dictLCA = {} #If there are several paths, the diferent LCAs is saved here\n\n for path in path1:\n \n for node1 in path:\n count +=1\n for paths in path2:\n \n for node2 in paths:\n \n if node1 == node2:\n if not count in dictLCA:\n dictLCA[count] = node1\n else:\n dictLCA[count] = dictLCA[count] + \", \" + node1\n count = 0\n break\n else:\n continue #executed if inner loop do not breaks\n break #executed if inner loop do break\n else:\n continue\n break #Will break when finding the LCA for each path\n\n \n return dictLCA[min(dictLCA)] #min() looks for the CA with the shortest path", "def compareFunction( self, first, second ):\n for ascending,column in 
self.sortOrder:\n aValue,bValue = column.get(first),column.get(second)\n diff = cmp(aValue,bValue)\n if diff:\n if not ascending:\n return - diff \n else:\n return diff \n return 0", "def closest_points_naive(self, x, y):\r\n # Running time: O(n ** 2)\r\n\r\n dist = []\r\n for i in range(len(x)):\r\n for j in range(i+1, len(x)):\r\n d = self.get_euclidean_distance(x[i], x[j], y[i], y[j])\r\n dist.append(d)\r\n \r\n return min(dist)", "def closest_distance(node_a, node_b):\n min_distance = 999999\n for loc_a in node_a.locations:\n for loc_b in node_b.locations:\n distance = abs(loc_a - loc_b)\n if distance < min_distance:\n min_distance = distance\n return min_distance" ]
[ "0.69349235", "0.6200123", "0.6040021", "0.598441", "0.59417975", "0.59298074", "0.5921851", "0.5919063", "0.589691", "0.58484435", "0.5841207", "0.57669926", "0.575807", "0.57121885", "0.570274", "0.56867063", "0.5659589", "0.5648681", "0.5617436", "0.5614321", "0.56119514", "0.560704", "0.559953", "0.5594859", "0.55704874", "0.55616343", "0.5559555", "0.5555257", "0.5550604", "0.5543384", "0.554046", "0.5472846", "0.54674", "0.54575175", "0.54408026", "0.5426246", "0.54087096", "0.5407169", "0.5401506", "0.5398064", "0.5396973", "0.5360181", "0.53579473", "0.53530985", "0.53517294", "0.5345045", "0.5340295", "0.5338355", "0.53335047", "0.53144383", "0.53128713", "0.5299715", "0.52947146", "0.529343", "0.5290127", "0.52795833", "0.5279207", "0.5265525", "0.52592623", "0.5257373", "0.5253985", "0.52311933", "0.5228072", "0.5217577", "0.5216501", "0.52108526", "0.52042985", "0.52004045", "0.51916736", "0.5184784", "0.5184191", "0.51793075", "0.5178152", "0.5160376", "0.5157706", "0.51534307", "0.51454645", "0.514331", "0.5135389", "0.51316434", "0.5130867", "0.5127791", "0.51268655", "0.5123031", "0.5122978", "0.51173973", "0.5115783", "0.5114502", "0.51139164", "0.51132464", "0.51047844", "0.5103426", "0.51025283", "0.5099633", "0.50993025", "0.5096094", "0.5093655", "0.5090906", "0.5084108", "0.5079297" ]
0.78676057
0
return log probability that dictionary d came from the distribution of data in the normalized dictionaries nd1 and nd2
def compareDictionaries(self, d, nd1, nd2):
    normnd1 = self.normalizeDictionary(nd1)
    normnd2 = self.normalizeDictionary(nd2)
    total_log_prob1 = 0.0
    total_log_prob2 = 0.0
    epsilon = self.smallestValue(normnd1, normnd2)/2
    for x in d:
        if x not in normnd1:
            total_log_prob1 += log(epsilon)
        else:
            total_log_prob1 += log(normnd1[x])*d[x]
    for x in d:
        if x not in normnd2:
            total_log_prob2 += log(epsilon)
        else:
            total_log_prob2 += log(normnd2[x])*d[x]
    return [total_log_prob1, total_log_prob2]
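An illustrative standalone sketch (assumed helper names, not part of the dataset row): normalize two count dictionaries into probability distributions, smooth unseen keys with half the smallest observed probability, and score an observed dictionary d against each model.

from math import log

def normalize(nd):
    total = sum(nd.values())
    return {k: v / total for k, v in nd.items()}

def compare_dictionaries(d, nd1, nd2):
    p1, p2 = normalize(nd1), normalize(nd2)
    epsilon = min(min(p1.values()), min(p2.values())) / 2  # floor for unseen keys
    score1 = sum(log(p1[k]) * d[k] if k in p1 else log(epsilon) for k in d)
    score2 = sum(log(p2[k]) * d[k] if k in p2 else log(epsilon) for k in d)
    return [score1, score2]

# d is better explained by the first model (higher, i.e. less negative, log-probability).
print(compare_dictionaries({'a': 2, 'b': 1}, {'a': 8, 'b': 2}, {'a': 1, 'b': 9}))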
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log(d: D) -> NumDict:\n\n return d.log()", "def compare_dictionaries(d1, d2):\r\n score = 0\r\n gef = 0\r\n for z in d1:\r\n gef += d1[z]\r\n total = gef\r\n \r\n for x in d2:\r\n if x in d1:\r\n score += math.log(d1[x] / total) * d2[x] \r\n else:\r\n score += math.log(0.5/total) * d2[x]\r\n return score", "def get_entropy(dictionary):\n my_sum = 0\n weighted_sum_of_logs = 0\n for freq in dictionary.values():\n if freq:\n my_sum += freq\n weighted_sum_of_logs += freq * math.log(freq)\n return math.log(my_sum) - weighted_sum_of_logs / my_sum", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n for key in d1:\n total += d1[key]\n for item in d2:\n if item in d1:\n score += d2[item] * math.log(d1[item]/total)\n else:\n score += d2[item] * math.log(0.5/total)\n return score", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n\n for element in d1:\n total += d1[element]\n\n for item in d2:\n if item in d1:\n score += math.log(d1[item]/total) * (d2[item])\n else:\n score += math.log(0.5/total) * (d2[item])\n return score", "def get_probability(fields, dic):\r\n sum_ = sum(dic.values())\r\n p = 0.0\r\n for f in fields:\r\n value = dic.get(f, 0.0) + 0.0001\r\n p = p + math.log(float(value)/float(sum_))\r\n return p", "def get_ngramlogprobs(freqdict):\n return", "def entropy(d, total, word_count):\n\t# Entropie je - Sum_morf p(morf) * log_2 p(morf)\n\t# p(morf) = c(morf) / c(all)\n\te = 0\n\tfor count in d.values():\n\t\tp = count/total\n\t\ttype_e = - p * log2(p)\n\t\te += type_e * count\n\treturn e / word_count", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def log_probability(self, samples):\n pass", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n \n for i in d1:\n total = total + d1[i]\n for i in d2:\n if i in d1:\n if total == 0:\n score = score\n else:\n probablility = (d1[i] / total)\n score = score + (math.log10(probablility) * d2[i])\n else:\n if total == 0:\n score = score\n else:\n score = score + ((0.5 / total) * d2[i])\n return score", "def distributions_EMD(d1, d2):\n return ss.wasserstein_distance(d1.get_probs(), d2.get_probs()) / len(d1.get_probs())", "def probability_density(dic):\n\n var = dic['var']\n par = dic['par']\n y1 = dic['y']\n y = y1.conjugate() * y\n return dic_result(var,par,y)", "def log_prob(self, th):\n\n\t\tmask = self.__low <= th <= self.__high\n\t\tif len(th.shape) == 1:\n\t\t\tlength = 1\n\t\telif len(th.shape) == 2:\n\t\t\tlength = th.shape[0]\n\t\telse:\n\t\t\traise RuntimeError(\"th must be either (d,) or (n_samples, d)\")\n\t\tlogprobs = np.empty(length)\n\t\tlogprobs[mask] = 0.\n\t\tlogprobs[~mask] = -float('inf')\n\t\treturn logprobs", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... 
to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def get_log_odds_score(observed, expected):\n log_ratio = dict()\n for pe in expected.keys():\n try:\n log_ratio[pe] = int(round(2*log(observed[pe]/expected[pe], 2),0))\n except KeyError:\n log_ratio[pe] = int(-99)\n\n return log_ratio", "def log_entropy(dm):\n size = len(dm)\n entropy = 0\n w, v = np.linalg.eig(dm)\n for n in range(size):\n if w[n] != 0:\n entropy = entropy - w[n] * np.log2(w[n])\n return entropy", "def calc_marginal_entropy(data_stat):\n label_col = data_stat.keys()[-1]\n label_info = data_stat[label_col]\n label_lengths = []\n for label_poses in label_info.values():\n label_lengths.append(len(label_poses))\n # print(label_lengths)\n majority = max(label_lengths)\n minority = min(label_lengths)\n total = float(majority + minority)\n entropy = -(minority/total*math.log(minority/total,2)+majority/total*math.log(majority/total,2))\n return entropy", "def log_prob(self):", "def prob_2_entropy(prob):\r\n n, c, h, w = prob.size()\r\n return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)", "def _logprobratio(prob1, prob2):\n return log(prob1) / log(prob2)", "def __dNdlog2dN(self,Dp,dNdlogDp):\n \n x = np.log10(Dp)\n y = (x[1:]+x[:-1])/2.\n y = np.pad(y,1,'constant',constant_values=(x[0]-(y[0]-x[0]),x[-1]+(x[-1]-y[-1])))\n dlogDp = np.diff(y)\n return dNdlogDp*dlogDp # cm-3", "def joint_logpdf(self, x1, x2 = None):\n dists = self.conditionalMVNs\n joint_pdfs = np.array([d.joint_pdf(x1, x2) for d in dists])\n return np.log(np.sum(self.weights * joint_pdfs))", "def _fe_compute_domain_entropy(sample):\n # Compute entropy of domain.\n result = OrderedDict()\n p, lns = Counter(sample['domain']), float(len(sample['domain']))\n entropy = -sum(count / lns * math.log(count / lns, 2) for count in list(p.values()))\n\n result['entropy'] = entropy\n return result", "def entropy(message):\n message = letter_freq(message)\n n = sum(message.values())\n h = 0\n for n_i in message.values():\n p_i = n_i / n\n h += -p_i * log2(p_i)\n return h", "def EntropyD(dist): \n if np.any(dist>0):\n dist2 = dist/np.sum(dist)\n return -np.sum(dist2[dist2>0]*np.log2(dist2[dist2>0]))\n else:\n return 0", "def statsfromcounts(self,countMatrix):\n countSum = np.sum(countMatrix,dtype=np.float64)\n if countSum > 0.:\n p = countMatrix/countSum\n entropies = (-p)*np.log(p)\n # p=0 yields infinite log and hence nan entropy. 
We define\n # 0log(0) as 0 though:\n entropies[np.isnan(entropies)] = 0.\n entropy = np.sum(entropies)\n else:\n p = np.zeros(countMatrix.shape)\n entropy = 0.\n #\n return {'p':p,'entropy':entropy}", "def prob_dist(line1, line2, model):\n vocab = set(counts_un.keys())\n probs = dict()\n for line3 in vocab:\n probs[line3] = model.get_trigram_prob(line1, line2, line3)\n return probs", "def get_log_prob(self, x_dict, sum_features=True, feature_dims=None):\n raise NotImplementedError()", "def _logprob(self, sample):\n return 0, 0", "def _calculate_batched_logprob(self, mu, std, x):\n # Set up lower bound of std, since zero std can lead to NaN log-probability\n # Used for: torch.clamp(std_i, min=min_std...)\n # min_std = 1e-12\n \n log_probs = []\n \n # Iterate over all density components\n for i in range(self.num_densities):\n # Retrieve means and stds\n mu_i = mu[:, i, :]\n std_i = std[:, i, :]\n # Thresholding std, if std is 0, it leads to NaN loss. \n # std_i = torch.clamp(std_i, min=min_std, max=std_i.max().item())\n # Create Gaussian distribution\n dist = Normal(loc=mu_i, scale=std_i)\n # Calculate the log-probability\n logp = dist.log_prob(x)\n # Record the log probability for current density\n log_probs.append(logp)\n \n # Stack log-probabilities with shape [N, K, D]\n log_probs = torch.stack(log_probs, dim=1)\n \n return log_probs", "def log_prob(self, th):\n\n\t\tif len(th.shape) == 1:\n\t\t\tth1, th2 = th[0], th[1]\n\t\telif len(th.shape) == 2:\n\t\t\tth1, th2 = th[:,0], th[:,1]\n\t\telse:\n\t\t\traise RuntimeError(\"th must be either (2,) or (n_samples, 2)\")\n\t\tmask = (\n\t\t\t\t (th1 >= -2.) \n\t\t\t\t* (th1 <= 2.)\n\t\t\t\t* (th2 >= -1 - th1)\n\t\t\t\t* (th2 >= th1 - 1)\n\t\t\t\t* (th2 <= 1) \n\t\t\t\t* (th2 >= -1) # Redundant I think but who cares\n\t\t\t )\n\t\tlogprob = np.ones(th1.size)*self.log_prob_val\n\t\tlogprob[~mask] = -float(\"inf\")\n\t\treturn logprob", "def calc_conditional_entropy(map,data_stat,attribute):\n #acquire the data info of the attribute stored in data_stat\n data_info = data_stat[attribute]\n #acquire the label info\n # label_col = len(data_stat)-1\n label_col = data_stat.keys()[-1]\n # print(data_stat.keys())\n label_info = data_stat[label_col]\n #acquire the data \n data = map[attribute]\n labels = map[label_col]\n conditional_entropy =0\n for data_type in data_info:\n specific_entropy = 0\n for label_type in label_info: \n #attribute data indices where all data entries are equal to a speicifc value\n data_with_spec_val_idx = data_info[data_type]\n #label indices where all labels are of same value\n spec_label_idx = label_info[label_type]\n #the intersection of the two indices above\n intersect_idx = np.intersect1d(data_with_spec_val_idx,spec_label_idx)\n #conditional probability of label being of specific value given speicific data value\n temp_prob = len(intersect_idx)/float(len(data_with_spec_val_idx))\n if temp_prob!=0:\n specific_entropy += temp_prob*math.log(temp_prob,2)\n specific_entropy = -specific_entropy\n prob = len(data_with_spec_val_idx)/float(len(data))\n conditional_entropy += prob * specific_entropy\n return conditional_entropy", "def logdprior(parameters, hyperparameters):\n sigma_w_part = parameters[0] + invgamma_logpdf(parameters[0],\n hyperparameters[\"sigma_w_shape\"], hyperparameters[\"sigma_w_scale\"])\n sigma_v_part = parameters[1] + invgamma_logpdf(parameters[1], hyperparameters[\"sigma_v_shape\"], hyperparameters[\"sigma_v_scale\"])\n return sigma_w_part + sigma_v_part", "def calculateLogJointProbabilities(self, datum):\n 
logJoint = util.Counter()\n for cls in self.classes:\n class_probability = self.prior_prob[cls]\n for key, value in datum.items():\n relative_feature_values = self.likelihoods[cls][key]\n class_probability += math.log(relative_feature_values.get(datum[key], 0.01))\n\n logJoint[cls] = class_probability\n\n return logJoint", "def calculate_entropy(prob):\n return -(prob * math.log(prob,2))", "def dlogdp(self):\n return np.log10(self.bins[:, -1]) - np.log10(self.bins[:, 0])", "def gc_prob_density(r):\n return np.exp(_interp_ln_dens(r))", "def logp(self, args):\n mean, stddev, action = args\n dist = tfp.distributions.Normal(loc=mean, scale=stddev)\n logp = dist.log_prob(action)\n return logp", "def calc_match_probability(obs, pred1):\n \n # Throw away any non-atom columns\n obs_reduced = obs.loc[:, self.pars[\"atom_set\"].\n intersection(obs.columns)]\n pred1_reduced = pred1.loc[self.pars[\"atom_set\"].\n intersection(pred1.index)]\n \n # Calculate shift differences for each observed spin system\n delta = obs_reduced - pred1_reduced\n \n # Make a note of NA positions in delta, and set them to zero \n # (this avoids warnings when using norm.cdf later)\n na_mask = delta.isna()\n delta[na_mask] = 0\n \n if self.pars[\"prob_method\"] == \"delta_correlation\":\n overall_prob = pd.Series(index=delta.index)\n overall_prob[:] = 1\n \n d_mean = pd.read_csv(\"../data/d_mean.csv\", header=None, \n index_col=0).loc[delta.columns,1]\n d_cov = (pd.read_csv(\"../data/d_cov.csv\", index_col=0).\n loc[delta.columns,delta.columns])\n \n mvn = multivariate_normal(d_mean, d_cov)\n \n overall_prob = mvn.logpdf(delta)\n \n # Penalise missing shifts, unless also missing in predictions\n overall_prob = (overall_prob + log10(default_prob) * \n (na_mask.sum(axis=1) - pred1_reduced.isna().sum()))\n \n else:\n prob = delta.copy()\n prob.iloc[:,:] = 1\n \n for c in delta.columns:\n if self.pars[\"prob_method\"] == \"cdf\":\n # Use the cdf to calculate the probability of a \n # delta *at least* as great as the actual one\n prob[c] = log10(2) + norm.logcdf(-1*abs(\n pd.to_numeric(delta[c])), scale=atom_sd[c]*sf)\n elif self.pars[\"prob_method\"] == \"pdf\":\n prob[c] = norm.logpdf(pd.to_numeric(delta[c]), \n scale=atom_sd[c]*sf) \n elif shift_correlation:\n print(\"shift_correlation not yet implemented. Defaulting to pdf.\")\n prob[c] = norm.logpdf(pd.to_numeric(delta[c]), \n scale=atom_sd[c]*sf)\n else:\n print(\"Method for calculating probability not recognised. 
Defaulting to pdf.\")\n prob[c] = norm.logpdf(pd.to_numeric(delta[c]), \n scale=atom_sd[c]*sf)\n \n # In positions where data was missing, use default probability\n prob[na_mask] = log10(default_prob)\n \n # Calculate penalty for a HADAMAC mismatch\n if use_hadamac:\n # If the i-1 aa type of the predicted residue matches the \n # HADAMAC group of the observation, probability is 1.\n # Otherwise, probability defaults to 0.01\n prob[\"SS_classm1\"] = 0.01\n if type(pred1[\"Res_typem1\"])==str: # dummies have NaN\n prob.loc[obs[\"SS_classm1\"].str.find(\n pred1[\"Res_typem1\"])>=0, \"SS_classm1\"] = 1\n \n # Calculate overall probability of each row\n overall_prob = prob.sum(skipna=False, axis=1)\n \n return(overall_prob)", "def cohensd2problarger(d):\n\n return stats.norm.cdf(d / np.sqrt(2))", "def entropy(message):\n n = len(message)\n message = letter_freq(message)\n h = 0\n for n_i in message.values():\n p_i = n_i/n\n h += -p_i*(log2(p_i))\n return h", "def get_dists_2():\n d1 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d2 = Distribution(['0', '1'], [1 / 3, 2 / 3])\n d3 = Distribution(['0', '1'], [2 / 5, 3 / 5])\n return d1, d2, d3", "def cond_entropy(joint_prob, cond_prob):\n # Computing log2(P cond)\n log2_p = (np.ma.log2(cond_prob)).filled(0)\n # Multipling element wise the arrays\n prod_entropy = np.multiply(joint_prob, log2_p)\n # Getting the - sum of the resulting array.\n H = -( np.sum(prod_entropy))\n return H", "def normD(dictin):\n \n tmp1 = np.sum(np.multiply(dictin, dictin), axis=0)\n for k in range(len(tmp1)):\n if tmp1[k] == 0:\n tmp1[k] = 1e-6\n \n #tmp = 1 / np.sqrt(np.sum(np.multiply(dictin, dictin), axis=0))\n tmp = 1 / np.sqrt(tmp1)\n print(np.array(tmp1))\n return np.dot(dictin, np.diag(tmp))", "def logprob(predictions, labels):\n # prevent negative probability\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / suma), 2)\n return -res", "def proba_from_log_odds(self, log_odds):\n return (1/(1 + math.exp(log_odds)))", "def _kld_gauss(self, mean_1, std_1, mean_2, std_2):\n kld_element = (2 * torch.log(std_2) - 2 * torch.log(std_1) + (std_1.pow(2) + (mean_1 - mean_2).pow(2)) / std_2.pow(2) - 1)\n return\t0.5 * torch.sum(kld_element)", "def conditionalDistribution(self, d, v):\n probabilities_ts = np.ones((self.n_topic_components, self.n_sentiment_components))\n firstFactor = (self.n_ds[d] + self.alphaVec) / \\\n (self.n_d[d] + np.sum(self.alphaVec))\n secondFactor = np.zeros((self.n_topic_components,self.n_sentiment_components))\n for s in range(self.n_sentiment_components):\n \n secondFactor[:,s] = ((self.n_dst[d, s, :] + self.gammaVec) / \\\n (self.n_ds[d, s] + np.sum(self.gammaVec)))\n\n thirdFactor = (self.n_vts[v,:, :] + self.beta) / \\\n (self.n_ts + self.n_vts.shape[0] * self.beta)\n\n #forthFactor = np.zeros((self.n_topic_components, self.n_sentiment_components))\n #for k in range(self.n_topic_components):\n # forthFactor[k,:] = np.exp(np.dot(self.topic_embeddings[k,:],self.word_embeddings[v,:]))/np.sum(np.exp(np.dot(self.topic_embeddings[k,:],self.word_embeddings.T)))\n \n forthFactor = np.exp(np.dot(self.topic_embeddings,self.word_embeddings[v,:]))/np.sum(np.exp(np.dot(self.topic_embeddings,self.word_embeddings.T)),-1)\n probabilities_ts *= firstFactor[:, np.newaxis]\n 
#probabilities_ts *= secondFactor * thirdFactor\n probabilities_ts *= secondFactor * ((1-self.lambda_)*thirdFactor + self.lambda_*forthFactor)\n probabilities_ts /= np.sum(probabilities_ts)\n \n return probabilities_ts", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def nll_logprobs(self, input, target_idx):\n raise NotImplementedError()", "def log_prob(self, th):\n\n\t\tif len(th.shape) == 2:\n\t\t\tth0, th1 = th[:,0], th[:,1]\n\t\t\tmask = (th0 > 0.) * (th1 > 0.)\n\t\telif len(th.shape) == 1:\n\t\t\tth0, th1 = float(th[0]), float(th[1])\n\t\t\tmask = torch.tensor([th0 > 0., th1 > 0.])\n\t\telse:\n\t\t\traise IndexError(\"This class is only for 2D Gamma prior for GSE model\")\n\t\tth0, th1 = torch.as_tensor(th0), torch.as_tensor(th1)\n\t\tvals = (self.beta_prior.log_prob(th0) + self.gamma_prior.log_prob(th1)).reshape(-1)\n\t\tvals = vals.numpy()\n\t\tvals[~mask] = -float('inf')\n\t\treturn vals", "def get_log_prob(self, latent, obs):\n return self.get_log_prob_from_latent_dist(self.get_latent_dist(obs), latent)", "def bhattacharyya(dict1, dict2, inside01=True):\n s = 0\n max_value_possible = 0\n for i in {*dict1, *dict2}:\n s+=(dict1.get(i,0)*dict2.get(i,0))**.5\n if inside01:\n max_value_possible+= (max(dict1.get(i,0),dict2.get(i,0))**.5)\n if inside01:\n return -math.log(s)/max_value_possible if s!=0 else -np.inf\n else:\n return -math.log(s) if s!=0 else -np.inf", "def calc_probs(log_p):\n\n N = log_p.shape[0]\n\n log_Z_per_N = np.zeros(shape=(N, 1))\n\n for i in range(N):\n\n log_Z_per_N[i] = log_norm(log_p[i])\n\n log_p_new = log_p - log_Z_per_N\n\n p = np.exp(log_p_new)\n\n # log_Z = log_norm(log_p)\n\n # p = np.exp(log_p - log_Z)\n\n return p", "def logGauss(self):\n #firstly initialise an array to store the values\n log = np.zeros([self.num])\n \n #now want to loop through each of the points in the collections\n for i in range(self.num):\n #get the point as an array\n point = self.pick(i)\n #key characteristic of standard normal: can treat as product of independent 1D normals\n log[i] = self.d - np.log(np.sqrt(2 * np.pi)) - 0.5 * np.sum(point**2)\n return log", "def likelihoods(d_given_h, priors):\r\n # check that the lists of Pr(D|H_i) and priors are equal\r\n length = len(d_given_h)\r\n if length != len(priors):\r\n raise ValueError(\"Lists not equal lengths.\")\r\n # find weighted sum of Pr(H_i) * Pr(D|H_i)\r\n wt_sum = 0\r\n for d, p in zip(d_given_h, priors):\r\n wt_sum += d * p\r\n # divide each Pr(D|H_i) by the weighted sum and 
multiply by its prior\r\n # to get its likelihood\r\n return [d / wt_sum for d in d_given_h]", "def get_log_likelihood(response_probability, observed_response):\n \n return np.log(response_probability[observed_response])", "def calc_prob(wds, dic, neg, pos):\n tot = neg + pos\n p_neg = float(neg) / tot\n p_pos = float(pos) / tot\n ct_neg = sum(dic[\"neg\"].values())\n ct_pos = sum(dic[\"pos\"].values())\n V_neg = len(dic[\"neg\"])\n V_pos = len(dic[\"pos\"])\n V = V_neg + V_pos\n cstar_neg = log(p_neg)\n cstar_pos = log(p_pos)\n\n for term in wds:\n\n # if word from test doc is in training doc dictionary\n # under class \"neg\" compute this smoothed probability\n if term in dic[\"neg\"]:\n\n p_wi_neg = float(dic[\"neg\"][term] + 1) / (ct_neg + V + 1)\n\n # otherwise compute this smoothed probability\n else:\n\n p_wi_neg = 1.0 / (ct_neg + V + 1)\n\n # add to the cstar_neg variable\n cstar_neg += (wds[term] * log(p_wi_neg))\n\n # if word from test doc is in training doc dictionary\n # under class \"pos\" compute this smoothed probability\n if term in dic[\"pos\"]:\n\n p_wi_pos = float(dic[\"pos\"][term] + 1) / (ct_pos + V + 1)\n\n # otherwise compute this smoothed probability\n else:\n\n p_wi_pos = 1.0 / (ct_pos + V + 1)\n\n # add to the cstat_pos variable\n cstar_pos += (wds[term] * log(p_wi_pos))\n\n # return a tuple of the two probabilities\n return cstar_neg, cstar_pos", "def distance_to_proba(self, d):\n e = np.exp(-d/self.kernel_windows) # TODO : find a good heuristic for that kernel_windows\n \n return e / e.sum(axis=1, keepdims=True)", "def prior(training_data, label_list):\n\n smooth = 1 # smoothing factor\n logprob = {}\n # TODO: add your code here\n numfile1 = 0\n numfile2 = 0\n for dic in training_data:\n if(dic[\"label\"] == label_list[0]):\n numfile1 += 1\n elif(dic[\"label\"] == label_list[1]):\n numfile2 += 1\n numtotal = numfile1 + numfile2\n\n prob1 = (numfile1+smooth)/(numtotal+2)\n prob2 = (numfile2 + smooth) / (numtotal + 2)\n\n logprob[label_list[0]] = math.log(prob1)\n logprob[label_list[1]] = math.log(prob2)\n\n\n return logprob", "def log_prob(self, x, y):\n p = self.tag_log_prob(y)\n for i in range(len(y)):\n if self.out_prob(x[i], y[i]) == 0:\n return -math.inf\n\n p += math.log2(self.out_prob(x[i], y[i]))\n\n return p", "def __compute_entropy_probability(probability:np.ndarray) -> float:\n entropy = -np.sum(probability * np.log2(probability))\n return entropy", "def name_distribution_from_dict(d):\n def get_number_chars(los):\n \"returns the number of characters in the given list of strings\"\n res = 0\n for s in los:\n res += len(s)\n return res\n\n dist = dict((k, get_number_chars(v)) for (k, v) in d.items())\n total = 0\n print dist\n for k, v in dist.items():\n total += v\n\n return dict((k, v/float(total)) for (k, v) in dist.items())", "def _entropy_filter(self, prob1, prob2):\n\n\n # calculate merged prob.\n prob_merged = (prob1 + prob2)/2\n # Compute entropy for each prob.\n H1 = -prob1 * math.log(prob1) - (1-prob1) * math.log(1-prob1)\n H2 = -prob2 * math.log(prob2) - (1-prob2) * math.log(1-prob2)\n Hm = -prob_merged * math.log(prob_merged) - (1-prob_merged) * math.log(1-prob_merged)\n\n H_min = min(H1, H2, Hm)\n\n if H_min == H1:\n return prob1\n elif H_min == H2:\n return prob2\n else:\n return prob_merged", "def _normal_log_prob(self, r, scale_log):\n return -(r**2) / 2 - scale_log - self.const", "def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))", "def get_log_probss(self, latent, obs, obs_id):\n\n if 
self.use_alphabet:\n obs, alphabet = obs\n else:\n alphabet = None\n\n num_particles, batch_size, num_arcs, _ = latent.shape\n _, num_rows, num_cols = obs.shape\n latent_log_prob = self.get_latent_dist(alphabet).log_prob(latent)\n obs_dist = self.get_obs_dist(latent.view(num_particles * batch_size, num_arcs, 2))\n if hasattr(obs_dist, \"log_prob_with_id\"):\n obs_log_prob = obs_dist.log_prob_with_id(\n obs[None]\n .expand(num_particles, batch_size, num_rows, num_cols)\n .reshape(num_particles * batch_size, num_rows, num_cols),\n obs_id[None].expand(num_particles, batch_size).reshape(num_particles * batch_size),\n ).view(num_particles, batch_size)\n else:\n obs_log_prob = obs_dist.log_prob(\n obs[None]\n .expand(num_particles, batch_size, num_rows, num_cols)\n .reshape(num_particles * batch_size, num_rows, num_cols)\n ).view(num_particles, batch_size)\n\n if hasattr(self, \"likelihood_weight\"):\n obs_log_prob = obs_log_prob * self.likelihood_weight\n\n return latent_log_prob, obs_log_prob", "def persentropy(dgms):\n import numpy as np\n import math\n result = []\n\n #calculate PE for all diagrams\n for dgm in dgms:\n dgm_np = np.array(dgm)\n \n #remove the point at infinity\n inf_index = [i for i,k in enumerate(dgm_np) if k[1] == math.inf]\n if len(inf_index) > 0:\n dgm_np = np.delete(dgm_np, inf_index,0)\n\n L = np.sum(dgm_np[0:,1] - dgm_np[0:,0])\n ls = dgm_np[0:,1] - dgm_np[0:,0]\n ps = ls / L\n Hs = ps * np.log(ps)\n H = - np.sum(Hs)\n result.append(H)\n \n return np.array(result)", "def log_predictive_density(self, x_test, y_test):\r\n mu_star, var_star = self._raw_predict(x_test)\r\n return self.likelihood.log_predictive_density(y_test, mu_star, var_star)", "def compute_expected_log_prob(self):\n for (w, t), val in np.ndenumerate(self.e_log_prob):\n self.e_log_prob[w][t] = self.mean[w][t + 1] - np.log(self.zeta[t])\n return self.e_log_prob", "def dndlogdp(self):\n return self.data[self.bin_labels]", "def compute_MAP_hypothesis(D, hypotheses, p_vector):\n MAP, MAP_max = None, None\n for i in range(3,5):\n p_hi = p_vector[i]\n _p_cond = p_cond(D, hypotheses[i])\n if p_hi == 0 or _p_cond == 0:\n # Depending on the dataset, probabilities can sometimes go to 0,\n # which breaks the log method. 
Don't let the happen.\n continue\n # math.log accepts a base as the second argument\n print \"made it here\"\n MAP_val = -math.log(_p_cond, 2) - math.log(p_hi, 2)\n if MAP_max is None or MAP_max < MAP_val:\n MAP = hypotheses[i] # this can be modified to return just the index, if need be.\n return MAP", "def h(self, probs):\n\n return np.sum(-p*np.log2(p) if p > 0 else 0 for p in np.nditer(probs))", "def get_prob_for_distributions(p):\n w1 = p[0]\n mu1 = p[1]\n sigma1 = p[2]\n w2 = p[3]\n mu2 = p[4]\n sigma2 = p[5]\n w3 = p[6]\n mu3 = p[7]\n sigma3 = p[8]\n dist_range = (0, 4.330310991999920844e+01)\n x = np.linspace(dist_range[0], dist_range[1], 1000)\n A1 = np.array(w1 * mlab.normpdf(x, mu1, sigma1)).sum()\n A2 = np.array(w2 * mlab.normpdf(x, mu2, sigma2)).sum()\n A3 = np.array(w3 * mlab.normpdf(x, mu3, sigma3)).sum()\n p1 = A1 / (A1 + A2 + A3)\n p2 = A2 / (A1 + A2 + A3)\n p3 = A3 / (A1 + A2 + A3)\n return p1, p2, p3", "def log_data_prob(self, x):\n _dist = norm(self.data, self.err)\n lp = _dist.logpdf(x)\n for i in range(6):\n lp[np.isnan(lp[:,i]),i] = self.default_priors[np.isnan(lp[:,i]),i]\n\n return lp.sum(axis=1)", "def log_probability_ratio(self, a, b):\n pass", "def compute_my_variability(event_log: Log) -> float:\n prefixes: List[List[Event]] = []\n bar: Bar = IncrementalBar(\"Prefix generation\", max=len(event_log.trace_list))\n for trace in event_log.trace_list:\n trace_prefixes: List[List[Event]] = trace.get_all_prefixes()\n\n for prefix in trace_prefixes:\n if prefix not in prefixes:\n prefixes.append(prefix)\n bar.next()\n bar.finish()\n\n entropy: float = 0\n\n bar = ShadyBar(\"Prefix likelihood estimation\", max=len(prefixes))\n for prefix in prefixes:\n\n p: float = _prefix_likelihood_estimator(event_log, prefix)\n entropy += p * logarithm(p, 10)\n\n bar.next()\n bar.finish()\n\n entropy *= -1\n\n return entropy", "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions)))", "def entropy(data):\n strings, lens = Counter(data), np.float(len(data))\n return -sum(count / lens * np.log2(count / lens) for count in strings.values())", "def dlog(g, h, n):\n elts_generated = elts_generated_by_g_in_zn(g, n)\n # If this assert fails, then g wasn't actually a generator\n assert(h in elts_generated)\n return elts_generated.index(h)", "def get_log_prob_from_latent_dist(self, latent_dist, latent):\n return latent_dist.log_prob(latent)", "def logprob_dc(counts, prior, axis=None):\n # Note that this excludes the factorial(counts) term, since we explicitly\n # track permutations in assignments.\n return gammaln(np.add(counts, prior, dtype=np.float32)).sum(axis)", "def maxlogoddsscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n log_odds_list = []\n pwm_dictionary_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(len(seq) - 1):\n log_odds_score = 0\n log_odds_score_rc = 0\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq):\n log_odds_score += 0.0\n log_odds_score_rc += 0.0\n elif seq[j + i] not in [\"A\", \"C\", \"G\", \"T\"]:\n log_odds_score += 0.0\n log_odds_score_rc += 0.0\n else:\n q = pwm_dictionary[seq[j + i]][j]\n q_rc = pwm_dictionary_rc[seq[j + i]][j]\n if q == 0 or q_rc == 0:\n q = 0.000000000000000000000000000001\n # make this as close to zero as possible\n q_rc = 0.000000000000000000000000000001\n else:\n q = pwm_dictionary[seq[j + i]][j]\n q_rc = pwm_dictionary_rc[seq[j + i]][j]\n 
log_odds_score += (np.log(q / 0.25) / np.log(2)) * 100\n log_odds_score_rc += (np.log(q_rc / 0.25) / np.log(2)) * 100\n log_odds_list.append(log_odds_score)\n # FIXME: There was an error here in which we did not include \n # the reverse complement in the computation\n log_odds_list.append(log_odds_score_rc)\n max_log_odds = max(log_odds_list)\n return max_log_odds", "def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)", "def crossEntropy(p_m1):\n p_m2 = 1 - p_m1\n D = - p_m1*math.log(p_m1) - p_m2*math.log(p_m2)\n return D", "def _logp(self, trace, **inputs):\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = norm.logpdf(x=inputs['gwas_phen'],\n loc=phen_pred,\n scale=step['phenotype_sigma'])\n return phen_prob\n\n phen_probs = [calc_log(trace[idx])\n for idx in np.random.randint(0, len(self.trace), 500)]\n phen_probs = np.asmatrix(phen_probs)\n mc_logp = phen_probs.sum(axis=1).mean()\n return mc_logp", "def compute_joint_probability(token_list, token_probabilities, use_log_prob=False):\n\n log_prob = 0\n\n for word in token_list:\n\n # do not allow zero probabilites\n assert word in token_probabilities\n\n if use_log_prob:\n log_prob += token_probabilities[word]\n else:\n log_prob += log10(token_probabilities[word])\n\n if use_log_prob:\n return log_prob\n\n return 10**log_prob", "def sentence_logprob(self, sentence):\n line = get_ngrams(sentence,3)\n log_por = 0.0\n for item in line:\n raw_por = self.smoothed_trigram_probability(item)\n log_por = log_por+math.log2(raw_por)\n\n return float(log_por)", "def log_prob(sentence, LM, smoothing=False, delta=0, vocabSize=0):\n word_list = sentence.split()\n log_prob = 0\n for i in range(len(word_list)-1):\n print(word_list[i], word_list[i+1])\n bi_count = LM['bi'][word_list[i]][word_list[i+1]]\n uni_count = LM['uni'][word_list[i]]\n if uni_count == 0 and smoothing:\n return float('-inf')\n log_prob += log(((bi_count + delta)/(uni_count + delta * vocabSize)))\n return log_prob", "def _kld_update(p: Tensor, q: Tensor, log_prob: bool) ->Tuple[Tensor, int]:\n _check_same_shape(p, q)\n if p.ndim != 2 or q.ndim != 2:\n raise ValueError(f'Expected both p and q distribution to be 2D but got {p.ndim} and {q.ndim} respectively')\n total = p.shape[0]\n if log_prob:\n measures = torch.sum(p.exp() * (p - q), axis=-1)\n else:\n p = p / p.sum(axis=-1, keepdim=True)\n q = q / q.sum(axis=-1, keepdim=True)\n measures = _safe_xlogy(p, p / q).sum(axis=-1)\n return measures, total", "def probability(self):\r\n \r\n my_dict = dict()\r\n \r\n for i in self.__dtmc:\r\n \r\n sum_Pij = float(sum([self.__dtmc[i][j] for j in self.__dtmc[i]]))\r\n \r\n if sum_Pij == 0:\r\n \r\n my_dict[i] = dict()\r\n \r\n elif sum_Pij > 0:\r\n \r\n if i not in my_dict:\r\n \r\n my_dict[i] = dict()\r\n \r\n for j in self.__dtmc[i]:\r\n \r\n Pij = self.__dtmc[i][j] / sum_Pij\r\n \r\n my_dict[i][j] = Pij\r\n \r\n return my_dict", "def sumLogProb(a, b):\n if a > b:\n return a + log1p(exp(b - a))\n else:\n return b + log1p(exp(a - b))", "def log_normal(x, m, log_v):\n ################################################################################\n # TODO: Modify/complete the code here\n # Compute element-wise log probability of normal and remember to sum over\n # the last dimension\n ################################################################################\n # print(\"q_m\", m.size())\n # print(\"q_v\", 
v.size())\n const = -0.5 * x.size(-1) * torch.log(2 * torch.tensor(np.pi))\n # print(const.size())\n log_det = -0.5 * torch.sum(log_v, dim=-1)\n # print(\"log_det\", log_det.size())\n log_exp = -0.5 * torch.sum((x - m) ** 2 / (log_v.exp()), dim=-1)\n\n log_prob = const + log_det + log_exp\n\n ################################################################################\n # End of code modification\n ################################################################################\n return log_prob", "def normal_logprob(mu, sigma, z):\n normalization_constant = (-sigma.log() - 0.5 * np.log(2 * np.pi))\n square_term = -0.5 * ((z - mu) / sigma)**2\n logprob_vec = normalization_constant + square_term\n logprob = logprob_vec.sum(1)\n return logprob", "def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e", "def log_prob(target_distribution, x0, xs, accepteds):\n return np.mean([target_distribution.log_probability(x) for x in xs])" ]
[ "0.65142924", "0.6498203", "0.6478609", "0.6398017", "0.63931054", "0.63005465", "0.6257267", "0.6251998", "0.6210485", "0.6205614", "0.618205", "0.61285675", "0.61219615", "0.6059671", "0.6059194", "0.59403294", "0.592365", "0.59173363", "0.59089893", "0.5902279", "0.5892993", "0.58912075", "0.5871409", "0.58429617", "0.58380187", "0.5825428", "0.58085996", "0.5798087", "0.5795346", "0.57591474", "0.57514066", "0.57439834", "0.5729472", "0.5724085", "0.57136685", "0.5710682", "0.5706223", "0.56982", "0.56857467", "0.56789964", "0.56772995", "0.56649125", "0.5662504", "0.56549144", "0.5648961", "0.56169796", "0.5612668", "0.5612403", "0.56074584", "0.5605101", "0.56039745", "0.55972683", "0.55972683", "0.5588623", "0.55851865", "0.558379", "0.55715764", "0.55667365", "0.5549687", "0.5545053", "0.5540921", "0.5540385", "0.55388016", "0.5532724", "0.55288345", "0.55279374", "0.55274564", "0.55232227", "0.5513677", "0.550607", "0.55059195", "0.55043656", "0.5490318", "0.5489", "0.5483226", "0.54783493", "0.5478044", "0.5476628", "0.5474738", "0.5472136", "0.5470803", "0.5461828", "0.5456524", "0.5455434", "0.545268", "0.54523176", "0.5445974", "0.5442627", "0.5437678", "0.5433865", "0.5430892", "0.5418283", "0.5416512", "0.5413404", "0.5412864", "0.54089034", "0.54066783", "0.5401446", "0.53986615", "0.5396395" ]
0.782096
0
creates all of the dictionaries from input string self.text
def createAllDictionaries(self):
    self.makeSentenceLengths()
    self.makeWords()
    self.makeStems()
    self.makeGerund()
    self.makeWordLengths()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_text(self, text: str) -> SectionDict:", "def initialize_gensim_dictionary(text):\n dct = Dictionary(text)\n return dct", "def convert_to_dict(text):\n content_dict = dict()\n content_dict['clean_text'] = text\n return content_dict", "def preprocess_text(text: str) -> Tuple[List[str], Dict]:\n raise NotImplementedError", "def main_dictionary():\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n dictionary(line.split(\" \"))", "def _handle_dict(string):\n dict_lines = [line.split(Parser.FIELD_DELIM) for line in string.split(Parser.LINE_DELIM)\n if Parser.FIELD_DELIM in line]\n cur_dict = 0\n results = [{}]\n for line in dict_lines:\n if line[0] in results[cur_dict]:\n results.append({})\n cur_dict += 1\n results[cur_dict][line[0]] = line[1]\n return results", "def __init__(self, input_string):\n self.words_to_counts = {}\n self.split_and_populate_words(input_string)", "def __init__(self): # TODO: consider storing all text in an actual text file and reading from it.\n\t\tself.start_text = '''\\nI'm at the entrance to the dungeon. I sure hope I find treasure inside, \\nand not anything nasty!\n\t\t'''\n\n\t\tself.empty_text = '''\\nI'm entering a large, dark room. Looking around, there appears to be nothing \\ninside other than dust, debris and more dust. This room is empty.'''\n\t\t\n\n\t\tself.monster_text = '''\\nI've entered a very dark room. Something is approaching...it's a Monster!\n\t\t'''\n\n\t\tself.treasure_text = '''\\nI'm standing in a room with a very high ceiling. There's an alter at the \\ncenter with something on top...it's treasure!'''\n\t\t\n\n\t\tself.exit_text = '''\\nI'm standing in a long, narrow corridor. There's a large, engraded gate at the \\nend of this passage. I think this must be the exit!'''\n\n\t\t# all the text entries stored in one dictionary, indexed by room type\n\t\t\n\t\tself.room_book = {'Start':self.start_text, 'Empty':self.empty_text, 'Monster':self.monster_text, 'Treasure':\n\t\t\tself.treasure_text, 'Exit':self.exit_text}", "def make_data_dict_from_str(self,reg_exp,data_str):\n data_list=reg_exp.split(data_str)\n data_list.pop(0)\n data_dict=dict(zip(data_list[0::2],data_list[1::2]))\n # get rid of \\n at the end of the strings\n reg_exp_strip_n=re.compile(r'\\n$')\n for key in data_dict.keys():\n data_dict[key]=reg_exp_strip_n.sub('',data_dict[key])\n return data_dict", "def _build_data_from_text(self, text):\n try:\n record = json.loads(text)\n except Exception as e:\n logging.error(f\"Exception: {e}\")\n logging.error(f\"datapoint: {text}\")\n raise e\n return record", "def init() -> None:\n init_dict()\n parse_file(\"alphabet.txt\", letters)\n parse_file(\"numbers.txt\", numbers)\n parse_file(\"symbols.txt\", symbols)", "def prepare_data(data: list) -> dict:\n d = {}\n for t in data:\n d[t[0]] = read_text(t[1])\n return d", "def transform_string_to_dictionary(data_text: str) -> Dict[str, float]:\n # TODO: create an empty population dictionary\n # TODO: iterate through each line of the data set\n # TODO: extract the ordered pair on this line\n # the ordered pair has the format:\n # (Date, population count in thousands of persons)\n # TODO: extract the dat and store it as a string\n # TODO: convert the population count to a float and store it\n # TODO: add the new key-value pair to the dictionary where\n # the key is the date and the population is the value\n # TODO: return the population dictionary", "def makeDict(self, s):\n out = {}\n entries = s.split(self.dataDelimiterEntry)\n for e in entries:\n if e == \"\":\n 
continue\n c = e.split(self.dataDelimiterKey)\n out[c[0]] = c[1]\n return out", "def parse_text(self):\n self.text={}\n for i, lang in enumerate(LANGS):\n text=file(self.src).read()\n self.text[lang]=\"\"\n extracted, finish = \"\", 0\n start_string, stop_string = r\"<!--%s-->\" % lang, r\"<!--/%s-->\" % lang\n # Iterates to check multiple blocks of text within the file!\n # Pay attention to infinite loops!\n # AttributeError exception raised when no more blocks to extract exist\n while True:\n try:\n start=re.compile(start_string, re.IGNORECASE).search(text).span()[1]\n finish=re.compile(stop_string, re.IGNORECASE).search(text).span()[0]\n extracted+=text[start:finish]\n text=text[finish+1:]\n except AttributeError:\n break\n self.text[lang]+=extracted", "def parse_variables(self, text, separator=None):\n\n def splitter(x, separator=None):\n if len(x) > 1:\n y = x.split(separator)\n return (y[0], y[-1])\n return (None, None)\n\n return dict(splitter(x, separator=separator) for x in text.split(\"\\n\"))", "def create_model_owc(text: str) -> Dict[str, Set[str]]:\n dict_so_far = {}\n list_of_words = str.split(text)\n\n\n for x in range(0, len(list_of_words)):\n \"\"\"\n check if the word is followed by a period and add it to the follow list if it is, then remove the period to \n check if the word is followed by something else\n \"\"\"\n if list_of_words[x][-1] == '.':\n list_of_words[x] = list_of_words[x][0:-1]\n update_follow_set(dict_so_far, list_of_words[x], '.')\n\n else:\n update_follow_set(dict_so_far, list_of_words[x], list_of_words[x + 1].rstrip('.'))\n return dict_so_far", "def createDict(self):\n data = d.Dictionary.dictionary\n while True:\n filtered = [line.strip() for line in data if len(line) == self.wordLen]\n if len(filtered) == 0:\n self.setNewLen()\n else:\n break\n return filtered", "def inf2dict(text):\n lines = text.strip().split('\\n')\n pairs, extra_lines = split_lines(lines)\n return parse_pairs(pairs, extra_lines)", "def from_text(text):\n\n return _from_text(text, _by_text)", "def from_text(text):\n return parse(text)", "def create_dict(text):\n #On/Off case sensitivity\n text = text.lower() \n\n #handy one liner that splits words apart via whitespace, and \n #removes punctuation. 
Results in list of words.\n word_list = [s.strip(string.punctuation) for s in text.split()]\n \n d = dict()\n for word in word_list:\n d[word] = d.get(word, 0) +1\n return d", "def split_text(text: str) -> List[Dict[str, str]]:\n # split into paragraphs\n lines = text.splitlines()\n groups = common.group_list(lines, lambda a, _: a.strip() == '')\n paras = ['\\n'.join(item) for empty_line, item in groups if not empty_line]\n\n def _fallback(p, type):\n logging.warn(f'Wrong {type} format:\\n' + p)\n cells.append({'type': 'text', 'source': p})\n\n cells = []\n for p in paras:\n lines = p.splitlines() + ['']\n p += '\\n'\n if p.startswith('#'):\n # parse title\n if not _is_mark(lines[1:]):\n _fallback(p, 'title')\n else:\n m = re.match(r'#+ *', lines[0])\n cells.append({\n 'type': 'title',\n 'prefix': m[0],\n 'source': lines[0][m.span()[1]:],\n 'mark': '\\n'.join(lines[1:])})\n elif p.startswith('$$'):\n # parse equations\n m = re.findall(r'\\$\\$', p)\n if len(m) != 2:\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'equation', 'source': p})\n elif p.startswith('!['):\n # parse images\n if not lines[0].strip().endswith(')') or not _is_mark(lines[1:]):\n _fallback(p, 'image')\n else:\n cells.append({'type': 'image', 'source': p})\n elif p.startswith('|'):\n # parse table\n for i, l in enumerate(lines):\n if not l.startswith('|'):\n break\n if not _is_mark(lines[i:]):\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'table', 'source': p})\n else:\n groups = common.group_list(lines, _list)\n for prefix, item in groups:\n if len(prefix.split('__')) == 2:\n prefix = prefix.split('__')[0]\n source = '\\n'.join(item)[len(prefix):]\n if prefix == '':\n cells.append({'type': 'text', 'source': source})\n else:\n cells.append({\n 'type': 'list',\n 'prefix': prefix,\n 'source': source})\n return cells", "def parse_file(text):\n tmp = list(map(lambda x: x.replace('\\n', ''), text.split(';')))\n output = {}\n for item in tmp:\n if not '=' in item:\n continue\n firstequal = item.find('=')\n identifier = item[:firstequal].strip()\n obj = item[firstequal + 1:].strip()\n output[identifier] = obj\n return output", "def __init__(self):\r\n #\r\n # Create dictionaries for each characteristic\r\n #\r\n self.words = {} # For counting words\r\n self.wordlengths = {} # For counting word lengths\r\n self.stems = {} # For counting stems\r\n self.sentencelengths = {} # For counting sentence lengths\r\n #\r\n # Create another of your own\r\n #\r\n self.gerund = {} # For counting words with ing \r\n self.text = ''", "def iterate_over_matches(self, text: str) -> Generator[dict, None, None]:\n\n for loc, value in self.automaton.iter(text):\n yield dict(start=loc - value + 1, end=loc + 1, text=text[loc - value + 1:loc + 1])", "def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')", "def __init__(self, text):\n self.root = TrieNode(\"\")\n self.text = text\n\n # insert each word to trie\n for i in range(len(text)):\n self.insert(text[i])\n self.saved_node = None\n self.list = []", "def get_structure(self):\n main = {}\n for line in self.load():\n match = re.match('^\\s*([A-Za-z0-9_]+)(\\((\\d+)\\))?=(.*)$', line)\n if match:\n key = match.group(1)\n index = match.group(3)\n value = match.group(4)\n if index is None:\n 
main[key] = self.parse_data_value(value)\n else:\n if key not in main:\n main[key] = []\n main[key].append(self.parse_data_value(value))\n #else:\n # print(line)\n return main", "def __init__(self, text):\n # BEGIN Question 2\n self.text = text\n self.word_set = []\n # END Question 2", "def extract(self, text: str) -> list:\n nes={}\n if self.ner_model == 'spacy':\n nes=self.extract_spacy(text)\n return nes", "def _construct_report(self, text):\n result = []\n reports = self._clean_text(text)\n\n for report in reports:\n _dict = self._report_to_dict(report)\n if _dict: result.append(_dict)\n\n return result", "def _initialize_attributes(self, string_as_file):\n for row in string_as_file:\n first = row[0]\n second = row[1]\n third = row[3]\n match first:\n case 'quadrat':\n self.quadrat = { 'id': second, 'comment': third }\n case 'waypoint':\n self.waypoint = { 'name': second, 'comment': third }", "def olive_parser(text: str) -> dict:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n page_no = root['page_no']\n identifier = root['id']\n language = root['language']\n title = soup.meta['name']\n entity_type = root['entity_type']\n issue_date = soup.meta['issue_date']\n\n out = {\n \"meta\": {\n \"language\": None,\n \"type\": {}\n },\n \"r\": [],\n \"stats\": {},\n \"legacy\": {\"continuation_from\": None, \"continuation_to\": None},\n }\n out[\"meta\"][\"title\"] = title\n out[\"meta\"][\"page_no\"] = [int(page_no)]\n out[\"meta\"][\"language\"] = normalize_language(language)\n out[\"meta\"][\"type\"][\"raw\"] = entity_type\n out[\"meta\"][\"issue_date\"] = issue_date\n\n new_region = {\n \"c\": [],\n \"p\": []\n }\n\n new_paragraph = {\n \"l\": []\n }\n\n new_line = {\n \"c\": [],\n \"t\": []\n }\n\n new_token = {\n \"c\": [],\n \"tx\": \"\"\n }\n\n for primitive in soup.find_all(\"primitive\"):\n\n # store coordinate of text areas (boxes) by page\n # 1) page number, 2) coordinate list\n region = copy.deepcopy(new_region)\n region[\"c\"] = [int(i) for i in primitive.get('box').split(\" \")]\n\n para = None\n line = None\n line_counter = 0\n\n for tag in primitive.find_all(recursive=False):\n\n if tag.name == \"l\":\n\n if para is None and line is None:\n para = copy.deepcopy(new_paragraph)\n line = copy.deepcopy(new_line)\n\n if line_counter > 0 and line is not None:\n line = normalize_line(line, out[\"meta\"][\"language\"])\n para[\"l\"].append(line)\n\n if tag.get(\"p\") in [\"S\", \"SA\"] and line_counter > 0:\n region[\"p\"].append(para)\n para = copy.deepcopy(new_paragraph)\n\n line = copy.deepcopy(new_line)\n line[\"c\"] = [\n int(i)\n for i in tag.get('box').split(\" \")\n ]\n line_counter += 1\n\n if tag.name in [\"w\", \"q\"]:\n\n # store coordinates of each token\n # 1) token, 2) page number, 3) coordinate list\n t = copy.deepcopy(new_token)\n t[\"c\"] = [int(i) for i in tag.get('box').split(\" \")]\n t[\"tx\"] = tag.string\n t[\"s\"] = int(tag.get('style_ref'))\n\n if tag.name == \"q\" and tag.get('qid') is not None:\n qid = tag.get('qid')\n normalized_form = soup.find('qw', qid=qid).text\n t[\"nf\"] = normalized_form\n t[\"qid\"] = qid\n\n # append the token to the line\n line[\"t\"].append(t)\n\n # append orphan lines\n if line is not None:\n line = normalize_line(line, out[\"meta\"][\"language\"])\n para[\"l\"].append(line)\n\n region[\"p\"].append(para)\n\n if para is not None:\n out[\"r\"].append(region)\n\n out[\"legacy\"][\"id\"] = identifier\n out[\"legacy\"][\"source\"] = soup.link['source']\n \"\"\"\n # I suspect this could be deleted\n 
out[\"legacy\"][\"word_count\"] = int(soup.meta['wordcnt'])\n out[\"legacy\"][\"chars_count\"] = int(soup.meta['total_chars_count'])\n suspicious_chars_count = int(soup.meta['suspicious_chars_count'])\n out[\"legacy\"][\"suspicious_chars_count\"] = int(suspicious_chars_count)\n \"\"\"\n out[\"legacy\"][\"first_id\"] = soup.link['first_id']\n out[\"legacy\"][\"last_id\"] = soup.link['last_id']\n out[\"legacy\"][\"next_id\"] = soup.link['next_id']\n out[\"legacy\"][\"prev_id\"] = soup.link['prev_id']\n\n if root.has_attr('continuation_from'):\n out[\"legacy\"][\"continuation_from\"] = root['continuation_from']\n\n if root.has_attr('continuation_to'):\n out[\"legacy\"][\"continuation_to\"] = root['continuation_to']\n\n return out", "def parse_bibtex(self, data: str) -> Dict:\n\n new_bib = [line for line in data.splitlines() if \"= ,\" not in line]\n new_bib = \"\\n\".join(new_bib)\n bib_db: bibtexparser.bibdatabase.BibDatabase = bibtexparser.loads(new_bib)\n result = dict()\n for entry in bib_db.entries:\n osti_id = entry[\"ID\"].split(\"_\")[1]\n result[osti_id] = entry\n return result", "def suggestion_dictionaries(text):\n tool = language_check.LanguageTool('en-US')\n matches = tool.check(text)\n for i, match in enumerate(matches):\n fromy = match.fromy\n fromx = match.fromx\n ruleId = match.ruleId\n replacements = match.replacements\n matches[i] = {\"fromx\": fromx, \"fromy\": fromy, \"ruleId\": ruleId, \"replacements\": replacements}\n return matches", "def __init__(self, dictionary):\n self.d = {}\n for word in dictionary:\n abbr = self.getAbbr(word)\n if abbr in self.d:\n self.d[abbr] += word,\n else:\n self.d[abbr] = [word]", "def __init__(self, text: str):\n self.words = WORDS_RE.findall(text)", "def _parser(self,\n search_str):\n return {line_index: parsed_line_keys for (line_index, parsed_line_keys)\n in enumerate(self._load_line(search_str=search_str))\n if parsed_line_keys\n }", "def fill_in_dict():\n # assign a 'data' list from the txt file\n data = open('words.txt')\n # assign an empty 'my_dict' dictionary\n my_dict = dict()\n\n for word in data:\n # fill in dictionarys wit a keys and empty values\n my_dict[word] = ''\n return(my_dict)", "def _build_data_from_text(self, text):\n # tokenize text if tokenizer is given\n if self.tokenizer is not None:\n data = self.tokenizer.text_to_ids(text)\n else:\n data = text\n\n return data", "def make_chains(self, input_text):\n\n chains = {}\n\n words = input_text.split()\n\n for i in range(len(words) - 2):\n key = (words[i], words[i + 1])\n value = words[i + 2]\n\n if key not in chains:\n chains[key] = []\n\n chains[key].append(value)\n\n return chains", "def _parse_input(self):\n parser = re.compile(r'(\\w+)\\D+(\\d+)\\D+(\\d+)\\D+(\\d+)')\n reindeer = {}\n for line in self.puzzle_input.splitlines():\n instruction = parser.match(line)\n if not instruction:\n continue\n name, speed, flight, rest = instruction.groups()\n reindeer[name] = Reindeer(int(speed), int(flight), int(rest))\n return reindeer", "def data_from_string(text):\n return json_load(text.replace(']],\\n', ']], '))", "def extract_spacy(self, text: str)->dict:\n ners=None\n try:\n persons=[]\n locations=[]\n orgs=[]\n misc=[]\n docs=[]\n if len(text)>1000000:\n docs=self._splitCount(text,1000000)\n else:\n docs.append(text)\n for doc in docs:\n doc_spacy = self.recognizer(doc)\n for token in doc_spacy:\n if token.ent_type_ == \"PER\":\n persons.append(token.text)\n if token.ent_type_ == \"LOC\":\n locations.append(token.text)\n if token.ent_type_ == \"ORG\":\n 
orgs.append(token.text)\n if token.ent_type_ == \"MISC\":\n misc.append(token.text)\n ners={\"persons\":list(set(persons)), \"locations\":list(set(locations)),\"orgs\":list(set(orgs)), \"misc\":list(set(misc))}\n except Exception as ex:\n print('Exception while extracting NERs')\n print(str(ex))\n finally:\n return ners", "def __init__(self, content):\n # Collect results extracted as JSON\n self._json_cache = [\n match.groups()\n for match in RE_JSON_KEY.finditer(content)\n ]", "def read(cls, text):\n\n\t\treturn cls._parse(cls._tokenize(text))", "def buildDict(self, words):\r\n for word in words:\r\n self.trie.addWord(word)", "def __init__(self, dictionary):\n self.dict = {}\n for word in dictionary:\n abbr = self.gen_abbr(word)\n if abbr not in self.dict:\n word_set = set([word])\n self.dict[abbr] = word_set\n else:\n self.dict[abbr].add(word)", "def make_mimic_dict(filename):\r\n with open(filename, 'r') as file:\r\n text = file.read().lower().replace(\"'\",'').split()\r\n mimic_dict = {}\r\n prev = ''\r\n for word in text:\r\n if not prev in mimic_dict:\r\n mimic_dict[prev] = [word]\r\n else:\r\n mimic_dict[prev].append(word)\r\n prev = word\r\n return mimic_dict", "def tokenize(text: str) -> Dict:\n tokens = TOK.tokenize(text)\n output = {\n 'words': tokens.words(),\n 'offsets': tokens.offsets(),\n 'pos': tokens.pos(),\n 'lemma': tokens.lemmas(),\n 'ner': tokens.entities(),\n 'sentences': tokens.sentences(),\n }\n return output", "def build_trie(text):\n\ttrie = Trie()\n\tnumbers, words = read_file(text)\n\tfor i in range(len(words)):\n\t\ttrie.insert(words[i],numbers[i])\n\treturn trie", "def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings", "def generate_from_text(self, text):\n words = self.process_text(text)\n self.generate_from_frequencies(words)\n return self", "def init_dict() -> None:\n for elem in letters:\n ascii_dict[elem] = []\n for elem in numbers:\n ascii_dict[elem] = []\n for elem in symbols:\n ascii_dict[elem] = []", "def text_dict(text):\n hey_words = {'hello', 'hey', 'hi', 'heya', 'hiya', 'hai'}\n fine_words = {'fine', 'good', 'splendid', 'amazing', 'well', 'lovely', 'cool'}\n bye_words = {'goodbye', 'bye', 'adios'}\n thank_words = {'thanks', 'thank'}\n notes_words = {'notes', 'note'}\n tokens = set(text.split())\n\n if len(notes_words.intersection(tokens)) > 0:\n text = 'Ok sure. Please tell me what to note down.'\n\n elif len(fine_words.intersection(tokens)) > 0:\n text = '''Good to hear that. How may I help you?'''\n\n elif len(hey_words.intersection(tokens)) > 0:\n text = '''Hi Shivek. How are you doing today?'''\n\n elif len(thank_words.intersection(tokens)) > 0:\n text = '''You are welcome. Can I help you with anything else?'''\n\n elif len(bye_words.intersection(tokens)) > 0 or 'see you' in text or 'see ya' in text or 'no thank' in text:\n text = '''Ok goodbye!'''\n\n\n else:\n text = '''Sorry, I didn't get you. 
Can you please repeat?'''\n return text", "def from_string(cls, text, basename=\"(noname)\", *args, **keys):\n keys.pop(\"comment\", None) # discard comment if defined\n header, selector, comment = cls._parse_header_selector(text, basename)\n mapping = cls(basename, header, selector, comment=comment, **keys)\n try:\n mapping._check_hash(text)\n except crexc.ChecksumError as exc:\n ignore = keys.get(\"ignore_checksum\", False) or config.get_ignore_checksum()\n if ignore == \"warn\":\n log.warning(\"Checksum error\", \":\", str(exc))\n elif ignore:\n pass\n else:\n raise\n return mapping", "def createDictionnary():\n dic = {}\n root = \"https://www.airlinequality.com\"\n url_page = root+\"/review-pages/a-z-airport-reviews/\"\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}\n req = Request(url_page, headers=headers)\n webpage = urlopen(req).read()\n soup = bs(webpage, 'html.parser')\n\n r = soup.find_all('li')\n list_text = [str(val) for val in r if \"href=\\\"/airport-reviews/\" in str(val)\n and \"article\" not in str(val)]\n for texte in list_text:\n airport, link = getAirportLinks(texte)\n dic[airport.rstrip()] = root+link\n\n return dic", "def make_chains(text_string):\n\n chains = {}\n text_list = text_string.split()\n\n index = 0\n while index < (len(text_list) - 2):\n\n # create a variable to hold the current chain key\n chain_key = (text_list[index], text_list[index+1])\n # create a variable to hold the dictionary value\n new_value = text_list[index+2]\n\n if chain_key not in chains:\n chains[chain_key] = []\n\n chains[chain_key].append(new_value)\n\n index = index + 1\n # your code goes here\n\n return chains", "def variable_dicts(self):\n \n def get_variable_text(rtf_file):\n \"Returns a list of variable_texts for each variable\"\n st='Pos. 
= '\n return rtf_file.split(st)[1:]\n \n def get_variable_name(variable_text):\n st='Variable = '\n b=variable_text.split(st)[1]\n return b[b.find(' ')+1:b.find('\\t')]\n \n def find_pos(rtf):\n a=rtf\n b=a\n return b[b.find(' ')+1:b.find('\\t')]\n \n def find_variable_label(rtf):\n try:\n a=rtf\n b=a.split('Variable label = ')[1]\n return b[b.find(' ')+1:b.find('\\\\par')]\n except IndexError:\n return None\n \n def find_variable_type(rtf):\n if not 'This variable is ' in rtf: return ''\n a=rtf\n b=a.split('This variable is ')[1]\n i1=b.find(' ')+1\n i2=i1+b[i1:].find('}')\n return b[i1:i2]\n \n def find_SPSS_measurement_level(rtf):\n if not 'the SPSS measurement level is ' in rtf: return ''\n a=rtf\n b=a.split('the SPSS measurement level is ')[1]\n i1=b.find(' ')+1\n i2=i1+b[i1:].find('\\\\par')\n return b[i1:i2]\n \n def find_SPSS_user_missing_values(rtf):\n if not 'SPSS user missing values = ' in rtf: return dict()\n a=rtf\n d=a.split('SPSS user missing values = ')\n if len(d)<2: return None\n e=d[1]\n i1=e.find(' ')+1\n i2=i1+e[i1:].find('\\\\par')\n f=e[i1:i2]\n g=f.split(' ')\n i=' '.join([g[0],g[2],g[4]])\n return i\n \n def find_value_labels(rtf):\n if not 'Value = ' in rtf: return dict()\n a=rtf\n d=a.split('Value = ')[1:]\n z={}\n for e in d:\n value=e[e.find(' ')+1:e.find('\\t')]\n value=float(value)\n f=e.split('Label = ')[1]\n label=f[f.find(' ')+1:f.find('\\\\par')]\n z[value]=label\n #print(z)\n return z\n \n variable_texts=get_variable_text(self.rtf)\n #pprint(variable_texts[0:2])\n \n result=[]\n for variable_text in variable_texts:\n d={'pos':find_pos(variable_text),\n 'variable':get_variable_name(variable_text),\n 'variable_label':find_variable_label(variable_text),\n 'variable_type':find_variable_type(variable_text),\n 'SPSS_measurement_level':find_SPSS_measurement_level(variable_text),\n 'SPSS_user_missing_values':find_SPSS_user_missing_values(variable_text),\n 'value_labels':find_value_labels(variable_text) \n }\n result.append(d)\n \n return result", "def func2(string:str):\n with open(string,\"r\") as file:\n data = file.read()\n data = data.split(\"bandwidths [1]:\")[0]\n\n final = {}\n for i in range(1,3):\n final[\"formants [{}]\".format(i)] = []\n my_list = data.split(\"formants\")\n for i in range(2,4):\n final[\"formants [{}]\".format(i-1)].extend(list(map(pars_points,my_list[i].split(\"points \")[1:])))\n return final", "def form_dict(path):\n data={}\n try:\n f=codecs.open(path, \"r\", \"utf-8\")\n text=f.read()\n f.close()\n except Exception:text=None\n if text!=None:\n #print text\n lines=text.split(\"\\n\")\n for sline in lines:\n if sline!=\"\" or sline==None:line_data=sline.partition(\":\")\n if len(line_data)==3:\n try:\n kin=line_data[0].strip().decode(\"utf-8\")\n data[kin.lower()]=line_data[2].strip()\n except:pass\n return data", "def str2dic(self, string):\n dic = {}\n list0=string.split(\"&\")\n for i in list0:\n list2 = i.split(\"=\")\n dic[list2[0]] = list2[1]\n return dic", "def create(self, text):\r\n self.require_collection()\r\n request = http.Request('POST', self.get_url(), self.wrap_object(text))\r\n\r\n return request, parsers.parse_json", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = 
datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def parse(cls, data, strict=False):\n\n txt = cls(strict=strict)\n\n while data:\n length = ord(data[0])\n item = data[1:length+1].split('=', 1)\n\n # Add the item only if the name is non-empty and there are\n # no existing items with the same name\n if item[0] and (item[0] not in txt):\n if len(item) == 1:\n txt[item[0]] = None\n else:\n txt[item[0]] = item[1]\n\n data = data[length+1:]\n\n return txt", "def train(s):\n # Creates a new dictionary.\n newDict = {}\n # Separates the string into a list based on the spaces between words.\n wordList = s.split(' ')\n # Loops through the entire list of words. If the word is not in the dictionary, add the current word as a key and add the next word to its list. If the word is in the dictionary, add the next word to the list of the current word. Accounts for the last word in the list by performing the same operation on the first word in the list so that it connects.\n for num in range(len(wordList)):\n if num == len(wordList) - 1:\n if wordList[num] not in newDict:\n newDict[wordList[num]] = []\n newDict[wordList[num]].append(wordList[0])\n else:\n newDict[wordList[num]].append(wordList[0])\n else:\n if wordList[num] not in newDict:\n newDict[wordList[num]] = []\n newDict[wordList[num]].append(wordList[num + 1])\n else:\n newDict[wordList[num]].append(wordList[num + 1])\n\n return newDict", "def parse(self) -> Dictionary:\n self.parsed_dictionary = dictionary = Dictionary()\n state = State.pre_signature\n for lineno, line in self.line_iter:\n lineno += 1\n line = decomment_and_normalize(line)\n if line == \"\": continue\n parsed = False\n expected_lines = State.expected_lines(state)\n for t in expected_lines:\n parsed, state = t.parse_line(state, dictionary, line, lineno)\n if parsed: break\n if not parsed:\n raise DictionaryParseError(lineno, expected_lines, self.source)\n if State.is_not_final(state):\n raise DictionaryParseError(lineno + 1, expected_lines, self.source)\n try:\n del dictionary._last_article\n del dictionary._last_definition\n del dictionary._last_example\n del dictionary._last_idiom\n except AttributeError:\n pass\n return dictionary", "def pgn2dict(txt):\n result = {}\n for line in txt:\n if not line:\n continue\n match = re.search(r'(\\w+) \"(.*)\"', line).groups()\n result[match[0]] = match[1].replace(\"'\", \"''\")\n\n return result", "def create_dictionaries(chars):\n return dict((c, i) for i, c in enumerate(chars)), dict((i, c) for i, c in enumerate(chars))", "def get_data(self):\n self.data = dict()\n # list to save all the attributes we are going to create\n self.attr = []\n # list to save all the groups available in the incomming input\n self.groups.extend(self.values.keys())\n # Grouping\n self.parse_data()", "def __init__(self, data):\n\n data = data['results']\n textList = {}\n ansList = {}\n diffList = {}\n for i in range(len(data)):\n key = str(i)\n if \"&\" in data[i]['question']:\n pass\n else:\n textList[key] = data[i]['question']\n ansList[key] = data[i]['correct_answer'].lower()\n diffList[key] = data[i]['difficulty']\n\n self.textList = textList\n self.ansList = ansList\n self.diffList = diffList\n 
self.data = data\n return", "def __init__(self, text):\n self.text = text\n self.letters = [letters[c] for c in self.text]\n self.width = sum(let.width + 1 for let in self.letters)\n self._offset = width\n self.is_done = False", "def build(ctx, inputs, output, cs):\n click.echo('chemdataextractor.dict.build')\n dt = DictionaryTagger(lexicon=ChemLexicon(), case_sensitive=cs)\n names = []\n for input in inputs:\n for line in input:\n tokens = line.split()\n names.append(tokens)\n dt.build(words=names)\n dt.save(output)", "def _parse(self, content):\n os.environ['ASTER_VERSION_DIR'] = self.dirn\n cfg = {}\n self._content = content\n for l in split_endlines(self._content):\n if not re.search('^[ ]*#', l):\n try:\n typ, nam, ver, val = l.split('|')\n #print '========>', typ, '//', nam, '//', ver, '//', val\n typ = re.sub('^[ ]*', '', re.sub('[ ]*$', '', typ)).strip()\n val = re.sub('^[ ]*', '', re.sub('[ ]*$', '', val)).strip()\n if val != '':\n val = osp.expandvars(val)\n if cfg.has_key(typ):\n cfg[typ].append(val)\n else:\n cfg[typ] = [val]\n except ValueError:\n pass\n return cfg", "def _split(self):\n \n self._words = []\n \n # (1) Expand contractions\n text = self._text.replace(\"'m \", \" am \")\n text = text.replace(\"'d \", \" would \")\n text = text.replace(\"'ll \", \" will \")\n text = text.replace(\"'ve \", \" have \")\n text = text.replace(\"'re \", \" are \")\n text = text.replace(\"can't \", \"can not \")\n text = text.replace(\"won't \", \"will not \")\n text = text.replace(\"n't \", \" not \")\n # Assume possesives are contractions of is\n text = text.replace(\"'s \", \" is \")\n text = text.replace(\"s' \", \"s \")\n \n # (2) Replace newlines, carriage returns, tabs, form feed with space.\n text = re.sub('[\\r\\n\\t\\f]', ' ', text)\n \n # (3) remove duplicate spaces\n text = re.sub(' +', ' ', text.strip())\n \n # Empty text\n if len(text) == 0:\n return \n \n # (4) Split text by whitespace (tokenize).\n words = text.split(' ')\n \n # (5) Separate out punctuation\n for word in words:\n length = len(word)\n \n begin = 0\n for i in range(0,length):\n if not word[i].isdigit() and not word[i].isalpha():\n # decimal, thousandths, fraction symbol\n if word[i] in ['.', ',', '/'] and i < length-1 and word[i+1].isdigit():\n continue\n # degree\n if word[i] in ['°'] and i < length-1 and word[i+1] in [ 'f', 'F', 'c', 'C']:\n continue\n # sign symbol\n if word[i] in ['-', '+'] and i < length-1 and (word[i+1].isdigit() or word[i+1] in ['.', ',']):\n # first char or exponent\n if begin == i or word[i-1] in ['e', 'E']:\n continue\n \n if begin != i:\n self._words.append( { 'word': word[begin:i], 'tag': Vocabulary.UNTAG } )\n if word[i] in [ '.', '?', '!', ',', ':', ';', '(', ')', '[', ']', '\"', '\\'', '¿', '¡']:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.PUNCT } )\n # non-printable ascii\n elif (ord(word[i]) >= 0 and ord(word[i]) <= 7) or (ord(word[i]) >= 14 and ord(word[i]) <= 31):\n pass\n else:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.SYMBOL } )\n begin = i + 1\n if begin < length:\n self._words.append( { 'word': word[begin:], 'tag': Vocabulary.UNTAG } )", "def make_chains(text_string):\n chains = {} \n\n words = text_string.split()\n \n for i in range(len(words) - 2):\n word_after_pair = words[i + 2]\n word_pair = (words[i], words[i + 1])\n\n if word_pair not in chains:\n chains[word_pair] = []\n #Need to make the value a list by putting brackets around it\n chains[word_pair].append(word_after_pair)\n\n return chains\n\n #print word_pair\n\n #tuple is 
in dict\n #tuple is not in dict, inlude it as a new addition to the list\n \n # input_text = {}\n # for text in \n \n # chains = make_chains(input_text)", "def __init__(self, words, text):\n\n self.prompts = words\n self.template = text", "def make_chains(text_string):\n\n chains = {}\n words = text_string.split()\n\n for i in range(len(words) - 2):\n key = (words[i], words[i + 1])\n value = words[i + 2]\n #print key, value\n\n if key not in chains:\n chains[key] = []\n chains[key].append(value)\n\n # print chains\n return chains", "async def parse(self, raw: str) -> dict:", "def make_trie(self, root, *words):\n _end = '_end_'\n for word in words:\n current_dict = root\n for letter in word:\n current_dict = current_dict.setdefault(letter, {})\n current_dict[_end] = word\n\n return root", "def loads(text):\n values = {}\n for line in text.splitlines():\n line = line.strip()\n if not line or line.startswith(\"#\"):\n continue\n name, value = line.split(\"=\", 1)\n values[name] = value\n return Options(options_values=values)", "def _parse_questions(self, text, params):\n\n try:\n data = json.loads(text)\n except ValueError as e:\n Utils.log(traceback.format_exc())\n raise Exception('Could not get content')\n\n output = {}\n output['count'] = data['count']\n output['pages'] = data['pages']\n output['page'] = params['page'] if 'page' in params else 1\n output['questions'] = []\n for q in data['questions']:\n output['questions'].append(self.convert_question(q))\n\n return output", "def _create_dictionary(self, document):\n words = self._normalize_words(document.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def make_tweet_dict( txt ):\n txtLow = ' ' + txt.lower() + ' '\n\n # result storage\n fvec = {}\n\n # search for each feature\n for test in testFeatures:\n\n key = test[0]\n\n fvec[key] = False;\n for tstr in test[1]:\n fvec[key] = fvec[key] or (txtLow.find(tstr) != -1)\n\n return fvec", "def parse_chat_input(text: str) -> dict:\n user_match = re.match(r\"user\\s[\\w]{1,12}\", text) #('user [uname]')\n chat_match = re.match(r\"say\\s([\\w\\s,.!()?]{1,140})\", text) #('say [message]')\n help_match = text == 'help' # plain help command\n choice = 0\n chat_data = None\n\n #Check for user creation\n if user_match is not None:\n commands = split_words(text)\n #2nd word should be the username i.e \"user foo\"\n choice = 1\n uname = commands[1]\n logging.info(\"User login requested for user: \" + uname)\n chat_data = {'username':uname}\n\n #Check for chat message\n elif chat_match != None:\n choice = 2\n chat_data = chat_match.group(1) #all matching text\n\n elif help_match:\n choice = 3\n\n return {'choice':choice, 'chat-data':chat_data}", "def make_chains(text_string):\n\n # Split text string into a list of words\n words = text_string.split()\n\n # Create dictionary\n chains = {}\n\n # Iterate over the index numbers of the list\n for i in range(len(words)-2):\n \n # Create a tuple of two n-grams\n bigrams = (words[i], words[i+1])\n\n # Check for repeat of keys / bigrams\n if bigrams not in chains:\n\n # If the key doesn't exists, add key to chains\n chains[bigrams] = [words[i+2]]\n\n # If bigram is in the list, append value to the type list\n else:\n chains[bigrams].append(words[i+2])\n\n # Import pprint\n # Pprint.pprint(chains)\n return chains", "def parse_dict(txt):\n pairs = txt[txt.index('{')+1:txt.rindex('}')].split(',') # need to inplement a correct split by comma\n d = {}\n for p in pairs:\n if p:\n splt = p.split(':')\n key 
= splt[0].strip()\n value = splt[1].strip()\n if value[0] == '{':\n value = parse_dict(value)\n d[key] = value\n return d", "def get_string_stech_dict(stech_string):\n stech_dict = {}\n try:\n stech_lst = stech_string.split(\",\") # Generates a stech list: [\"A:3\", \"B:2\", ...]\n for stech in stech_lst:\n chain, number = stech.split(\":\")\n stech_dict[chain] = int(number) # Chain id as key and number as value: { \"A\": 3, \"B\": 2, ...}\n return stech_dict\n except:\n sys.stderr.write(\"Stechometry string format is wrong, please follow this format: A:2,B:11,C:4, ...\")\n sys.exit(1)", "def __init__(self):\n self.kids = dict()\n self.val = None\n self.isWord = False", "def analyze_word(s):\n\n a = {}\n a['word'] = s\n a['n_letters'] = len(s)\n a['n_vowels'] = count_vowels(s)\n \n return a", "def parse_from_string(self, file_content: str):\n self._split_to_tokens(file_content)\n if not self._convert_tokens_to_dict():\n log.error('Failed to generate dictionary representation of file.')\n return None\n return self._result", "def get_dict_of_str2(self):\n pass", "def build_frequency_dict(text: bytes) -> Dict[int, int]:\n freq_dic = {}\n for elem in text:\n if elem not in freq_dic:\n freq_dic[elem] = 1\n else:\n freq_dic[elem] += 1\n return freq_dic", "def process(raw):\n entry = { }\n cooked = [ ]\n\n for line in raw:\n line = line.strip()\n if len(line) == 0 or line[0]==\"#\" :\n continue\n parts = line.split(';')\n if len(parts) == 3:\n entry[\"description\"] = parts[0].strip() #adding key and values to the dict\n entry[\"long\"] = parts[1].strip()\n entry[\"lat\"] = parts[2].strip()\n cooked.append(entry) #add this dict entry into the array\n entry = { }\n continue\n else:\n raise ValueError(\"Trouble wiht line: '{}'\\n\".format(line))\n \n return cooked #returning an array of dicts", "def __init__(self, words):\n self.d = {}\n for i, w in enumerate(words):\n self.d[w] = self.d.get(w, []) + [i]", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def __call__(self, text:str) -> List[Dict[str,any]]:\n\n # Extract matches\n if len(self.separators) == 0:\n return list(self.decorate_spans(self.keep_maximal_matches(self.iterate_over_matches(text))))\n else:\n return list(self.decorate_spans(self.keep_tokens(\n self.keep_maximal_matches(self.iterate_over_matches(text)), text, self.separators)))", "def parse(self, text):\n return self.dict.txt2vec(text)", "def _create_dictionaries(self, chars):\n dictionary = dict()\n for char in chars:\n dictionary[char] = len(dictionary)\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return dictionary, reverse_dictionary" ]
[ "0.6951967", "0.65007186", "0.6391913", "0.6171587", "0.6150453", "0.6135733", "0.60717666", "0.60355055", "0.5923922", "0.5921384", "0.5914638", "0.59050655", "0.59048057", "0.58697015", "0.5864588", "0.5859098", "0.58579123", "0.58231443", "0.5812766", "0.57927376", "0.57916284", "0.57827824", "0.5772654", "0.5770726", "0.57679754", "0.5759098", "0.5752025", "0.57501125", "0.5739393", "0.57367516", "0.57339466", "0.57208854", "0.5716723", "0.56964535", "0.56837374", "0.5677288", "0.567426", "0.565222", "0.5631077", "0.5630772", "0.56209064", "0.56208", "0.561497", "0.55892116", "0.5577832", "0.5573337", "0.554734", "0.55432034", "0.5519831", "0.5518066", "0.55072874", "0.5505701", "0.5483854", "0.54663914", "0.5461001", "0.54385495", "0.5437964", "0.5437381", "0.542576", "0.5413496", "0.5413317", "0.5412406", "0.54111797", "0.541042", "0.5405822", "0.53879243", "0.5387132", "0.5386882", "0.5385362", "0.5377617", "0.53691655", "0.5361802", "0.5361654", "0.53599936", "0.53597665", "0.53569806", "0.53566647", "0.5354019", "0.5332845", "0.533247", "0.53293425", "0.53246784", "0.5315912", "0.53156644", "0.53131306", "0.53071517", "0.5302007", "0.529727", "0.5295617", "0.5285538", "0.528382", "0.5278442", "0.5275525", "0.5270797", "0.527063", "0.5270202", "0.52660275", "0.5262441", "0.52617097", "0.5240852" ]
0.624055
3
Compute in a simple way, but O(N log N) complexity
def missing_integer_simple(l): n = len(l)-1 expected = n*(n+1)/2 found = 0 for num in l: if num is not None: found += num print(expected-found)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute(n):\n if n == 1:\n return 1\n else:\n i = find_i(n)\n return 2 * compute(n - i) + 2 ** i - 1", "def time_complexities():\n return \"Best Case: O(n), Average Case: O(n), Worst Case: O(n)\"", "def solution(number): # O(N)\n m = {\n 0: 0,\n 1: 1\n } # O(1)\n\n for i in range(2, number + 1): # O(N)\n m[i] = m[i - 1] + m[i - 2] # O(1)\n\n return m[number] # O(1)", "def fn(k, i, j):\n if not (0 <= i < N and 0 <= j < N): return 0\n if k == 0: return 1 \n return 1/8*sum(fn(k-1, i+ii, j+jj) for ii, jj in ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)))", "def f(i):\n return e(2**N-1-i) ^ 2**(N-1)", "def U(xs):\n ret = 0\n for x in xs:\n ret += log(x)\n return ret", "def find_i(n):\n lst = []\n for i in range(1, n):\n lst.append(2 * compute(n - i) + 2 ** i - 1)\n result = min(lst)\n return lst.index(result) + 1", "def fn(i):\n if i == 0: return 1 # boundary condition \n ans = 0\n for k in range(1, N+1): \n if k not in seen and (k%i == 0 or i%k == 0): \n seen.add(k)\n ans += fn(i-1)\n seen.remove(k)\n return ans", "def fn(i, j, mv):\n if not (0 <= i < m and 0 <= j < n): return 1 \n if mv == 0: return 0\n return (fn(i-1, j, mv-1) + fn(i, j-1, mv-1) + fn(i, j+1, mv-1) + fn(i+1, j, mv-1)) % 1_000_000_007", "def fn(n):\n if n == 0: return 1\n return sum(fn(i)*fn(n-i-1) for i in range(n))", "def method2():\n n = 1000\n s = 0\n multiples = [3,5]\n total = []\n\n for m in multiples:\n total.append(0)\n\n minValue = 0\n while(minValue < 1000):\n minValue = 1000\n minPosition = 0\n for i, v in enumerate(total):\n if v < minValue:\n minValue = v\n minPosition = i\n\n temp = total[minPosition] + multiples[minPosition]\n\n if(temp < 1000) and (temp not in total):\n s += temp\n\n total[minPosition] = temp\n\n return s", "def get_n1(r,N):\n n1 = N - np.sum(r)\n return n1", "def optimize(self, ngen):\n res = 0\n for res in self(ngen):\n pass\n return res", "def C(n,k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0", "def fn(x):\n if x <= 0: return int(x == 0)\n return sum(fn(x - xx) for xx in nums)", "def fn(n, k):\n if n == k: return 1\n if k == 0: return 0\n return ((n-1)*fn(n-1, k) + fn(n-1, k-1)) % 1_000_000_007", "def twentyone():\r\n \r\n notamicable = []\r\n isamicable = []\r\n \r\n for i in range(10000):\r\n if i not in notamicable and i not in isamicable:\r\n a = i\r\n b = amicable(findDivisors(a))\r\n c = amicable(findDivisors(b))\r\n if a == c and not a == b:\r\n isamicable.append(a)\r\n isamicable.append(b)\r\n else:\r\n notamicable.append(a)\r\n notamicable.append(b)\r\n \r\n print isamicable\r\n t = 0\r\n for v in isamicable:\r\n t += v\r\n return t", "def I (self, n):", "def F(N,k=0) :\n accum = 0.0\n for i in xrange(1,N+1-k) :\n accum += (1.0+F(N-1,k+i-1))/N\n return accum", "def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1", "def fn(i, j):\n if not (0 <= i < n and 0 <= j < n): return inf\n if i == 0: return A[i][j]\n return min(fn(i-1, j-1), fn(i-1, j), fn(i-1, j+1)) + A[i][j]", "def schrage_nlogn(data):\n N = data.copy()\n for i in range(len(data)):\n N[i] = (N[i][0], N[i])\n heapq.heapify(N)\n \"\"\"\"\n mozna to zaltwic przy wczytaniu danych nie wplywa na zloznosc samego algorytmu\n \n N to tablica tablica krotek takich że (r , [r, p,q]), (r1, [r1 ,p1 , q1]) ........\n heapq sortuje po pierwszym elemncie dlatego tak\n \n G analogicznie z tym że sortowane jest malejaco po q więc G = [(q, [r, p ,q ]), 
(q1, [r1, p1, q1]) .......... ] \n \"\"\"\n G = []\n Pi = []\n t = N[0][0]\n start = timer()\n while len(G) != 0 or len(N) != 0:\n while len(N) != 0 and Schrage.save_min(N) <= t:\n e = heapq.heappop(N)\n heapq.heappush(G, (-e[1][2], e[1])) # O(log n)\n if len(G) != 0:\n e = heapq.heappop(G) # O(log n)\n Pi.append(e[1]) # O(1)\n t = t + e[1][1]\n else:\n t = N[0][0] # O(1)\n end = timer()\n executionTime = end - start\n return Pi, executionTime", "def P(N,L) :\n accum = 0.0\n sign = 1.0\n for i in xrange(len(L)+1) :\n accum2 = 0.0\n for combin in combinations(L,i) :\n term = 1.0\n j = 0.0\n for Li in combin :\n term *= Li/(N-j)\n j += 1\n accum2 += term\n accum += sign*accum2\n sign *= -1.0\n return accum", "def performance():\n\tn = 1024\n\twhile n < 5000000: \n\t\tsorted = range(n)\n\t\tnow = time()\n\n\t\t# Code whose performance is to be evalutated\n\t\tbs_contains(sorted, -1)\n\n\t\tdone = time()\n\n\t\tprint n, (done-now)*10000\n\t\tn *= 2", "def problem9_naive(n):\n for a in range(4, n, 4):\n for b in range(3, n - a):\n c = n - a - b\n if a ** 2 + b ** 2 == c ** 2:\n return a * b * c\n return None", "def m(self):\n\t\tn = 0\n\t\ti = self.k0\n\t\twhile 1:\n\t\t\tif i > self.j:\n\t\t\t\treturn n\n\t\t\tif not self.cons(i):\n\t\t\t\tbreak\n\t\t\ti = i + 1\n\t\ti = i + 1\n\t\twhile 1:\n\t\t\twhile 1:\n\t\t\t\tif i > self.j:\n\t\t\t\t\treturn n\n\t\t\t\tif self.cons(i):\n\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\t\ti = i + 1\n\t\t\tn = n + 1\n\t\t\twhile 1:\n\t\t\t\tif i > self.j:\n\t\t\t\t\treturn n\n\t\t\t\tif not self.cons(i):\n\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\t\ti = i + 1", "def fn(i, j):\n if i == len(costs): return 0 # no more houses to paint \n return costs[i][j] + min(fn(i+1, jj) for jj in range(3) if j != jj)", "def cost_(theta, X, Y):\n if X.shape[1] + 1 != theta.size or X.shape[0] != Y.size:\n print(\"Inc dim\")\n return\n c = cost_elem_(theta, X, Y)\n s = 0\n for i in c:\n s = s + i\n return(s)", "def test_large_sum(self):\n for n in [10, 20, 30, 40, 50]:\n A = np.arange(n*n)\n A = np.reshape(A, (n, n))\n x = Variable(n, n)\n p = Problem(Minimize(at.sum_entries(x)), [x >= A])\n result = p.solve()\n answer = n*n*(n*n+1)/2 - n*n\n print(result - answer)\n self.assertAlmostEqual(result, answer)", "def solution(A):\n \"\"\"method 2 n**2\n east=[] #0\n west=[] #1\n for i in range(len(A)):\n if A[i] == 0:\n east.append(i)\n else:\n west.append(i)\n\n result = 0\n for e in east:\n count = 0\n for j in range(len(west)):\n if e > west[j]:\n continue\n if e < west[j]:\n count = len(west) - j\n result += count\n #print(e, count)\n break\n return result\n \"\"\"\n east=[] #0\n west=[] #1\n l = len(A)\n for i in range(len(A)):\n if A[i] == 0:\n east.append(i)\n else:\n west.append(i)\n\n result = {}\n for i in range(len(east)):\n e = east[i]\n if i == 0:\n result[e] = l - e - len(east)\n if i != 0:\n result[e] = result[east[i-1]] - (e - east[i-1]-1)\n\n #print(result)\n s = sum(result.values())\n if s > 1000000000:\n return -1\n return s", "def solution(A):\n \n cars = 0\n ones = 0\n\n for i in range(len(A), 0, -1):\n\n if A[i-1] == 1:\n ones += 1\n else:\n cars += ones\n\n return (-1 if cars > 1000000000 else cars)", "def sol(n, mem):\n if mem[n] != -1:\n return mem[n]\n \n mem[n] = 0\n for i in range(2, n+1):\n mem[n]+=sol(n-i, mem)*sol(i-2, mem)\n \n return mem[n]", "def prodi( iterable ):\n p= 1\n for n in iterable:\n p *= n\n return p", "def solve(n=10):\n return sum(M_N_S(n, d)[2] for d in range(10))", "def compute(a):\n it = iter(a)\n while True:\n op = next(it)\n\n # Invalid 
opcode check\n if op not in (1, 2, 99):\n raise ValueError\n\n # Halt opcode\n if op == 99:\n break\n\n x, y, z = next(it), next(it), next(it)\n if op == 1:\n a[z] = a[x] + a[y]\n else:\n a[z] = a[x] * a[y]\n\n return a[0]", "def am(n):\r\n for i in range(1,n+1):\r\n if i not in d1.keys() :\r\n d1[i] = d1[i-1] + (i*((-1)**i))\r\n# print(d1)\r\n else:\r\n pass\r\n return d1[n]", "def ncore(self):", "def fn(vals):\n total = odd = 0 \n for i, x in enumerate(vals): \n if vals[0] == x: \n total += 1\n if i&1: odd += 1\n elif vals[0] ^ x != (1 << n) - 1: return inf\n ans = inf \n if len(vals) <= 2*total <= len(vals)+1: ans = min(ans, odd)\n if len(vals)-1 <= 2*total <= len(vals): ans = min(ans, total - odd)\n return ans", "def fn(n, k):\n if n <= k: return 0 # one mailbox for each house\n if k == 1: return mdist[0][n-1]\n ans = inf \n for nn in range(k-1, n): \n ans = min(ans, fn(nn, k-1) + mdist[nn][n-1])\n return ans", "def largest_sum_nonadjacents_numbers_1(L: List[int]) -> int:\n cache_n2 = 0\n cache_n1 = 0\n best_sum = 0\n for l in L:\n best_sum = max(l + cache_n2, cache_n1)\n # update cache for next iteration\n cache_n2 = cache_n1\n cache_n1 = best_sum\n\n return best_sum", "def fakultet (n = 1):\n sum = 1\n for i in range(n, 1, -1):\n sum *= i\n return sum", "def complexity(self):\n raise NotImplementedError()", "def index(i, j):\n return i * N + j", "def crt(a, n):\n p = i = prod = 1\n sm = 0\n\n for i in range(len(a)): prod *= n[i]\n for i in range(len(a)): \n p = prod // n[i]\n sm += a[i] * inverseMod(p, n[i]) * p\n return sm % prod", "def solution2(nums, K):\n s = 0\n sum_til = []\n for n in nums:\n s += n\n sum_til.append(s)\n\n l = len(nums)\n for i in range(l):\n for j in range(i+1, l):\n sum_ij = sum_til[j] if i == 0 else sum_til[j] - sum_til[i-1]\n if K != 0 and sum_ij % K == 0:\n return True\n if K == 0 and sum_ij == 0:\n return True\n return False", "def arrayManipulation_brute(n, queries):\n arr = [0] * n\n\n for i, row in enumerate(queries):\n a, b, k = row[0], row[1], row[2]\n for j in range(a - 1, b):\n arr[j] = arr[j] + k\n print(f'array size {arr.__sizeof__()/1000000}')\n return max(arr)", "def d(n):\n return sum(divisors(n))", "def d(n):\n if n not in d_memo:\n # Start with 1 so n isn't counted\n total = 1\n # Loop from 2 to sqrt(n)\n for i in xrange(2, int(n**0.5) + 1):\n if n % i == 0:\n total += i\n # Only add the other divisor if it isn't a square\n if i * i != n:\n total += n/i\n\n d_memo[n] = total\n\n return d_memo[n]", "def main(n):\n return sum(f(i) for i in xrange(n))", "def levin(x):\n summ = 0\n for t, l in x: # for the time and length of each algorithm\n summ += l + np.log(t)\n return summ", "def fn(n):\n if n == 1: return 1\n return max(max(i, fn(i))*max(n-i, fn(n-i)) for i in range(1, n//2+1))", "def k(n):\r\n primes = u.sieve(n)\r\n l = [1, 0]\r\n for i in range(2, n + 1):\r\n l1 = [l[r] * sopf(i - r, primes) for r in range(1, i)]\r\n s = (sum(l1) + sopf(i, primes)) // i\r\n l.append(s)\r\n return l[n]", "def answer():\n for k in range(2,3000):\n for j in range(k-1,0,-1):\n pj, pk = P(j), P(k)\n #print( j, k, pj, pk )\n if isPent(pk-pj):\n #print( j, k, pj, pk, pk+pj, isPent(pk+pj), pk-pj )\n if isPent(pk+pj) and isPent(pk-pj):\n return pk-pj", "def solution(n: int) -> int:\n sizearr = n + 1\n\n # create zero-filled multi_arr\n multi_arr = [[0 for x in range(sizearr)] for n in range(sizearr)]\n\n # base value is always skipped after being padded\n multi_arr[0][0] = 1\n for last in range(1, sizearr):\n for next in range(0, sizearr):\n multi_arr[last][next] 
= multi_arr[last - 1][next]\n if next >= last:\n multi_arr[last][next] += multi_arr[last - 1][next - last]\n\n return multi_arr[n][n] - 1", "def solution(N):\n max_distance = count = 0\n ones = []\n i = 0\n while (N > 0):\n if (N % 2 == 1):\n ones.append(i)\n i+=1\n N //= 2\n for k in range(1, len(ones)):\n max_distance = max(max_distance, ones[k] - ones[k-1])\n return max_distance", "def J (self, n):", "def solve(n, seq):\n\n return sum(seq) - (n-1) * (n-2) / 2", "def extinction_efficiency(self):\r\n n = np.arange(1, self.n + 1)\r\n return 2*np.sum((2*n+1)*np.real(self.a + self.b))/self.x**2", "def solution(N, A):\n arr = [0]*(N) # To hold the max value. This idea is also used in L09_MaxDoubleSliceSum_m_golden_slice.py\n minimum = maximum = 0\n for i in A:\n if i > N:\n minimum = maximum\n else:\n arr[i-1] = max(arr[i-1], minimum)\n arr[i-1] += 1\n if arr[i-1] > maximum:\n maximum = arr[i-1]\n\n for i in range(len(arr)):\n arr[i] = max(arr[i], minimum)\n\n return arr", "def transform(i, j, k):\n return i * N * N + j * N + k + 1", "def euler2(N):\n i = 0\n current_term = 0\n res = 0\n while current_term <= N:\n res += current_term\n i += 1\n current_term = fib(3*i)\n return res", "def recompose(x, list_p, N):\n res = 1\n for i in zip(x, list_p):\n plus = 1\n for j in range(i[0]):\n plus *= i[1]\n plus %= N\n res *= plus\n res %= N\n return int(res % N)", "def complexity(s, **kwargs):\n num, den = 1, 1\n for k in range(1, len(s)):\n k4 = 4**k # For DNA\n num += min(len(set(s[i:i+k] for i in range(len(s) - k + 1))), k4)\n den += min(len(s) - k + 1, k4)\n return num / den", "def elementary_summand(fixed, i):\n if i < fixed:\n return 0\n elif i == fixed:\n return 2\n else:\n return 1", "def f_exact(n, k):\n def fact(m):\n return math.factorial(m)\n\n partition = part(n, k)\n\n total = 0\n for p in partition:\n product = 1\n nodes_left = n\n counts = dict([(x, len(list(y))) for x, y in itertools.groupby(p)])\n for num in p:\n product *= fact(num - 1) * comb(nodes_left, num)\n nodes_left -= num\n for num in counts:\n product /= fact(counts[num])\n\n total += product\n return int(total)", "def ln_sum_i_neq_j(x):\n\tiw_size = x.size(0)\n\tbatch_size = x.size(1)\n\n\t# TODO: Would torch.expand instead of torch.repeat make this faster?\n\tinv_mask = torch.eye(iw_size).unsqueeze(dim=2).repeat(1, 1, batch_size)\n\tx_masked = x.view(1, iw_size, batch_size) - inv_mask*1000000.0\n\treturn logsumexp(x_masked, dim=1)", "def g(i):\n return int(np.log2(gc(i)^gc(i+1)))", "def B(n, k):\n assert 0 < k <= n\n global lookup\n for index_y in range(len(lookup), n + 1):\n lookup.append([1])\n min_value = min(index_y, k)\n for index_x in range(min_value):\n if index_x < len(lookup[index_y - 1]) - 1:\n lookup[index_y].append(lookup[index_y - 1][index_x] + lookup[index_y - 1][index_x + 1])\n else:\n lookup[index_y].append(lookup[index_y - 1][index_x])\n return lookup[n][k]", "def fn(n, k):\n if k == 0: return 1 \n if n <= 0 or k < 0: return 0 \n return fn(n-1, k) + fn(n, k-1) - fn(n-1, k-n)", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def idcg(k):\n res = sum([1.0 / math.log(i + 2, 2) for i in range(k)])\n if not res:\n return 1.0\n else:\n return res", "def problem(args:int) -> int:\r\n\ta, b, c = 1, 2, 0\r\n\tresult = [a]\r\n\twhile c <= args:\r\n\t\tc = a + b\r\n\t\ta = b\r\n\t\tb = c\r\n\t\tresult.append(a)\r\n\tresult = np.array(result)\r\n\treturn sum(result[result % 2 == 0])", "def sum_square_difference():\n lst = list(range(1, 101))\n acc = 0\n for e in lst:\n for f in 
lst:\n if e != f:\n acc = acc + (e * f)\n return acc", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n #computational complexity: O(N) since we iterate through n elements only once\n #space comlexity: O(N) since we need to store n elements in the array\n\n #(0) use hashtable to store the number and its index.\n hashTable={}\n #(1) iterating through all the itmes\n for i in range(len(nums)):\n complement = target - nums[i]\n #(2) check if the complement is in the hashTable. If not, put it into the hashtable\n if complement in hashTable:\n return [hashTable[complement],i]\n else:\n hashTable[nums[i]] = i", "def solution(n: int = 28123) -> int:\n\n nums = range(1, n+1)\n abundant = list(filter(is_abundant, nums))\n abundant_sums = set(all_sums(abundant, n))\n fit = set(nums) - abundant_sums\n return fit", "def faster_brute_force_solution():\n def calc_b(a, sum):\n return ((sum**2/2) - (sum * a)) / (sum - a)\n\n a, b = next(\n (a, calc_b(a, TRIPLET_SUM))\n for a in range(1, TRIPLET_SUM)\n if calc_b(a, TRIPLET_SUM).is_integer()\n )\n\n return a * b * (TRIPLET_SUM - a - b)", "def fast_sum(J,s):\n e = np.zeros((s.shape[0]))\n for n in range(s.shape[0]):\n k = 0\n for i in range(s.shape[1]-1):\n for j in range(i+1,s.shape[1]):\n e[n] += J[k]*s[n,i]*s[n,j]\n k += 1\n return e", "def twoSumFaster(nums, target):\n my_hash = {}\n for a in range(0,len(nums)):\n my_hash[nums[a]] = a\n\n for indice_a in range(0,len(nums) - 1):\n diff = target - nums[indice_a]\n if diff in my_hash:\n for indice_b in range(indice_a + 1, len(nums)):\n if nums[indice_b] == diff:\n return [indice_a, indice_b]", "def fn(i, k):\n if i == len(nums): return 0\n if k < 0: return inf \n ans = inf\n rmx = -inf # range max \n rsm = 0 # range sum \n for j in range(i, len(nums)): \n rmx = max(rmx, nums[j])\n rsm += nums[j]\n ans = min(ans, rmx*(j-i+1) - rsm + fn(j+1, k-1))\n return ans", "def fn(x):\n if not x: return 0 \n ans = inf\n freq = Counter(x)\n for cnt in freqs: \n if x[0] in cnt: \n xx = \"\".join(k*v for k, v in (freq - cnt).items())\n ans = min(ans, 1 + fn(xx))\n return ans", "def entropy(*args):\n\n\n values = []\n leaf = -1\n\n for i, val in enumerate(args):\n if(val != 0):\n values.append(val * math.log(val, len(args)))\n if(val == 1):\n leaf = i\n \n return -sum(values), leaf", "def fn(x, i=0):\n if x <= 0: return int(x == 0)\n return sum(fn(x-coins[ii], ii) for ii in range(i, len(coins)))", "def pseudo(x,N) :\n\treturn (x**2+1)%N", "def nw(n):\n return 4*n*n + 1", "def fn(val):\n if val < x: return min(2*val-1, 2*(x-val))\n k = int(log(val)//log(x))\n ans = k + fn(val - x**k)\n if x**(k+1) < 2*val: \n ans = min(ans, k + 1 + fn(x**(k+1) - val))\n return ans", "def loglikehood_coefficient(n_items, X, Y):\n # should not need X_norm_squared because if you could precompute that as\n # well as Y, then you should just pre-compute the output and not even\n # call this function.\n\n def safeLog(d):\n if d <= 0.0:\n return 0.0\n else:\n return np.log(d)\n\n def logL(p, k, n):\n return k * safeLog(p) + (n - k) * safeLog(1.0 - p)\n\n def twoLogLambda(k1, k2, n1, n2):\n p = (k1 + k2) / (n1 + n2)\n return 2.0 * (logL(k1 / n1, k1, n1) + logL(k2 / n2, k2, n2)\n - logL(p, k1, n1) - logL(p, k2, n2))\n\n if X is Y:\n X = Y = np.asanyarray(X)\n else:\n X = np.asanyarray(X)\n Y = np.asanyarray(Y)\n\n result = []\n\n # TODO: Check if it is possible to optimize this function\n\n i = 0\n for arrayX in X:\n result.append([])\n for arrayY in Y:\n XY = np.intersect1d(arrayX, arrayY)\n\n if XY.size == 0:\n result[i].append(0.0)\n 
else:\n nX = arrayX.size\n nY = arrayY.size\n if (nX - XY.size == 0) or (n_items - nY) == 0:\n result[i].append(1.0)\n else:\n logLikelihood = twoLogLambda(float(XY.size),\n float(nX - XY.size),\n float(nY),\n float(n_items - nY))\n\n result[i].append(1.0 - 1.0 / (1.0 + float(logLikelihood)))\n result[i] = np.asanyarray(result[i])\n i += 1\n\n return np.asanyarray(result)", "def drecurrance(cache, a, b, i, j):\n if i == j and j == 0:\n cache[i][j] = 0\n elif i == 0:\n cache[i][j] = j\n elif j == 0:\n cache[i][j] = i\n elif a[i-1] == b[j-1]:\n cache[i][j] = cache[i-1][j-1]\n else:\n cache[i][j] = 1 + min(cache[i-1][j], cache[i][j-1], cache[i-1][j-1])", "def sw(n):\n return 4*n*n + 2*n + 1", "def identity(n):\n return n", "def slow_kp(p, f):\n return sum(1 for n in range(p) if f(n) % p == 0)", "def minOperations(n):\n if n <= 1:\n return 0\n\n \"\"\"loop for n number of times\"\"\"\n for i in range(2, n + 1):\n if n % i == 0:\n return minOperations(int(n / i)) + i", "def fn(i):\n if i < 0: return 0\n return max(fn(i-1), fn(i-2) + nums[i])", "def Reduce(N):\r\n M = N.copy()\r\n lead = 0\r\n rowCount = M.shape[0]\r\n columnCount = M.shape[1]\r\n B1=eye(rowCount)\r\n for r in range(rowCount): \r\n if (columnCount <= lead):\r\n return B1,M\r\n i = r\r\n while (M[i, lead] == 0):\r\n i = i + 1\r\n if (rowCount == i):\r\n i = r\r\n lead = lead + 1\r\n if (columnCount == lead):\r\n return B1,M\r\n B1.row_swap(i, r)\r\n M.row_swap(i, r)\r\n a=M[r,lead]\r\n for k in range(columnCount):\r\n M[r,k]=S(M[r,k])/a\r\n for k in range(rowCount):\r\n B1[r,k]=S(B1[r,k])/a\r\n for i in range(0,rowCount):\r\n if (i != r):\r\n a=M[i,lead]\r\n for k in range(0,columnCount):\r\n M[i,k]=M[i,k]-M[r,k]*a\r\n for k in range(rowCount):\r\n B1[i,k]=B1[i,k]-B1[r,k]*a\r\n lead = lead + 1\r\n return B1,M", "def fast_sum(J, s):\n e = np.zeros(s.shape[0])\n for n in range(s.shape[0]):\n k = 0\n for i in range(s.shape[1]-1):\n for j in range(i+1,s.shape[1]):\n e[n] += J[k]*s[n,i]*s[n,j]\n k += 1\n return e", "def fn(i, j):\n if i < 0 or j < 0 or obstacleGrid[i][j]: return 0\n if i == 0 and j == 0: return 1 \n return fn(i-1, j) + fn(i, j-1)", "def gn(i, j, k):\n if i+j < n: return anti[i+j][i+k] - anti[i+j][i]\n return anti[i+j][n-1-j+k] - anti[i+j][n-1-j]", "def iterate_save(x, omegas=1, N=Mynum):\n n = len(x)\n h = 1.0 / (N - 1.)\n A = (1/h**2)*get_A(N)\n \n m = (n-1)/2\n l = (n-1)\n \n x[0] = omegas * -( A[0,1]*x[1] + A[0,N]*x[N] ) / A[0,0] + (1-omegas)*x[0]\n\n for i in range(1,N):\n x[i] = omegas * -( A[i,i-1]*x[i-1] + A[i,i+1]*x[i+1] + A[i,i+N]*x[i+N] ) / A[i,i] + (1-omegas)*x[i]\n\n for i in range(N, m):\n x[i] = omegas * -( A[i,i-N]*x[i-N] + A[i,i-1]*x[i-1] + A[i,i+1]*x[i+1] + A[i,i+N]*x[i+N] ) / A[i,i] + (1-omegas)*x[i]\n\n x[m] = omegas * ( 2 -( A[m,m-N]*x[m-N] + A[m,m-1]*x[m-1] + A[m,m+1]*x[m+1] + A[m,m+N]*x[m+N] ) ) / A[m,m] + (1-omegas)*x[m]\n\n for i in range(m+1, n-N):\n x[i] = omegas * -( A[i,i-N]*x[i-N] + A[i,i-1]*x[i-1] + A[i,i+1]*x[i+1] + A[i,i+N]*x[i+N] ) / A[i,i] + (1-omegas)*x[i]\n\n for i in range(n-N,l):\n x[i] = omegas * -( A[i,i-1]*x[i-1] + A[i,i+1]*x[i+1] + A[i,i-N]*x[i-N] ) / A[i,i] + (1-omegas)*x[i]\n\n x[l] = omegas * -( A[l,l-1]*x[l-1] + A[l,l-N]*x[l-N] ) / A[l,l] + (1-omegas)*x[l]\n\n return x , x[m]", "def fn(n, k):\n if k == 1: return n \n if n == 0: return 0 \n ans = inf \n for x in range(1, n+1): \n ans = min(ans, 1 + max(fn(x-1, k-1), fn(n-x, k)))\n return ans", "def l2_simple(objects_list, index_pair):\n return numpy.sum((objects_list[index_pair[0]] - objects_list[index_pair[1]])**2)", "def 
get_total_complexity(n):\n total = 0\n for i in range(1, n + 1):\n total += get_complexity(i)\n return total", "def eq_div(N, i):\n return [] if i <= 0 else [N // i + 1] * (N % i) + [N // i] * (i - N % i)" ]
[ "0.6677907", "0.6336799", "0.6260519", "0.6221236", "0.61503774", "0.6091918", "0.5895858", "0.587001", "0.586738", "0.58470577", "0.58414006", "0.5831377", "0.5762354", "0.5761957", "0.5754833", "0.57444394", "0.57410777", "0.5713722", "0.5712388", "0.5708484", "0.57046944", "0.56966174", "0.5691033", "0.56677043", "0.56654227", "0.56502926", "0.564972", "0.5641694", "0.5638521", "0.5635904", "0.56284016", "0.55889815", "0.5576655", "0.556851", "0.5567722", "0.55641085", "0.55508476", "0.5545442", "0.5538085", "0.55356514", "0.5531047", "0.5524456", "0.55221784", "0.550625", "0.5497056", "0.54968464", "0.5489026", "0.5482803", "0.54753655", "0.5468834", "0.54684794", "0.5453492", "0.5450697", "0.5449581", "0.54466283", "0.54440624", "0.5418632", "0.54143447", "0.54134953", "0.5405908", "0.5405514", "0.53956676", "0.5395037", "0.5394256", "0.5393131", "0.53871757", "0.53825307", "0.53777504", "0.53759193", "0.53747356", "0.53735787", "0.5366011", "0.5364926", "0.5363646", "0.5359748", "0.5358547", "0.5353908", "0.53479886", "0.5345847", "0.5345182", "0.5342206", "0.53421277", "0.53413427", "0.5339248", "0.533248", "0.53301895", "0.5328465", "0.5323088", "0.5322503", "0.5321708", "0.5321541", "0.53210455", "0.5318992", "0.5318105", "0.5308574", "0.53073347", "0.52899945", "0.52890205", "0.52862513", "0.52853227", "0.52840704" ]
0.0
-1
Test that the default product price is 10.
def test_default_product_price(self): prod = product('Test Product') self.assertEqual(prod.price, 10) self.assertEqual(prod.weight, 20)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\r\n prod = Product('Test Product')\r\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)\n self.assertEqual(prod.weight, 20)\n self.assertEqual(prod.flammability, 0.5)", "def test_default_prodct(self):\n prod = Product(\"default\")\n self.assertEqual(prod.price, 10)\n self.assertAlmostEqual(prod.flammability, 0.5)\n self.assertEqual(prod.weight, 20)\n self.assertEqual(prod.stealability(), \"Kinda stealable.\")", "def test_default_product_weight(self):\n prod = Product('Test Product')\n self.assertEqual(prod.weight, 20)", "def test_default_product_weight(self):\n prod = Product('Test Product')\n self.assertEqual(prod.weight, 20)", "def test_default_product_weight(self):\n prod = Product('Test Product')\n self.assertEqual(prod.weight, 20)", "def test_default_product_weight(self):\n prod = Product('Test Product')\n self.assertEqual(prod.weight, 20)", "def test_default_product_weight(self):\n prod = Product('Test Product')\n self.assertEqual(prod.weight, 20)", "def test_default_product_weight(self):\n prod = Product('Test Product')\n self.assertEqual(prod.weight, 20)", "def test_default_product_weight(self):\r\n prod = Product('Test Product')\r\n self.assertEqual(prod.weight, 20)", "def test_default_weight(self):\n prod = Product('Test Product')\n self.assertEqual(prod.weight, 20)", "def test_default_product_flammability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.flammability, .5)", "def test_get_standard_price_1(self):\n # By default get_standard_price returns then normal price of the product\n standard_price = self.p1.get_standard_price(self.request)\n self.assertEqual(standard_price, 1.0)\n\n # Switch to for sale\n self.p1.for_sale = True\n self.p1.save()\n\n # If the product is for sale ``get_price`` returns the for sale price\n price = self.p1.get_price(self.request)\n self.assertEqual(price, 0.5)\n\n # But ``get_standard_price`` returns still the normal price\n standard_price = self.p1.get_standard_price(self.request)\n self.assertEqual(standard_price, 1.0)", "def test_default_product_flammability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.flammability, 0.5)", "def test_default_product_flammability(self):\n prod = Product('Test Product')\n 
self.assertEqual(prod.flammability, 0.5)", "def standard_init_price(self):\n # If a system can't use something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tu and _good.name not in 'fuel':\n base_price = 0\n else:\n base_price = _good.plt + (self.planet.tech_level * _good.pi)\n # if good is highly requested, increase the price\n if self.planet.status in [_good.dps]:\n base_price = base_price + (base_price * 0.5)\n # large system: high production decreases prices\n base_price = (base_price * (100 - self.planet.system_size)) / 100\n\n # price can't be negative\n if base_price < 0:\n base_price = 0\n\n return int(base_price)", "def test_get_price_net(self):\n # Test product\n self.assertEqual(\"%.2f\" % self.p1.get_price_net(self.request), \"1.00\")\n\n # Test variant. By default the price_net of a variant is inherited,\n # but the tax is.\n self.assertEqual(\"%.2f\" % self.v1.get_price_net(self.request), \"1.00\")\n\n # Now we switch to active price.\n self.v1.active_price = True\n self.v1.save()\n\n # Now we get the price net of the parent product\n self.assertEqual(\"%.2f\" % self.v1.get_price_net(self.request), \"2.00\")", "def test_total_price(self):\n self.assertTrue(hasattr(self.exercise, \"total_price\"), \"You must declare 'total_price'\")\n self.assertEqual(self.exercise.total_price, 0, \"'total_price' value seems wrong\")", "def test_product_bundle_price_calculation(self):\n template = self.product_apple_bundle\n template.write({'is_calpack_price': False})\n template.write({'is_calpack_price': True})\n self.assertEqual(template.list_price, self.total_price, 'Product: a product bundle canculation sale price')\n self.assertEqual(template.standard_price, self.total_cost, 'Product: a product bundle canculation product cost')", "def test_total_price(self):\n self.assertTrue(hasattr(self.exercise, \"total_price\"), \"You must declare 'total_price'\")\n self.assertEqual(self.exercise.total_price, 10, \"'savings' value seems wrong\")", "def test_default_num(self):\n products = generate_products()\n self.assertEqual(len(products), 30)", "def test_add_sale_with_price_below_one(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': 'Torch', 'price': -10, 'quantity': 5, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. 
The product price should be a positive number above 0.')", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_total_price(self):\n self.assertTrue(hasattr(self.exercise, \"total_price\"), \"You must declare 'total_price'\")\n self.assertEqual(self.exercise.total_price, 8, \"'total_price' value seems wrong\")", "def test_product_buy(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 5)\n self.assertEqual(result_buy, 175)", "def test_course_run_current_price():\n run = CourseRunFactory.create()\n assert run.current_price is None\n price = 10\n ProductVersionFactory.create(\n product=ProductFactory(content_object=run), price=price\n )\n assert run.current_price == price", "def test_program_current_price():\n program = ProgramFactory.create()\n assert program.current_price is None\n price = 10\n ProductVersionFactory.create(\n product=ProductFactory(content_object=program), price=price\n )\n assert program.current_price == price", "def set_price(self, request, pk):\n return Response('20$')", "def set_price(self, request, pk):\n return Response('20$')", "def test_default_w_decimals(self):\n self.assertEqual(currency(188.00), \"$188.00\")", "def test_visualize_price_breakdown(self):\n pass", "def test_default_num_products(self):\r\n prod = generate_products()\r\n self.assertEqual(len(prod), 30)", "def test_check_price_ok() -> None:\n data = check_price(min_price=1, data={'p': 2.0})\n assert data == {'p': 2.0}", "def test_lowest_price(self):\n listings = steam_market.get_lowest_price(soup=get_soup_from_path(TEST_FILE_NORMAL_LISTING))\n self.assertEqual('11,59€', listings)", "def desired_price(self, new_desired_price):\n self._desired_price = new_desired_price", "def test_get_price_gross(self):\n # Test product\n self.assertEqual(self.p1.get_price_gross(self.request), 1.19)\n\n # Test variant. 
By default the price_gross of a variant is inherited\n self.assertEqual(self.v1.get_price_gross(self.request), 1.19)\n\n # Now we switch to active price.\n self.v1.active_price = True\n self.v1.save()\n\n # Now we get the price gross of the parent product\n self.assertEqual(self.v1.get_price_gross(self.request), 2.38)", "def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")", "def test_default_num_products(self):\n products = generate_products()\n self.assertEqual(len(products), 30)", "def testPowerDefault(self):\n self.assertEqual(\n (Decimal('1.0'), Decimal('1.0'), Decimal('1.0')),\n self.node.power\n )", "def check_price(self):\n return self.day*self.price", "def default_actual_order_price(context):\n current_type = context.current_parameters.get('current_type')\n default_price = 0\n actual_order_price = 0\n if current_type == 'order':\n project_id = context.current_parameters.get('project_id', None)\n if project_id:\n project = Project.get(project_id)\n default_price = project.price if project else default_price\n actual_order_price = context.current_parameters.get('price', default_price)\n return actual_order_price", "def test_product(self):\n self.assertEqual(self.test_product.name, self.test_product_name)\n self.assertEqual(self.test_product.price, self.test_product_price)", "def test_product_price_is_required(self):\n product = {\n 'name': 'LAPTOP',\n 'price': '',\n 'image': ''\n }\n res = self.client.post(PRODUCTS_URL, product)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def price(self) -> float:\n if self.product:\n price = self.product.prices.filter(active=True).first()\n return int(price.unit_amount / 100)\n return -1", "def test_default_num_products(self):\n product_list = generate_products()\n self.assertEqual(len(product_list), 30)", "def setPrice(self, val):\n self.price = val", "def test_default_num_products(self):\r\n lst = generate_products()\r\n self.assertEqual(len(lst), 30)", "def test_default_num_products(self):\n self.assertEqual(len(generate_products()), 30)", "def get_price(self):\n return self.sale_price if self.sale_price else self.price", "def test_subtotal(self):\n self.assertTrue(hasattr(self.exercise, \"subtotal_price\"), \"You must declare 'subtotal_price'\")\n self.assertEqual(self.exercise.subtotal_price, 10, \"'subtotal_price' value seems wrong\")", "def test_subtotal(self):\n self.assertTrue(hasattr(self.exercise, \"subtotal_price\"), \"You must declare 'subtotal_price'\")\n self.assertEqual(self.exercise.subtotal_price, 10, \"'subtotal_price' value seems wrong\")", "def test_subtotal(self):\n self.assertTrue(hasattr(self.exercise, \"subtotal_price\"), \"You must declare 'subtotal_price'\")\n self.assertEqual(self.exercise.subtotal_price, 10, \"'subtotal_price' value seems wrong\")", "def set_price(self, _price):\n self.price = _price\n return self.price", "def calc_price(self):\n price = self.price\n action = self.action\n mortage = 5 # here set mortage multiplier \n\n if action == 'RESIDENTIAL_SALE':\n return price * 12 * mortage\n\n\n if price >= 10000:\n return price * 0.7\n elif price < 10000 & price >= 5000:\n return price * 0.55\n elif price < 5000 & price >= 2800:\n return price * 0.475\n else:\n return price * 0.4", "def selling_price(self):\n # If a system can't produce something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tp and _good.name not in 'fuel':\n sell_price = 0\n else:\n sell_price = self.standard_init_price()\n # raise a bit, 
randomized\n sell_price = sell_price + random.randrange(self.tradeitem.var)\n\n return int(sell_price)", "def get_price():\n \n #Teacher's code. Could not get it working.\n #price = db(db.product.name == productName).select(db.product.price)[0].price\n \n \n return (200)", "def check_price():\n global NUMBER_OF_TOTAL_COINS, BEVERAGE_PRICE\n\n if NUMBER_OF_TOTAL_COINS == BEVERAGE_PRICE:\n return True\n elif NUMBER_OF_TOTAL_COINS < BEVERAGE_PRICE:\n return False\n else:\n return \"FATAL\"", "def test_add_sale_with_price_not_digit_format(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': \"Hand Bag\", 'price': \"1500\", 'quantity': 3, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be an integer.')", "def test_negative_pricing(self):\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -1.00)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -0.01)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0.00)\n try:\n Product(self.test_product_name, 1.00)\n Product(self.test_product_name, 0.01)\n except InvalidProductPriceException:\n self.fail(\"InvalidProductPriceException raised for positive value unexpectedly\")", "def change_price(self, value): \n value = self.price", "def test_defaults(self):\n p = Product.objects.create(\n name=\"Product\", slug=\"product\", sku=\"4711\", price=42.0)\n\n self.assertEqual(p.name, \"Product\")\n self.assertEqual(p.slug, \"product\")\n self.assertEqual(p.sku, \"4711\")\n self.assertEqual(p.price, 42.0)\n self.assertEqual(p.effective_price, 42.0)\n self.assertEqual(p.short_description, \"\")\n self.assertEqual(p.description, \"\")\n self.assertEqual(len(p.images.all()), 0)\n\n self.assertEqual(p.meta_title, \"<name>\")\n self.assertEqual(p.meta_description, \"\")\n self.assertEqual(p.meta_keywords, \"\")\n\n self.assertEqual(len(p.related_products.all()), 0)\n self.assertEqual(len(p.accessories.all()), 0)\n\n self.assertEqual(p.for_sale, False)\n self.assertEqual(p.for_sale_price, 0.0)\n self.assertEqual(p.active, False)\n\n self.assertEqual(p.deliverable, True)\n self.assertEqual(p.manual_delivery_time, False)\n self.assertEqual(p.delivery_time, None)\n self.assertEqual(p.order_time, None)\n self.assertEqual(p.ordered_at, None)\n self.assertEqual(p.manage_stock_amount, False)\n self.assertEqual(p.stock_amount, 0)\n\n self.assertEqual(p.weight, 0)\n self.assertEqual(p.height, 0)\n self.assertEqual(p.length, 0)\n self.assertEqual(p.width, 0)\n\n self.assertEqual(p.tax, None)\n self.assertEqual(p.sub_type, STANDARD_PRODUCT)\n\n self.assertEqual(p.default_variant, None)\n self.assertEqual(p.variants_display_type, LIST)\n\n self.assertEqual(p.parent, None)\n self.assertEqual(p.active_name, False)\n self.assertEqual(p.active_sku, False)\n self.assertEqual(p.active_short_description, False)\n self.assertEqual(p.active_description, False)\n self.assertEqual(p.active_price, False)\n self.assertEqual(p.active_images, False)\n self.assertEqual(p.active_related_products, False)\n self.assertEqual(p.active_accessories, False)\n 
self.assertEqual(p.active_meta_description, False)\n self.assertEqual(p.active_meta_keywords, False)", "def test_basicNoSalePC(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Generic Item\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic Item\", \"$0.01\")\n # Don't add the item\n pos.click(\"Ok\")\n \n # Confirm we aren't in a transaction\n if self.in_transaction():\n self.tc_fail(\"Unintentionally In Transaction\")\n else:\n self.log.info(\"Confirmed we are not in a transaction\")\n \n # Setup for next test\n self.recover()", "def test_default_num_products(self):\n gen_prods = generate_products()\n self.assertEqual(len(gen_prods), 30)", "def desired_price(self):\n return self._desired_price", "def clean_price(self):\n price = self.cleaned_data.get('price')\n if price == \"0\":\n raise forms.ValidationError(\n u'Please insert a price for your product')\n return price", "def test_default_num_products(self):\n test_list = generate_products()\n self.assertEqual(len(test_list), 30, msg=\"Length is Bad\")", "def test_callPrice(self):\n call_price1 = calculator.BlackScholes.call_price(**self.params_1)\n call_price2 = calculator.BlackScholes.call_price(**self.params_2)\n self.assertAlmostEqual(call_price1,10.45,delta=0.01)\n self.assertAlmostEqual(call_price2,7.965,delta=0.01)", "def test_10_insert_stock_prices(self):\n p_eur = Price.insert_new_price(\"EUR\", 1.2)\n p_aapl = Price.insert_new_price(\"AAPL\", 163.99)\n p_ibm = Price.insert_new_price(\"IBM\", 145.78)\n p_msft = Price.insert_new_price(\"MSFT\", 75.87)\n\n self.assertTrue(isinstance(p_eur, Price),\n msg=\"Price is NOT returning a valid inserted EUR instance\")\n print(\"Price insert EUR asset is returning the following price: {}\".format(\n p_eur.price,\n ))\n\n self.assertTrue(isinstance(p_aapl, Price),\n msg=\"Price is NOT returning a valid inserted AAPL instance\")\n print(\"Price insert AAPL asset is returning the following price: {}\".format(\n p_aapl.price,\n ))\n\n self.assertTrue(isinstance(p_ibm, Price),\n msg=\"Price is NOT returning a valid inserted IBM instance\")\n print(\"Price insert IBM asset is returning the following price: {}\".format(\n p_ibm.price,\n ))\n\n self.assertTrue(isinstance(p_msft, Price),\n msg=\"Price is NOT returning a valid inserted MSFT instance\")\n print(\"Price insert MSFT asset is returning the following price: {}\".format(\n p_msft.price,\n ))", "def test_basicSalePC(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Generic Item\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic Item\", \"$0.01\")\n # Add the item\n pos.click(\"Sell Item\")\n \n # Confirm we added the item\n ret = self.confirm_line(-1, \"Generic Item\", \"$0.01\")\n if ret == True:\n self.log.info(\"Confirmed item added\")\n else:\n self.tc_fail(ret)\n \n # Setup for next test\n self.recover()", "def get_order_price(self):\r\n if self.price is not None:\r\n return self.price #typical limit price order\r\n else:\r\n #Check the orderbook\r\n logger.info(\"floating price\")\r\n self.get_orderbook()\r\n logger.info(self.orderbook_snapshot)\r\n\r\n pass", "def test_sell_ticket_price_range(self, *_):\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n 
self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"testticket\")\n self.type(\"#quantity_sell\", \"1\")\n self.type(\"#price_sell\", \"101\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown.\n self.assert_text(\"Ticket price outside of valid range\", \"#message\")\n\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"testticket\")\n self.type(\"#quantity_sell\", \"1\")\n self.type(\"#price_sell\", \"9\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown.\n self.assert_text(\"Ticket price outside of valid range\", \"#message\")", "def get_price(self):\r\n return self.price", "def initial_price(self) -> float:\n return self.__initial_price", "def get_product_price(self):\n\n price = \"0.0000\"\n\n try:\n price = self.trees.get_element_by_id(\"priceblock_ourprice\").text\n except:\n try:\n price = self.trees.get_element_by_id(\n \"price_inside_buybox\").text\n except:\n try:\n price = self.trees.get_element_by_id(\n \"priceblock_dealprice\").text\n except:\n try:\n price = self.trees.xpath(\n \"//span[@class='a-color-price']/text()\")[0]\n except:\n try:\n price = self.trees.xpath(\n \"//span[@class='a-size-base a-color-price']/text()\")[0]\n except:\n pass\n\n non_decimal = re.compile(r'[^\\d.]+')\n price = non_decimal.sub('', price)\n\n return round(float(price[0:5]), 2)", "def price(self, value):\n self._price = Decimal(value)", "def test_get_standard_price_2(self):\n #\n self.p1.for_sale = False\n self.p1.save()\n\n self.v1.active_price = False\n self.v1.active_for_sale_price = False\n self.v1.save()\n\n self.assertEqual(self.v1.get_standard_price(self.request), 1.0)\n self.assertEqual(self.v1.get_price(self.request), 1.0)\n self.assertEqual(self.v1.get_for_sale(), False)\n\n #\n self.p1.for_sale = False\n self.p1.save()\n\n self.v1.active_price = False\n self.v1.active_for_sale_price = True\n self.v1.save()\n\n self.assertEqual(self.v1.get_standard_price(self.request), 1.0)\n self.assertEqual(self.v1.get_price(self.request), 1.0)\n self.assertEqual(self.v1.get_for_sale(), False)\n\n #\n self.p1.for_sale = False\n self.p1.save()\n\n self.v1.active_price = True\n self.v1.active_for_sale_price = False\n self.v1.save()\n\n self.assertEqual(self.v1.get_standard_price(self.request), 2.0)\n self.assertEqual(self.v1.get_price(self.request), 2.0)\n self.assertEqual(self.v1.get_for_sale(), False)\n\n #\n self.p1.for_sale = False\n self.p1.save()\n\n self.v1.active_price = True\n self.v1.active_for_sale_price = True\n self.v1.save()\n\n self.assertEqual(self.v1.get_standard_price(self.request), 2.0)\n self.assertEqual(self.v1.get_price(self.request), 2.0)\n self.assertEqual(self.v1.get_for_sale(), False)\n\n #\n self.p1.for_sale = True\n self.p1.save()\n\n self.v1.active_price = False\n self.v1.active_for_sale_price = False\n self.v1.save()\n\n self.assertEqual(self.v1.get_standard_price(self.request), 1.0)\n self.assertEqual(self.v1.get_price(self.request), 0.5)\n 
self.assertEqual(self.v1.get_for_sale(), True)\n\n #\n self.p1.for_sale = True\n self.p1.save()\n\n self.v1.active_price = False\n self.v1.active_for_sale_price = True\n self.v1.save()\n\n self.assertEqual(self.v1.get_standard_price(self.request), 1.0)\n self.assertEqual(self.v1.get_price(self.request), 1.5)\n self.assertEqual(self.v1.get_for_sale(), True)\n\n #\n self.p1.for_sale = True\n self.p1.save()\n\n self.v1.active_price = True\n self.v1.active_for_sale_price = False\n self.v1.save()\n\n self.assertEqual(self.v1.get_standard_price(self.request), 2.0)\n self.assertEqual(self.v1.get_price(self.request), 0.5)\n self.assertEqual(self.v1.get_for_sale(), True)\n\n #\n self.p1.for_sale = True\n self.p1.save()\n\n self.v1.active_price = True\n self.v1.active_for_sale_price = True\n self.v1.save()\n\n self.assertEqual(self.v1.get_standard_price(self.request), 2.0)\n self.assertEqual(self.v1.get_price(self.request), 1.5)\n self.assertEqual(self.v1.get_for_sale(), True)\n\n #\n self.p1.for_sale = True\n self.p1.save()\n\n self.v1.active_for_sale = CHOICES_STANDARD\n self.v1.save()\n\n self.assertEqual(self.v1.get_for_sale(), True)\n\n self.v1.active_for_sale = CHOICES_YES\n self.v1.save()\n\n self.assertEqual(self.v1.get_for_sale(), True)\n\n self.v1.active_for_sale = CHOICES_NO\n self.v1.save()\n\n self.assertEqual(self.v1.get_for_sale(), False)\n\n #\n self.p1.for_sale = False\n self.p1.save()\n\n self.v1.active_for_sale = CHOICES_STANDARD\n self.v1.save()\n\n self.assertEqual(self.v1.get_for_sale(), False)\n\n self.v1.active_for_sale = CHOICES_YES\n self.v1.save()\n\n self.assertEqual(self.v1.get_for_sale(), True)\n\n self.v1.active_for_sale = CHOICES_NO\n self.v1.save()\n\n self.assertEqual(self.v1.get_for_sale(), False)", "def price(self, value):\n self.price_ = max(value, 0)\n\n if self.price_ == 0:\n self.mark_as_paid()", "def test_default_num_products(self):\n products = acme_report.generate_products()\n self.assertEqual(len(products), 30)", "def testPriceByNight(self):\n place = Place()\n self.assertTrue(hasattr(place, \"price_by_night\"))\n self.assertEqual(type(place.price_by_night), int)\n self.assertEqual(place.price_by_night, 0)", "def test_PriceCheckPLU(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via PLU\")\n pos.click(\"Price Check\")\n pos.enter_keypad(\"1\", after=\"enter\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic Item\", \"$0.01\")\n # Don't add the item\n pos.click(\"Ok\")\n \n # Confirm we aren't in a transaction\n if self.in_transaction():\n self.tc_fail(\"Unintentionally In Transaction\")\n else:\n self.log.info(\"Confirmed we are not in a transaction\")\n \n # Setup for next test\n self.recover()", "def buying_price(self):\n buy_price = self.standard_init_price()\n # Special status and resources price adaptation\n if self.planet.status in [self.tradeitem.dps]:\n buy_price = (buy_price * 5) / 3\n\n elif self.planet.special in [self.tradeitem.cr]:\n buy_price = (buy_price * 3) / 4\n\n elif self.planet.special in [self.tradeitem.er]:\n buy_price = (buy_price * 4) / 3\n\n # randomize a bit\n moins = random.randrange(self.tradeitem.var)\n plus = random.randrange(self.tradeitem.var)\n buy_price = buy_price - moins + plus\n\n # price can't be negative\n if buy_price < 0:\n buy_price = 0\n\n return int(buy_price)", "def get_product_price(self, url):\n self.driver.get(url)\n\n try:\n price = self.driver.find_element_by_id(\"priceblock_ourprice\").text\n except:\n pass\n\n try:\n price = 
self.driver.find_element_by_id(\"priceblock_dealprice\").text\n except:\n pass\n\n if price is None:\n price = \"Not available\"\n\n else:\n non_decimal = re.compile(r'[^\\d.]+')\n price = non_decimal.sub('', price)\n\n return price", "def get_price():\n return uniform(1.0, 350.0)", "def testDefault(self):\n self.assertEqual(\n Decimal('1.0'),\n self.node.sat\n )", "def get_price(self, field_name='PRICES'):\n price_data = self.get_price_data()\n return price_data.get('price') or self.find_price(self.get_default(field_name))", "def pricevalidator(self, price):\n if type(price) != int:\n API.abort(400, error_messages[15]['str_price'])\n\n return True", "def modify_price(pid: int, price: float) -> ExecRet:\n if price < 0.0:\n return ExecRet.err(message='invalid price %.4f' % price)\n market = get_market()\n product = market.get_product(pid)\n if not product:\n return ExecRet.err(message='pid %d not exist' % pid)\n LOGGER.info('pid %s, pre-price: %.4f, new-price: %.4f' %\n (pid, product.price, price))\n time.sleep(3)\n product.price = price\n return ExecRet.ok()", "def __get_deal_price(self):\n return self.create_random_decimal(min=1, max=100000)", "def get_base_price(self):\n\n base_price = random.randint(5, 9)\n week_day = datetime.datetime.weekday(self.time_of_order)\n hour = self.time_of_order.hour\n\n if week_day in range(0, 5) and hour in range(8, 11):\n base_price = base_price + 4\n\n return base_price" ]
[ "0.8738257", "0.8738257", "0.8738257", "0.8738257", "0.8738257", "0.8738257", "0.8738257", "0.8738257", "0.8738257", "0.8738257", "0.8738257", "0.87333673", "0.8580199", "0.7287374", "0.72686666", "0.72686666", "0.72686666", "0.72686666", "0.72686666", "0.72686666", "0.7263497", "0.717852", "0.6858057", "0.6813622", "0.67408824", "0.67408824", "0.6709257", "0.6688696", "0.66599596", "0.6654696", "0.66544443", "0.66513526", "0.6646219", "0.66166383", "0.66162026", "0.6558339", "0.653691", "0.6521672", "0.6500342", "0.6500342", "0.6488876", "0.6450081", "0.6406087", "0.6376648", "0.6363995", "0.6356096", "0.6345304", "0.6344195", "0.63123804", "0.6312298", "0.6292402", "0.62903875", "0.62829405", "0.6251204", "0.6236363", "0.62348825", "0.62321043", "0.6222491", "0.62049663", "0.6198217", "0.61947584", "0.61947584", "0.61947584", "0.61911356", "0.6187882", "0.6180349", "0.6178912", "0.6178624", "0.6172789", "0.61724347", "0.6161206", "0.61562955", "0.61517525", "0.6148836", "0.61470944", "0.6146845", "0.6094829", "0.6081774", "0.6077017", "0.6075548", "0.6075277", "0.6071187", "0.6069036", "0.6062449", "0.60567945", "0.60380155", "0.6037288", "0.6023906", "0.6023361", "0.60097057", "0.5996798", "0.5993872", "0.59922606", "0.59909546", "0.59878373", "0.59856266", "0.59788114", "0.5977211", "0.59542483", "0.5943813" ]
0.8610413
12
Mostly ripped from nc3tonc4 in netCDF4-python. Added the ability to skip dimensions and variables. Removed all of the unpacking logic for shorts.
def clone(src, dst_path, skip_globals, skip_dimensions, skip_variables): if os.path.exists(dst_path): os.unlink(dst_path) dst = netCDF4.Dataset(dst_path, 'w') # Global attributes for attname in src.ncattrs(): if attname not in skip_globals: setattr(dst, attname, getattr(src, attname)) # Dimensions unlimdim = None unlimdimname = False for dimname, dim in src.dimensions.items(): # Skip what we need to if dimname in skip_dimensions: continue if dim.isunlimited(): unlimdim = dim unlimdimname = dimname dst.createDimension(dimname, None) else: dst.createDimension(dimname, len(dim)) # Variables for varname, ncvar in src.variables.items(): # Skip what we need to if varname in skip_variables: continue hasunlimdim = False if unlimdimname and unlimdimname in ncvar.dimensions: hasunlimdim = True filler = None if hasattr(ncvar, '_FillValue'): filler = ncvar._FillValue if ncvar.chunking == "contiguous": var = dst.createVariable(varname, ncvar.dtype, ncvar.dimensions, fill_value=filler) else: var = dst.createVariable(varname, ncvar.dtype, ncvar.dimensions, fill_value=filler, chunksizes=ncvar.chunking()) # Attributes for attname in ncvar.ncattrs(): if attname == '_FillValue': continue else: setattr(var, attname, getattr(ncvar, attname)) # Data nchunk = 1000 if hasunlimdim: if nchunk: start = 0 stop = len(unlimdim) step = nchunk if step < 1: step = 1 for n in range(start, stop, step): nmax = n + nchunk if nmax > len(unlimdim): nmax = len(unlimdim) idata = ncvar[n:nmax] var[n:nmax] = idata else: idata = ncvar[:] var[0:len(unlimdim)] = idata else: idata = ncvar[:] var[:] = idata dst.sync() src.close() dst.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,filename='slices.000000',nvar=8,nbuf=3,field=None):\n\n #Open file\n f=tables.openFile(filename)\n\n #Dataset \"para_real\"\n self.time=f.root.para_real[0]\n self.dt =f.root.para_real[1]\n self.dx =f.root.para_real[2]\n self.dy =f.root.para_real[3]\n self.dz =f.root.para_real[4]\n\n #Dataset \"para_int\"\n self.ndump =f.root.para_int[0]\n self.nhist =f.root.para_int[1]\n self.nspec =f.root.para_int[2]\n self.nx =f.root.para_int[3]\n self.ny =f.root.para_int[4]\n self.nz =f.root.para_int[5]\n self.nxslice=f.root.para_int[6]\n self.nyslice=f.root.para_int[7]\n self.nzslice=f.root.para_int[8]\n\n self.dim =f.root.para_int[3:6]\n self.slice =f.root.para_int[6:9] \n self.dim_glob=self.dim*self.slice\n\n #Dataset \"x\", \"y\" and \"z\n self.x=f.root.x[:]\n self.y=f.root.y[:]\n self.z=f.root.z[:]\n\n #Dataset \"uin\"\n if not field:\n self.xyz=f.root.uin[:,:,:,:]\n else:\n if field == 'rho' : self.xyz = f.root.uin[0,:,:,:]\n if field == 'E' : self.xyz = f.root.uin[4,:,:,:]\n if field == 'rhoux': self.xyz = f.root.uin[1,:,:,:]\n if field == 'rhouy': self.xyz = f.root.uin[2,:,:,:]\n if field == 'rhouz': self.xyz = f.root.uin[3,:,:,:]\n if field == 'Bx' : self.xyz = f.root.uin[5,:,:,:]\n if field == 'By' : self.xyz = f.root.uin[6,:,:,:]\n if field == 'Bz' : self.xyz = f.root.uin[7,:,:,:]\n\n #Dataset \"para_mpi\"\n if (self.nxslice*self.nyslice*self.nzslice>1):\n self.xleft =f.root.para_mpi[0]\n self.xright=f.root.para_mpi[1]\n self.yleft =f.root.para_mpi[2]\n self.yright=f.root.para_mpi[3]\n self.zleft =f.root.para_mpi[4]\n self.zright=f.root.para_mpi[5]\n self.xposition=f.root.para_mpi[6]\n self.yposition=f.root.para_mpi[7]\n self.zposition=f.root.para_mpi[8]\n\n #Close file\n f.close()", "def addtonc(ncfout,key,vd,ofield,ftype=\"timeseries\"):\n nc_out=nc.Dataset(ncfout,'r+')\n if ftype==\"timeseries\":\n diml=['time','height','south_north','west_east'] # Tuple of Dimensions\n if vd['dims']==4:\n dimtup=tuple(diml)\n elif vd['dims']==3:\n dimtup = tuple([c for c in diml if c != \"height\"])\n elif vd['dims']==2:\n dimtup = tuple([c for c in diml if c not in [\"height\",\"time\"]])\n elif ftype==\"roughness\":\n diml=['south_north','west_east']\n dimtup=tuple(diml)\n elif ftype==\"tabfile\":\n diml=['south_north','west_east','sector','wind','stab']\n if vd['dims']==3:\n dimtup=tuple(diml.remove('wind').remove('stab'))\n if vd['dims']==2:\n dimtup=tuple(diml.remove('wind').remove('stab').remove('sector'))\n if key in (\"TKE\", \"ABLAT_CYL\", \"ACCRE_CYL\"):\n outv=nc_out.createVariable(key, 'f4', dimtup, zlib=True,\n complevel=9, fill_value=-999.)\n else:\n outv=nc_out.createVariable(key,'f4',dimtup,zlib=True,complevel=9)\n outv.units=vd['units']\n outv.long_name=vd['name']\n if vd['std_name'] is not None:\n outv.standard_name=vd['std_name']\n if key==\"PRECIP\":\n outv.cell_methods=\"time: sum\"\n outv.grid_mapping=\"crs\"\n outv.coordinates=\"XLAT XLON\"\n outv[:]=ofield[:]\n nc_out.close()\n return(None)", "def createncfile(dz_id,t,x,z):\n db = labdb.LabDB()\n #create the directory in which to store the nc file\n sql = \"\"\"INSERT into dn2t (dz_id) VALUES (%d)\"\"\" % (dz_id) \n db.execute(sql)\n sql = \"\"\"SELECT LAST_INSERT_ID()\"\"\" \n rows = db.execute(sql)\n dn2t_id = rows[0][0]\n dn2t_path = \"/Volumes/HD4/dn2t/%d\" % dn2t_id \n os.mkdir(dn2t_path)\n\n dn2t_filename = os.path.join(dn2t_path,\"dn2t.nc\")\n print(\"d(N2)/dt filename : \",dn2t_filename)\n\n\n # Declare the nc file for the first time\n nc = netCDF4.Dataset(dn2t_filename,'w',format = 'NETCDF4')\n row_dim 
= nc.createDimension('row',964)\n col_dim = nc.createDimension('column',1292)\n lenT=t.shape[0] #lenT is the length of the dn2t file.Its 1 element shorter in time axis than deltaN2\n print(\"time axis length\",lenT) # debug info\n t_dim = nc.createDimension('time',lenT)\n\n # Dimensions are also variable\n ROW = nc.createVariable('row',numpy.float32,('row'))\n print(list(nc.dimensions.keys()), ROW.shape,ROW.dtype)\n COLUMN = nc.createVariable('column',numpy.float32,('column'))\n print(list(nc.dimensions.keys()) , COLUMN.shape, COLUMN.dtype)\n TIME = nc.createVariable('time',numpy.float32,('time'))\n print(list(nc.dimensions.keys()) ,TIME.shape, TIME.dtype)\n\n # declare the 3D data variable \n dn2t = nc.createVariable('dn2t_array',numpy.float32,('time','row','column'))\n print(list(nc.dimensions.keys()) ,dn2t.shape,dn2t.dtype)\n\n # assign the values\n TIME[:] = t\n ROW[:] = z\n COLUMN[:] = x\n\n nc.close()\n db.commit()\n return dn2t_id,dn2t_filename", "def F_read_S5P_nc(self,fn,data_fields,data_fields_l2g=[]):\n from netCDF4 import Dataset\n ncid = Dataset(fn,'r')\n outp = {}\n for i in range(len(data_fields)):\n tmp = ncid[data_fields[i]]\n tmpdtype = tmp.dtype\n if not data_fields_l2g:\n varname = tmp.name\n else:\n varname = data_fields_l2g[i]\n if tmpdtype is \"str\":\n outp[varname] = tmp[:]\n else:\n outp[varname] = np.squeeze(tmp[:])\n ## scale factor already applied?! so confusing\n# try:\n# outp[varname] = outp[varname]*tmp.scale_factor\n# if tmp.scale_factor != 1:\n# print(varname+' has a scale_factor of '+'%s'%tmp.scale_factor)\n# except Exception:\n# #print(e)\n# print(varname+' has no scale_factor!')\n if 'time_utc' in outp.keys():\n UTC_matlab_datenum = np.zeros((len(outp['time_utc']),1),dtype=np.float64)\n for i in range(len(outp['time_utc'])):\n tmp = datetime.datetime.strptime(outp['time_utc'][i],'%Y-%m-%dT%H:%M:%S.%fZ')\n UTC_matlab_datenum[i] = (tmp.toordinal()\\\n +tmp.hour/24.\\\n +tmp.minute/1440.\\\n +tmp.second/86400.\\\n +tmp.microsecond/86400/1000000+366.)\n outp['UTC_matlab_datenum'] = np.tile(UTC_matlab_datenum,(1,outp['latc'].shape[1]))\n else: # hcho l2 does not have time_utc\n # the delta_time field of hcho fills all across track position, but ch4 is one per scanline\n if len(outp['delta_time'].shape) == 1:\n outp['delta_time'] = np.tile(outp['delta_time'][...,None],(1,outp['latc'].shape[1]))\n outp['UTC_matlab_datenum'] = (outp['time']+outp['delta_time']/1000.)/86400.+734139.\n \n outp['across_track_position'] = np.tile(np.arange(1.,outp['latc'].shape[1]+1),\\\n (outp['latc'].shape[0],1)).astype(np.int16)\n return outp", "def yomaha2nc4(finame, foname, line_buffer=100000, zlib=False):\n\n MISS_OUT = -999\n\n tic = tm.time()\n\n print \"yomaha2nc4\"\n print \"working with\"\n print finame\n print foname\n\n #=====================================================================\n # Set up the metadata\n #=====================================================================\n\n missing = ['-999.9999' if i in [0, 8, 15, 18, 21]\n else '-99.9999' if i in [1, 9, 16, 19, 22]\n else '-999.999' if i in [3, 10, 17, 20, 23]\n else '-999.9' if i == 2\n else '-999.99' if i in [4, 5, 6, 7, 11, 12, 13, 14]\n else '-128' if i == 27\n else '-999'\n for i in range(28)]\n\n variables = [\n {'name': 'x_deep',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude'},\n {'name': 'y_deep',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude'},\n {'name': 'z_park',\n 'unit': 'dbar',\n 'long_name': 'Parking Pressure'},\n {'name': 't_deep',\n 'unit': 'days since 2000-01-01 0:0:0 
UTC',\n 'long_name': 'Time'},\n {'name': 'u_deep',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of zonal deep velocity'},\n {'name': 'v_depth',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of meridional deep velocity'},\n {'name': 'e_u_deep',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of zonal deep velocity'},\n {'name': 'e_v_deep',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of meridional deep velocity'},\n {'name': 'x_surf',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude'},\n {'name': 'y_surf',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude'},\n {'name': 't_surf',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time'},\n {'name': 'u_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of zonal velocity at sea surface'},\n {'name': 'v_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of meridional velocity at sea surface'},\n {'name': 'e_u_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of zonal velocity at sea surface'},\n {'name': 'e_v_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of meridional velocity at sea surface'},\n {'name': 'x_last_prev',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude of the last fix at the surface during previous cycle'},\n {'name': 'y_last_prev',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude of the last fix at the surface during previous cycle'},\n {'name': 't_last_prev',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time of the last fix at the surface during previous cycle'},\n {'name': 'x_first',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude of the first fix at the surface'},\n {'name': 'y_first',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude of the first fix at the surface'},\n {'name': 't_first',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time of the first fix at the surface'},\n {'name': 'x_last',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude of the last fix at the surface'},\n {'name': 'y_last',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude of the last fix at the surface'},\n {'name': 't_last',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time of the last fix at the surface'},\n {'name': 'n_fix',\n 'unit': '',\n 'long_name': 'Number of surface fixes'},\n {'name': 'float_id',\n 'unit': '',\n 'long_name': 'Float ID'},\n {'name': 'n_cycle',\n 'unit': '',\n 'long_name': 'Cycle number'},\n {'name': 'inv_flag',\n 'unit': '',\n 'long_name': 'Time inversion/duplication flag'},\n ]\n\n dtype = [np.int32 if i in [24, 25, 26]\n else np.byte if i == 27\n else np.float32\n for i in range(28)]\n\n #=====================================================================\n # Set up the output file\n #=====================================================================\n var = []\n\n # get file length\n length = 0\n with open(finame, 'r') as fi:\n for line in fi:\n length += 1\n\n # create the out-file\n fo = nc.Dataset(foname, mode='w', format='NETCDF4', clobber=True)\n\n # create dims and vlan data type\n fo.createDimension('id', size=length)\n id_v = fo.createVariable('id', np.int64, 'id',\n zlib=zlib, fill_value=MISS_OUT)\n id_v[:] = range(1, length + 1)\n\n for i in range(len(variables)):\n v_dict = variables[i]\n v_obj = fo.createVariable(v_dict['name'], dtype[i], 'id', zlib=zlib,\n fill_value=missing[i])\n v_obj.units = v_dict['unit']\n v_obj.long_name = v_dict['long_name']\n var.append(v_obj)\n\n #=====================================================================\n # read and write the data\n 
#=====================================================================\n buf = [[] for i in range(len(variables))]\n idx = 0\n with open(finame, 'r') as fi:\n old_idx = idx\n for line in fi:\n idx += 1\n line = line.strip()\n [buf[i].append(dtype[i](val)) if val != missing[i]\n else buf[i].append(dtype[i](MISS_OUT))\n for i, val in enumerate(line.split())]\n # write chunk to disk and clear buffer\n if np.mod(idx, line_buffer) == 0:\n# id_v[old_idx:idx-1] = range(old_idx + 1,\n# len(buf[i][:]) + old_idx + 1)\n for i in range(len(variables)):\n var[i][old_idx:idx] = np.ma.array(\n buf[i],\n mask=[val == dtype[i](MISS_OUT)\n for val in buf[i]])\n\n old_idx = idx\n buf = [[] for i in range(len(variables))]\n # write last peace to file\n if old_idx != idx:\n# id_v[old_idx:idx - 1] = range(old_idx + 1, len(buf[i][:]) + old_idx + 1)\n for i in range(len(variables)):\n var[i][old_idx:idx] = np.ma.array(buf[i],\n mask=[val == dtype[i](MISS_OUT)\n for val in buf[i]])\n\n #=====================================================================\n # clean up and finish\n #=====================================================================\n fo.close()\n print \"yomaha2nc4 done after % 12.6f seconds\" % (tm.time() - tic)\n\n return None", "def nctoflt(ncfile, fltstem, varname, iz=0):\n\n ncobj = nh.nc3_open(ncfile,'r')\n a = ncobj.variables[varname]\n # Copy out into a numpy array and make sure we have only\n # 2 dimensions and type float32.\n b = numpy.float32(ncobj.variables[varname])\n if len(b.shape) < 2 or len(b.shape) > 3:\n raise ValueError(\"Only 2D and 3D data allowed (not \"+len(b.shape)+\"D)\")\n if len(b.shape) == 3:\n b = numpy.float32(b[iz,::,::].reshape(b.shape[1], b.shape[2]))\n fillValue = numpy.float32(ncobj.variables[varname]._FillValue)\n\n latvec = ncobj.variables['latitude']\n lonvec = ncobj.variables['longitude']\n lat1 = latvec[0]\n lat2 = latvec[len(latvec)-1]\n # Reverse if latitude runs South to North\n if lat1 < lat2:\n x = lat2\n lat2 = lat1\n lat1 = x\n b = b[::-1,]\n lon1 = lonvec[0]\n lon2 = lonvec[len(lonvec)-1]\n \n dlat = abs(lat1-lat2)/(len(latvec)-1)\n dlon = abs(lon2-lon1)/(len(lonvec)-1)\n xll = lon1-dlon*0.5\n yll = lat2-dlat*0.5\n\n\n fltname = fltstem+'.flt'\n if os.path.exists(fltname): os.unlink(fltname)\n b.tofile(fltname)\n f = file(fltstem+\".hdr\",\"w\")\n \n f.write(\"ncols %d\\n\" % b.shape[1])\n f.write(\"nrows %d\\n\" % b.shape[0])\n f.write(\"xllcorner %f\\n\" % xll)\n f.write(\"yllcorner %f\\n\" % yll)\n f.write(\"cellsize %f\\n\" % dlon)\n f.write(\"NODATA_value %f\\n\" % fillValue)\n if sys.byteorder == \"little\":\n f.write(\"byteorder LSBFIRST\\n\")\n else:\n f.write(\"byteorder LSBLAST\\n\")\n f.close()\n attr = nh.nc3_get_attributes(ncobj)\n nh.nc3_close(ncobj)\n return attr", "def fixdims(var):\n\t\n\tfor ii in var.dims:\n\t\tkk=ii[:3].lower()\n\t\t\n\t\tif kk == 'lat':\n\t\t\tvar=var.rename({ii:'lat'})\n\t\t\t\n\t\tif kk == 'lon':\n\t\t\tvar=var.rename({ii:'lon'})\n\t\t\t\n\t\tif kk == 'lev' or kk == 'ple' or kk == 'pre':\n\t\t\tvar=var.rename({ii:'level'})\n\n\t\tif kk == 'tim':\n\t\t\tvar=var.rename({ii:'time'})\n\n\treturn var", "def nc2generic(native_img):\n n_records = native_img.lat.shape[0]\n generic_data = get_template_ASCATL2_SMX(n_records)\n\n fields = [('jd', 'jd'),\n ('sat_id', None),\n ('abs_line_nr', None),\n ('abs_orbit_nr', None),\n ('node_num', 'node_num'),\n ('line_num', 'line_num'),\n ('as_des_pass', 'as_des_pass'),\n ('swath', 'swath_indicator'),\n ('azif', None),\n ('azim', None),\n ('azia', None),\n ('incf', None),\n 
('incm', None),\n ('inca', None),\n ('sigf', None),\n ('sigm', None),\n ('siga', None),\n ('sm', 'soil_moisture'),\n ('sm_noise', 'soil_moisture_error'),\n ('sm_sensitivity', 'soil_moisture_sensitivity'),\n ('sig40', 'sigma40'),\n ('sig40_noise', 'sigma40_error'),\n ('slope40', 'slope40'),\n ('slope40_noise', 'slope40_error'),\n ('dry_backscatter', 'dry_backscatter'),\n ('wet_backscatter', 'wet_backscatter'),\n ('mean_surf_sm', 'mean_soil_moisture')]\n\n for field in fields:\n if field[1] is None:\n continue\n\n if type(native_img.data[field[1]]) == np.ma.core.MaskedArray:\n valid_mask = ~native_img.data[field[1]].mask\n generic_data[field[0]][valid_mask] = native_img.data[field[1]][\n valid_mask]\n else:\n generic_data[field[0]] = native_img.data[field[1]]\n\n if 'abs_line_number' in native_img.data:\n generic_data['abs_line_nr'] = native_img.data['abs_line_number']\n\n # flag_fields need to be treated differently since they are not masked\n # arrays so we need to check for nan values\n flags = [('correction_flag', 'corr_flags'),\n # There is a processing flag but it is different to the other\n # formats\n ('processing_flag', None),\n ('aggregated_quality_flag', 'aggregated_quality_flag'),\n ('snow_cover_probability', 'snow_cover_probability'),\n ('frozen_soil_probability', 'frozen_soil_probability'),\n ('innudation_or_wetland', 'wetland_flag'),\n ('topographical_complexity', 'topography_flag')]\n\n for field in flags:\n if field[1] is None:\n continue\n\n valid_mask = (native_img.data[field[1]] != ubyte_nan)\n generic_data[field[0]][valid_mask] = native_img.data[field[1]][\n valid_mask]\n\n fields = [('sat_id', 'sat_id'),\n ('abs_orbit_nr', 'orbit_start')]\n\n for field in fields:\n generic_data[field[0]] = np.repeat(native_img.metadata[field[1]],\n n_records)\n\n # convert sat_id (spacecraft id) to the intern definition\n sat_id_lut = np.array([0, 4, 3, 5])\n generic_data['sat_id'] = sat_id_lut[generic_data['sat_id']]\n\n img = Image(native_img.lon, native_img.lat, generic_data,\n native_img.metadata, native_img.timestamp,\n timekey='jd')\n\n return img", "def ncread(file, vars=None, dims=False, noisy=False, atts=False, datetimes=False):\n\n # Set to True when we've converted from Modified Julian Day so we don't\n # end up doing the conversion twice, once for `Times' and again for\n # `time' if both variables have been requested in `vars'.\n done_datetimes = False\n got_itime = False\n got_itime2 = False\n # Check whether we'll be able to fulfill the datetime request.\n if datetimes and vars and not list(set(vars) & set(('Times', 'time', 'Itime', 'Itime2'))):\n raise ValueError(\"Conversion to python datetimes has been requested \"\n \"but no time variable (`Times', `time', `Itime' or `Itime2') has been \"\n \"requested in vars.\")\n\n # If we have a list, assume it's lots of files and load them all. Only use\n # MFDataset on lists of more than 1 file.\n if isinstance(file, list) and len(file) > 1:\n try:\n try:\n rootgrp = MFDataset(file, 'r')\n except IOError as msg:\n raise IOError('Unable to open file {} ({}). Aborting.'.format(file, msg))\n except:\n # Try aggregating along a 'time' dimension (for POLCOMS,\n # for example).\n try:\n rootgrp = MFDataset(file, 'r', aggdim='time')\n except IOError as msg:\n raise IOError('Unable to open file {} ({}). 
Aborting.'.format(file, msg))\n elif isinstance(file, list) and len(file) == 1:\n rootgrp = Dataset(file[0], 'r')\n else:\n rootgrp = Dataset(file, 'r')\n\n # Create a dict of the dimension names and their current sizes\n read_dims = {}\n for key, var in list(rootgrp.dimensions.items()):\n # Make the dimensions ranges so we can use them to extract all the\n # values.\n read_dims[key] = '0:{}'.format(str(len(var)))\n\n # Compare the dimensions in the netCDF file with those provided. If we've\n # been given a dict of dimensions which differs from those in the netCDF\n # file, then use those.\n if dims:\n common_keys = set(read_dims).intersection(list(dims.keys()))\n for k in common_keys:\n read_dims[k] = dims[k]\n\n if noisy:\n print(\"File format: {}\".format(rootgrp.file_format))\n\n if not vars:\n vars = iter(list(rootgrp.variables.keys()))\n\n FVCOM = {}\n\n # Save the dimensions in the attributes dict.\n if atts:\n attributes = {}\n attributes['dims'] = read_dims\n attributes['global'] = {}\n for g in rootgrp.ncattrs():\n attributes['global'][g] = getattr(rootgrp, g)\n\n for key, var in list(rootgrp.variables.items()):\n if noisy:\n print('Found {}'.format(key), end=' ')\n sys.stdout.flush()\n\n if key in vars:\n var_dims = rootgrp.variables[key].dimensions\n\n to_extract = [read_dims[d] for d in var_dims]\n\n # If we have no dimensions, we must have only a single value, in\n # which case set the dimensions to empty and append the function to\n # extract the value.\n if not to_extract:\n to_extract = '.getValue()'\n\n # Thought I'd finally figured out how to replace the eval approach,\n # but I still can't get past the indexing needed to be able to\n # subset the data.\n # FVCOM[key] = rootgrp.variables.get(key)[0:-1]\n # I know, I know, eval() is evil.\n get_data = 'rootgrp.variables[\\'{}\\']{}'.format(key, str(to_extract).replace('\\'', ''))\n FVCOM[key] = eval(get_data)\n\n # Get all attributes for this variable.\n if atts:\n attributes[key] = {}\n # Grab all the attributes for this variable.\n for varatt in rootgrp.variables[key].ncattrs():\n attributes[key][varatt] = getattr(rootgrp.variables[key], varatt)\n\n if datetimes and key in ('Times', 'time', 'Itime', 'Itime2') and not done_datetimes:\n # Convert the time data to datetime objects. How we do this\n # depends on which we hit first - `Times', `time', `Itime' or\n # `Itime2'. 
For the former, we need to parse the strings, for the\n # latter we can leverage num2date from the netCDF4 module and\n # use the time units attribute.\n if key == 'Times':\n try:\n # Check if we've only extracted a single time step, in\n # which case we don't need to use a list comprehension\n # to get the datetimes.\n if isinstance(FVCOM[key][0], np.ndarray):\n FVCOM['datetime'] = np.asarray([datetime.strptime(''.join(i).strip(), '%Y-%m-%dT%H:%M:%S.%f') for i in FVCOM[key].astype(str)])\n else:\n FVCOM['datetime'] = np.asarray(datetime.strptime(''.join(FVCOM[key].astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f'))\n except ValueError:\n # Try a different format before bailing out.\n if isinstance(FVCOM[key][0], np.ndarray):\n FVCOM['datetime'] = np.asarray([datetime.strptime(''.join(i).strip(), '%Y/%m/%d %H:%M:%S.%f') for i in FVCOM[key].astype(str)])\n else:\n FVCOM['datetime'] = np.asarray(datetime.strptime(''.join(FVCOM[key].astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f'))\n done_datetimes = True\n elif key == 'time':\n FVCOM['datetime'] = num2date(FVCOM[key],\n rootgrp.variables[key].units)\n done_datetimes = True\n elif key == 'Itime':\n got_itime = True\n elif key == 'Itime2':\n got_itime2 = True\n\n if noisy:\n if len(str(to_extract)) < 60:\n print('(extracted {})'.format(str(to_extract).replace('\\'', '')))\n else:\n print('(extracted given indices)')\n\n elif noisy:\n print()\n\n # If: 1. we haven't got datetime in the output 2. we've been asked to get it and 3. we've got both Itime and\n # Itime2, then make datetime from those.\n if datetimes and got_itime and got_itime2 and 'datetime' not in FVCOM:\n FVCOM['datetime'] = num2date(FVCOM['Itime'] + (FVCOM['Itime2'] / 1000 / 24 / 60 / 60),\n rootgrp.variables['Itime'].units)\n\n # Close the open file.\n rootgrp.close()\n\n if atts:\n return FVCOM, attributes\n else:\n return FVCOM", "def nc_encode(ds):\n for var in data_vars:\n ds[var].encoding[\"_FillValue\"] = 1.0e20\n # ds[var].encoding[\"coordinates\"] = \"{} {}\".format(lon, lat)\n for coord in ds.coords.values():\n coord.encoding[\"_FillValue\"] = None\n for coord in [lon_vertices, lat_vertices]:\n ds[coord].encoding = {\"_FillValue\": None}\n return ds", "def expand_var(nc, out, name, direction):\n if name == direction:\n return\n\n var1 = nc.variables[name]\n\n print(\"Processing %s...\" % name)\n\n # Copy coordinate variables and stop:\n if name in ['t', 'z', 'y', 'x', 'zb']:\n var2 = out.createVariable(name, var1.dtype, (name,))\n var2[:] = var1[:]\n copy_attributes(var1, var2)\n return\n\n dims = var1.dimensions\n if len(dims) == 1:\n dims = ('y', 'x')\n elif len(dims) == 2:\n dims = ('t', 'y', 'x')\n elif len(dims) == 3:\n if name == \"litho_temp\": # litho_temp is the only variable depending on 'zb'.\n dims = ('t', 'zb', 'y', 'x')\n else:\n dims = ('t', 'z', 'y', 'x')\n\n var2 = out.createVariable(name, var1.dtype, dims)\n copy_attributes(var1, var2)\n\n for j in range(3):\n if direction == 'x':\n var2[get_slice(var2.dimensions, x=j)] = permute(var1)\n elif direction == 'y':\n var2[get_slice(var2.dimensions, y=j)] = permute(var1)", "def createnc(ncfout,xlat,xlon,times=None,zvals=None,wsvals=None,\\\n wdvals=None,olvals=None,attbts=None,ftype=\"timeseries\",dims=[7,180,180]):\n nc_out=nc.Dataset(ncfout,'w',clobber=True)\n\n # Set Attributes to the File\n if attbts is not None:\n final_attbts={}\n # Define projection\n proj_lcc = pj_lcc = Proj(\"+proj=lcc +lat_1={TRUELAT1} +lat_2={TRUELAT2} +lat_0={MOAD_CEN_LAT} +lon_0={STAND_LON} +x_0=0 +y_0=0 +a=6370000 
+b=6370000\".format(**attbts))\n\n # Get x&y of domain center\n xcen, ycen = pj_lcc(attbts['CEN_LON'], attbts['CEN_LAT'])\n\n for key in attbts:\n if str(key).find(\"STAG\") <= 0 : # Remove Staggered Grid Information\n final_attbts.update({key:attbts[key]})\n nc_out.setncatts(final_attbts)\n # Create a CRS Variable for the Projection (GIS Readability)\n crsv=nc_out.createVariable('crs','c')\n crsv.semi_major_axis = 6370000.0\n crsv.inverse_flattening = 0.0\n crsv.grid_mapping_name = \"lambert_conformal_conic\"\n crsv.longitude_of_central_meridian = attbts[\"STAND_LON\"]\n crsv.false_easting = 0.0\n crsv.false_northing = 0.0\n crsv.latitude_of_projection_origin = attbts[\"MOAD_CEN_LAT\"]\n crsv.standard_parallel = [attbts[\"TRUELAT1\"],attbts[\"TRUELAT2\"]]\n crsv.longitude_of_prime_meridian = 0.0\n crsv.proj = proj_lcc.srs\n\n\n\n # Override Institution and Experiment\n nc_out.INSTITUTION=INSTITUTION\n nc_out.EXPERIMENT=EXPERIMENT\n nc_out.Conventions=\"CF-1.6\"\n\n # Create Dimensions First\n if ftype==\"timeseries\":\n nc_out.TITLE='Timeseries of the New European Wind Atlas from WRF V3.8.1'\n nc_out.createDimension('time',None)\n nc_out.createDimension('DateStrLen',19)\n nc_out.createDimension('height',dims[0])\n nc_out.createDimension('south_north',dims[1])\n nc_out.createDimension('west_east',dims[2])\n # Create Time Vector as Integer\n timesn = nc_out.createVariable('time','i8',('time',))\n timesn.units = \"minutes since 1900-01-01 00:00:00.0\"\n timesn.calendar = \"gregorian\"\n timesn.long_name = \"Time\"\n timesn.standard_name = \"time\"\n timesn[:] = nc.date2num(createdatv(times),units=timesn.units,calendar=timesn.calendar)\n # Create additional Time Vector as Character\n timesc = nc_out.createVariable('Times', 'c', ('time','DateStrLen'))\n timesc.format = \"YYYY-MM-DD_HH:MM:SS\"\n timesc.long_name = \"Time\"\n timesc[:] = times[:]\n # Height\n hgts = nc_out.createVariable('height','f4',('height',))\n hgts.units=\"m\"\n hgts.long_name=\"Height above Ground\"\n hgts.standard_name=\"height\"\n hgts[:] = zvals\n # y\n south_north = nc_out.createVariable('south_north','f4',('south_north',))\n south_north.long_name = \"y-coordinate in Cartesian system\"\n south_north.units = \"m\"\n\n dy = attbts[\"DY\"]\n ny = attbts[\"SOUTH-NORTH_PATCH_END_UNSTAG\"]\n ymin = ycen - dy * (ny - 1) / 2\n s_n = np.linspace(0, ny-1, ny) * dy + ymin\n south_north[:] = s_n\n\n # x\n west_east = nc_out.createVariable('west_east','f4',('west_east',))\n west_east.long_name = \"x-coordinate in Cartesian system\"\n west_east.units = \"m\"\n\n dx = attbts[\"DX\"]\n nx = attbts[\"WEST-EAST_PATCH_END_UNSTAG\"]\n xmin = xcen - dx * (nx - 1) / 2\n e_w = np.linspace(0, nx-1, nx) * dx + xmin\n west_east[:] = e_w\n\n elif ftype==\"roughness\":\n nc_out.title='NEWA Roughness'\n nc_out.createDimension('south_north',dims[0])\n nc_out.createDimension('west_east',dims[1])\n\n elif ftype==\"tabfile\":\n nc_out.title='NEWA WasP Tab File'\n nc_out.createDimension('south_north',dims[0])\n nc_out.createDimension('west_east',dims[1])\n nc_out.createDimension('sector',dims[2])\n nc_out.createDimension('wind',dims[3])\n nc_out.createDimension('stab',dims[4])\n\n # Wind Speed Class\n wscl = nc_out.createVariable('wspdCl','f4',('wind',))\n wscl.units=\"ms-1\"\n wscl.long_name=\"Velocity of bin centre\"\n wscl[:] = wsvals\n\n # Wind Speed Class\n wdcl = nc_out.createVariable('wdirCl','f4',('sector',))\n wdcl.units=\"ms-1\"\n wdcl.long_name=\"Velocity of bin centre\"\n wdcl[:] = wdvals\n\n # Stability\n lcl = 
nc_out.createVariable('Ltypical','f4',('stab',))\n lcl.units=\"m\"\n lcl.long_name=\"L typical\"\n lcl[:] = olvals\n\n # Lat and Lon\n lats = nc_out.createVariable(\"XLAT\", 'f4', ('south_north','west_east'), zlib=True,complevel=9)\n lats[:] = xlat[:]\n lats.units=\"degree_north\"\n lats.long_name=\"Center Latitude of Grid Cell\"\n lats.standard_name=\"latitude\"\n lons = nc_out.createVariable(\"XLON\", 'f4', ('south_north','west_east'), zlib=True,complevel=9)\n lons[:] = xlon[:]\n lons.units=\"degree_east\"\n lons.long_name=\"Center Longitude of Grid Cell\"\n lons.standard_name=\"longitude\"\n nc_out.close()\n return(None)", "def cshort(queue=None):\n return np.float16", "def __init__(self, filename):\n self.ncfile = Dataset(filename, 'w', format='NETCDF4')\n self.scalar_dim = self.ncfile.createDimension('scalar', 1)\n self.string_dim = self.ncfile.createDimension('string', 0)\n self.ncfile.createDimension('iteration', None)\n self.ncfile.createDimension('attempt', None)", "def quality(self): \n\n subsetInt = [int(s) for s in self.subset.split() if s.isdigit()]\n columnNames = [] \n for i in range(len(subsetInt)):\n if subsetInt[i] == 1:\n columnNames.append(self.varNames[i])\n\n #qualityBand number of subset\n q = columnNames.index('Quality') \n\n if subsetInt[self.qualityBand] == 1:\n dataCount = self.subset.count('1')\n QC = np.repeat(self.DC[:,q].reshape((self.DC.shape[0],1)), dataCount-1, axis = 1)\n if self.dataset == 'MOD09A1.005' or self.dataset == 'MOD13Q1.005':\n QC = np.uint16(QC)\n else:\n QC = np.uint8(QC)\n\n QCm = QC & 1 #flips DCm mask\n DCm = np.delete(self.DC, q, 1) #looks good\n \n DCm = np.ma.masked_where(QCm == 1, DCm)\n DCm = np.ma.masked_where(DCm == 9999.0, DCm) \n \n if len(self.tiles) > 1:\n obs = self.observations/len(self.tiles)\n if len(self.tiles) == 1:\n obs = self.observations/2\n \n outArray = np.empty(shape = (self.rows*self.columns*obs, 0))\n for b in range(0, self.DC.shape[1]-1):\n cfull = DCm[:,b].reshape((self.observations, self.rows, self.columns))\n b16 = np.empty(shape = (self.rows*self.columns*obs, 0))\n for band in range(0,cfull.shape[0],2):\n c16 = np.ma.mean(cfull[band:band+1,:,:], axis=0)\n c16f = np.ma.filled(c16, 9999.0).astype(float).reshape((self.rows*self.columns))\n b16 = np.append(b16, c16f)\n outArray = np.append(outArray, b16.reshape((obs*self.rows*self.columns, 1)), axis = 1)\n \n self.finalDC = outArray\n \n np.save(str(self.directory) + '/' + self.dataset + '.npy', self.finalDC)\n del outArray, QC, DCm\n\n outfile = str(self.directory) + '/' + self.dataset + '.txt'\n f = open(outfile, 'w')\n for name in columnNames:\n if name != 'Quality':\n f.write(name + '\\n')\n var = [a for a in columnNames if not a.startswith('Quality')]\n logger.log('SUCCESS', 'The final 16-day interval quality-masked matrix was created successfully. This matrix has dimensions %d rows by %d columns. 
Datasets included in the matrix are %s' % (self.finalDC.shape[0], self.finalDC.shape[1], var))\n \n \n if subsetInt[self.qualityBand] != 1:\n cleanDC = np.delete(self.DC, q, 1)\n \n \n if len(self.tiles) > 1:\n obs = self.observations/len(self.tiles)\n if len(self.tiles) == 1:\n obs = self.observations/2\n \n outArray = np.empty(shape = (self.rows*self.columns*obs, 0))\n for b in range(cleanDC.shape[1]):\n cfull = cleanDC[:,b].reshape((self.observations, self.rows, self.columns))\n b16 = np.empty(shape=(self.rows*self.columns*obs))\n for band in range(cfull.shape[0]):\n c16 = np.mean(cfull[band:band+1,:,:], axis=0)\n band16 = np.append(b16, c16, axis=0)\n outArray = np.append(outArray, b16.reshape((obs*self.rows*self.columns, 1)), axis = 1)\n\n np.save(self.directory + '/' + self.dataset + '.npy', self.finalDC)\n del cleanDC, outArray\n \n outfile = self.directory + '/' + self.dataset + '.txt'\n f = open(outfile, 'w')\n for name in columnNames:\n if name != 'Quality':\n f.write(str(name) + ' \\n')\n var = [a for a in columnNames if not a.startswith('Quality')]\n logger.log('SUCCESS', 'The final 16-day interval matrix was created successfully. A quality mask was not applied, though remaining no data values are set at 9999. This matrix has dimensions %d rows by %d columns. Datasets included in the matrix are %s' % (self.finalDC.shape[0], self.finalDC.shape[1], var))", "def read_satellite(filename, ftype):\n #ftype = 'l3c'\n #filename = '/gws/nopw/j04/cds_c3s_sst/output/v2.6.0/l3c/AVHRR19_G/2018/03/01/20180301120000-C3S-L3C_GHRSST-SSTskin-AVHRR19_G-ICDR2.0_day-v02.0-fv01.0.nc'\n #ftype = 'l4'\n #filename = '/gws/nopw/j04/cds_c3s_sst/public/data/ICDR_v2/Analysis/L4/v2.0/2018/01/01/20180101120000-C3S-L4_GHRSST-SSTdepth-OSTIA-GLOB_ICDR2.0-v02.0-fv01.0.nc'\n print \"Reading %s file: %s\" % (ftype, filename)\n \n # Read data - L4 or L3C (note L4 mask and L3C quality level have same array name)\n ncin = netCDF4.Dataset(filename)\n if ftype == 'l4':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n sst = ncin.variables['analysed_sst'][:]\n unc = ncin.variables['analysis_uncertainty'][:]\n sea_ice_frac = ncin.variables['sea_ice_fraction'][:]\n ql = ncin.variables['mask'][:]\n sstfill = ncin.variables['analysed_sst']._FillValue\n sstao = ncin.variables['analysed_sst'].add_offset\n sstsf = ncin.variables['analysed_sst'].scale_factor\n elif ftype == 'l3c':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n time_bnds = ncin.variables['time_bnds'][:]\n sst = ncin.variables['sea_surface_temperature'][:]\n sst_depth = ncin.variables['sea_surface_temperature_depth'][:]\n sst_dtime = ncin.variables['sst_dtime'][:]\n sst_depth_dtime = ncin.variables['sst_depth_dtime'][:]\n sses_bias = ncin.variables['sses_bias'][:]\n sses_sd = ncin.variables['sses_standard_deviation'][:]\n sst_depth_total_unc = ncin.variables['sst_depth_total_uncertainty'][:]\n l2p_flags = ncin.variables['l2p_flags'][:]\n ql = ncin.variables['quality_level'][:]\n wind_speed = ncin.variables['wind_speed'][:]\n large_scale_cor_unc = ncin.variables['large_scale_correlated_uncertainty'][:]\n synop_cor_unc = ncin.variables['synoptically_correlated_uncertainty'][:]\n uncor_unc = ncin.variables['uncorrelated_uncertainty'][:]\n adj_unc = ncin.variables['adjustment_uncertainty'][:]\n aerosol_dyn_ind = ncin.variables['aerosol_dynamic_indicator'][:]\n sens = ncin.variables['sensitivity'][:]\n tfill = ncin.variables['sst_dtime']._FillValue\n 
sstfill = ncin.variables['sea_surface_temperature']._FillValue\n sstao = ncin.variables['sea_surface_temperature'].add_offset\n sstsf = ncin.variables['sea_surface_temperature'].scale_factor\n else:\n print 'ftype not recognised or supported'\n \n # Create time field\n # -> If L4 then create a time field set to time in L4 file\n # -> Also add a time fill value to keep coding simple later on\n if ftype == 'l4':\n time = np.empty((7200,3600))\n time[:,:] = time_read\n tfill = -2147483648\n else:\n time = copy.deepcopy(sst_dtime) # Need to make a hard copy\n mask = sst_dtime.mask == False; mask = mask[0,:,:]\n row, col = np.where(mask==True)\n time.data[0, row, col] = time.data[0,row, col] + time_read\n \n # Create output structure\n if ftype == 'l4':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n sst=sst,\n unc=unc,\n sea_ice_frac=sea_ice_frac,\n ql=ql,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n elif ftype == 'l3c':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n time_bnds=time_bnds,\n sst=sst,\n sst_depth=sst_depth,\n sst_dtime=sst_dtime,\n sst_depth_dtime=sst_depth_dtime,\n sses_bias=sses_bias,\n sses_sd=sses_sd,\n sst_depth_total_unc=sst_depth_total_unc,\n l2p_flags=l2p_flags,\n ql=ql,\n wind_speed=wind_speed,\n large_scale_cor_unc=large_scale_cor_unc,\n synop_cor_unc=synop_cor_unc,\n uncor_unc=uncor_unc,\n adj_unc=adj_unc,\n aerosol_dyn_ind=aerosol_dyn_ind,\n sens=sens,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n else:\n print 'ftype not recognised or supported'\n \n return data", "def save_nc(output_file_path_str,data_arr,var_name,var_type,var_unit,longname,\r\n datetime_object,description,dt=None,verbose=False):\r\n \r\n ## Put input data into lists (if not provided as list):\r\n if type(data_arr) is not list: data_arr = [data_arr]\r\n if type(var_name) is not list: var_name = [var_name]\r\n if type(var_type) is not list: var_type = [var_type]\r\n if type(var_unit) is not list: var_unit = [var_unit]\r\n if type(longname) is not list: longname = [longname]\r\n if type(datetime_object) is not list: datetime_object = [datetime_object]\r\n \r\n ## Check length of input data:\r\n if len(set(map(np.shape, data_arr))) > 1:\r\n raise ValueError('variable arrays are not of the same shape (%s)' %\r\n map(np.shape, data_arr))\r\n if len(datetime_object)!=1 and len(datetime_object)!=data_arr[0].shape[0]:\r\n raise ValueError('length of datetime object (%s) is unequal '+\r\n 'to one or the first dimension of the variable array (%s)' %\r\n (len(datetime_object),data_arr[0].shape[0]))\r\n elif len(var_name)!=len(data_arr):\r\n raise ValueError('length of var_name list (%s) is unequal '+\r\n 'to the number of variable arrays (%s)' %\r\n (len(var_name),len(data_arr)))\r\n #elif len(var_name)!=1 and len(var_name)!=len(data_arr):\r\n # raise ValueError('length of var_name list (%s) is unequal '+\r\n # 'to one or the first dimension of the variable array (%s)' %\r\n # (len(var_name),len(data_arr)))\r\n elif len(var_type)!=1 and len(var_type)!=len(data_arr):\r\n raise ValueError('length of var_type list (%s) is unequal '+\r\n 'to one or the first dimension of the variable array (%s)' %\r\n (len(var_name),len(data_arr)))\r\n elif len(var_unit)!=1 and len(var_unit)!=len(data_arr):\r\n raise ValueError('length of var_unit list (%s) is unequal '+\r\n 'to one or the first dimension of the variable array (%s)' %\r\n (len(var_unit),len(data_arr)))\r\n elif len(longname)!=1 and len(longname)!=len(data_arr):\r\n raise 
ValueError('length of longname list (%s) is unequal '+\r\n 'to one or the first dimension of the variable array (%s)' %\r\n (len(longname),len(data_arr)))\r\n if len(datetime_object)==1 and len(datetime_object)!=len(data_arr) and dt is None:\r\n raise ValueError('length of time step has to be provided')\r\n\r\n if not exists(dirname(output_file_path_str)):\r\n print('... create output directory: ' + dirname(output_file_path_str))\r\n makedirs(dirname(output_file_path_str))\r\n \r\n ## Create NetCDF file:\r\n dataset = Dataset(output_file_path_str,\r\n 'w', format='NETCDF4_CLASSIC')\r\n dataset.history = 'Created ' + datetime.datetime.now().strftime(\"%d.%m.%Y %H:%M\")\r\n dataset.description = description\r\n \r\n ## Dimension creation:\r\n x = dataset.createDimension('x', data_arr[0].shape[2])\r\n y = dataset.createDimension('y', data_arr[0].shape[1])\r\n time = dataset.createDimension('time', None) # data_arr.shape[0])\r\n \r\n ## Auxilary variable creation:\r\n x_axis = dataset.createVariable('x', np.float32, ('x',))\r\n y_axis = dataset.createVariable('y', np.float32, ('y',))\r\n times = dataset.createVariable('time', np.int16, ('time',)) # u8 or i8 does not work...\r\n times.calendar = 'standard'\r\n times.units = 'minutes since %s' % datetime_object[0].strftime(\"%Y-%m-%d %H:%M:%S\")\r\n #times.units = 'seconds since 1970-01-01 00:00:00.0'\r\n \r\n ## Create time stamps variable:\r\n if len(datetime_object)==1:\r\n datetime_list = datetime_object - np.arange(data_arr[0].shape[0])*datetime.timedelta(minutes=dt)\r\n else: datetime_list = datetime_object\r\n times[:] = date2num(datetime_list,units=times.units)\r\n \r\n ## Create spatial coordinate variable:\r\n y_axis.units = 'Swiss northing CH1903 [km]'\r\n x_axis.units = 'Swiss easting CH1903 [km]'\r\n x_axis[:] = np.arange(255,965)+0.5\r\n y_axis[::-1] = np.arange(-160,480)+0.5\r\n \r\n ## Data variable creation:\r\n var_name_list = var_name #if len(var_name)==1 else var_name*len(data_arr)\r\n var_type_list = var_type if len(var_name)==1 else var_type*len(data_arr)\r\n\r\n ## Write data into variables:\r\n id_var_list=[]\r\n for i in range(len(data_arr)):\r\n #if \"int\" in var_type_list[i]:\r\n # id_var_list.append(dataset.createVariable(var_name_list[i],var_type_list[i],\r\n # ('time','y','x'),zlib=True))\r\n #else:\r\n id_var_list.append(dataset.createVariable(var_name_list[i],var_type_list[i],\r\n ('time','y','x'),zlib=True,\r\n least_significant_digit=3)) #,least_significant_digit=2\r\n id_var_list[i].setncatts({'long_name': longname[i],'units': var_unit[i]})\r\n id_var_list[i][:,:,:] = data_arr[i]\r\n\r\n ## Close file:\r\n dataset.close()\r\n if verbose: print(\" Written NetCDF file for: %s (%s)\" %\r\n (description,datetime_object[0].strftime(\"%d.%m.%y %H:%M\")))", "def test_netCDF_field_components(self):\n # Geometries\n f = cfdm.example_field(6)\n\n for component in (\"interior_ring\", \"node_count\", \"part_node_count\"):\n f.nc_set_component_variable(component, \"ncvar\")\n f.nc_set_component_variable_groups(component, [\"forecast\"])\n\n f.nc_clear_component_variable_groups(component)\n f.nc_del_component_variable(component)\n\n f.nc_del_component_variable(component)\n f.nc_clear_component_variable_groups(component)\n\n f.nc_set_component_variable(component, \"ncvar\")\n f.nc_set_component_variable_groups(component, [\"forecast\"])\n\n for component in (\"interior_ring\", \"part_node_count\"):\n f.nc_set_component_dimension(component, \"ncvar\")\n f.nc_set_component_dimension_groups(component, [\"forecast\"])\n\n 
f.nc_clear_component_dimension_groups(component)\n f.nc_del_component_dimension(component)\n\n f.nc_del_component_dimension(component)\n f.nc_clear_component_dimension_groups(component)\n\n f.nc_set_component_dimension(component, \"ncvar\")\n f.nc_set_component_dimension_groups(component, [\"forecast\"])\n\n # Compression: indexed and contiguous\n f = cfdm.example_field(4)\n f.compress(\"indexed_contiguous\", inplace=True)\n\n for component in (\"count\", \"index\"):\n f.nc_set_component_variable(component, \"ncvar\")\n f.nc_set_component_variable_groups(component, [\"forecast\"])\n\n f.nc_clear_component_variable_groups(component)\n f.nc_del_component_variable(component)\n\n f.nc_del_component_variable(component)\n f.nc_clear_component_variable_groups(component)\n\n f.nc_set_component_variable(component, \"ncvar\")\n f.nc_set_component_variable_groups(component, [\"forecast\"])\n\n for component in (\"count\", \"index\"):\n f.nc_set_component_dimension(component, \"ncvar\")\n f.nc_set_component_dimension_groups(component, [\"forecast\"])\n\n f.nc_clear_component_dimension_groups(component)\n f.nc_del_component_dimension(component)\n\n f.nc_del_component_dimension(component)\n f.nc_clear_component_dimension_groups(component)\n\n f.nc_set_component_dimension(component, \"ncvar\")\n f.nc_set_component_dimension_groups(component, [\"forecast\"])\n\n for component in (\"count\", \"index\"):\n f.nc_set_component_sample_dimension(component, \"ncvar\")\n f.nc_set_component_sample_dimension_groups(component, [\"forecast\"])\n\n f.nc_clear_component_sample_dimension_groups(component)\n f.nc_del_component_sample_dimension(component)\n\n f.nc_del_component_sample_dimension(component)\n f.nc_clear_component_sample_dimension_groups(component)\n\n f.nc_set_component_sample_dimension(component, \"ncvar\")\n f.nc_set_component_sample_dimension_groups(component, [\"forecast\"])\n\n # Compression: gathered\n component = \"list\"\n\n # Expected exceptions\n for component in (\"list\", \"node_count\"):\n with self.assertRaises(ValueError):\n f.nc_set_component_dimension(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_del_component_dimension(component)\n\n with self.assertRaises(ValueError):\n f.nc_set_component_dimension_groups(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_clear_component_dimension_groups(component)\n\n with self.assertRaises(ValueError):\n f.nc_set_component_sample_dimension(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_del_component_sample_dimension(component)\n\n with self.assertRaises(ValueError):\n f.nc_set_component_sample_dimension_groups(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_clear_component_sample_dimension_groups(component)\n\n # Expected exceptions\n for component in (\"WRONG\",):\n with self.assertRaises(ValueError):\n f.nc_set_component_variable(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_del_component_variable(component)\n\n with self.assertRaises(ValueError):\n f.nc_set_component_variable_groups(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_clear_component_variable_groups(component)", "def build_var_header():\n # var_id will only be populated by... ew.. 
this one is difficult...\n # \"var_id\" # - need to generate these...\n meta_header = OrderedDict()\n meta_header['sample'] = str\n meta_header['chrom'] = str\n meta_header['start'] = np.int16\n meta_header['end'] = np.int16\n meta_header['var_type'] = str\n meta_header['state'] = str\n\n # Target - made from parsing truvari/rtg information\n # \"state\" #\n\n info_header = OrderedDict()\n info_header[\"POP\"] = bool\n info_header[\"VARLEN\"] = np.int32\n info_header[\"NUMASM\"] = np.int16\n \n fmt_header = OrderedDict() \n # Categorical\n # need to categorize these... ./. 0/0 0/1 1/1 # only possibilities\n fmt_header[\"GT\"] = np.int8 \n # fmt_header[\"PG\"] = np.int32 # don't use..\n fmt_header[\"GQ\"] = np.int8\n # fmt_header[\"PI\"] = don't use\n fmt_header[\"OV\"] = np.int8\n fmt_header[\"DP\"] = np.int16\n #split where _r is ref-allele and _a is alt-allele\n fmt_header[\"AD_r\"] = np.int16\n fmt_header[\"AD_a\"] = np.int16\n fmt_header[\"PDP\"] = np.int16\n fmt_header[\"PAD_r\"] = np.int16\n fmt_header[\"PAD_a\"] = np.int16\n fmt_header[\"US_r\"] = np.int16\n fmt_header[\"US_a\"] = np.int16\n fmt_header[\"DS_r\"] = np.int16\n fmt_header[\"DS_a\"] = np.int16\n fmt_header[\"UC_r\"] = np.int16\n fmt_header[\"UC_a\"] = np.int16\n fmt_header[\"DC_r\"] = np.int16\n fmt_header[\"DC_a\"] = np.int16\n fmt_header[\"UDC_r\"] = np.int16\n fmt_header[\"UDC_a\"] = np.int16\n fmt_header[\"UCC_r\"] = np.int16\n fmt_header[\"UCC_a\"] = np.int16\n fmt_header[\"DDC_r\"] = np.int16\n fmt_header[\"DDC_a\"] = np.int16\n fmt_header[\"DCC_r\"] = np.int16\n fmt_header[\"DCC_a\"] = np.int16\n fmt_header[\"UMO_r\"] = np.int16\n fmt_header[\"UMO_a\"] = np.int16\n fmt_header[\"DMO_r\"] = np.int16\n fmt_header[\"DMO_a\"] = np.int16\n fmt_header[\"UXO_r\"] = np.int16\n fmt_header[\"UXO_a\"] = np.int16\n fmt_header[\"DXO_r\"] = np.int16\n fmt_header[\"DXO_a\"] = np.int16\n fmt_header[\"NR_r\"] = np.int16\n fmt_header[\"NR_a\"] = np.int16\n fmt_header[\"MO_r\"] = np.int16\n fmt_header[\"MO_a\"] = np.int16\n fmt_header[\"XO_r\"] = np.int16\n fmt_header[\"XO_a\"] = np.int16\n fmt_header[\"XC_r\"] = np.int16\n fmt_header[\"XC_a\"] = np.int16\n fmt_header[\"AC_r\"] = np.int16\n fmt_header[\"AC_a\"] = np.int16\n fmt_header[\"MC_r\"] = np.int16\n fmt_header[\"MC_a\"] = np.int16\n fmt_header[\"EC_r\"] = np.int16\n fmt_header[\"EC_a\"] = np.int16\n fmt_header[\"PL_ref\"] = np.int8\n fmt_header[\"PL_het\"] = np.int8\n fmt_header[\"PL_hom\"] = np.int8\n\n ret_header = OrderedDict()\n ret_header.update(meta_header)\n ret_header.update(info_header)\n ret_header.update(fmt_header)\n return ret_header", "def inputs_netCDF(ID, fname, data):\n\n from netCDF4 import Dataset #, date2num, num2date\n from datetime import datetime\n\n print('**** creating SpaFHy input netCDF4 file: ' + fname + ' ****')\n \n # create dataset & dimensions\n ncf = Dataset(fname, 'w')\n ncf.description = 'SpatialData from : ' + str(ID)\n ncf.history = 'created ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n ncf.source = 'SpaFHy v.1.0 inputs'\n \n dlat, dlon = np.shape(data['cmask'])\n\n ncf.createDimension('dlon', int(dlon))\n ncf.createDimension('dlat', int(dlat))\n ncf.createDimension('scalar', 1)\n\n # create variables \n # call as createVariable(varname,type,(dimensions))\n cellsize = ncf.createVariable('cellsize', 'f4', ('scalar',))\n cellsize.units = 'm'\n lat = ncf.createVariable('lat', 'f4', ('dlat',))\n lat.units = 'ETRS-TM35FIN'\n lon = ncf.createVariable('lon', 'f4', ('dlon',))\n lon.units = 'ETRS-TM35FIN'\n\n cellsize[0] = data['cellsize']\n 
lon[:] = data['lon0']\n lat[:] = data['lat0']\n \n # required inputs\n cmask = ncf.createVariable('cmask', 'i4', ('dlat','dlon',))\n cmask.units = 'integer inside catchment, Nan outside'\n LAI_conif = ncf.createVariable('LAI_conif', 'f4', ('dlat','dlon',))\n LAI_conif.units = 'conifer LAI (m2m-2)'\n LAI_decid = ncf.createVariable('LAI_decid', 'f4', ('dlat','dlon',))\n LAI_decid.units = 'deciduous annual max LAI (m2m-2)' \n hc = ncf.createVariable('hc', 'f4', ('dlat','dlon',))\n hc.units = 'canopy height m' \n cf = ncf.createVariable('cf', 'f4', ('dlat','dlon',))\n cf.units = 'canopy closure (-)' \n \n soilclass = ncf.createVariable('soilclass', 'i4', ('dlat','dlon',))\n soilclass.units = 'soil class (1 - 5)'\n \n flowacc = ncf.createVariable('flowacc', 'f4', ('dlat','dlon',))\n flowacc.units = 'flow accumualtion area m2'\n slope = ncf.createVariable('slope', 'f4', ('dlat','dlon',))\n slope.units = 'local slope (deg)' \n \n for k in ['LAI_conif', 'LAI_decid', 'hc', 'cf', 'soilclass', 'flowacc', 'slope']:\n ncf[k][:,:] = data[k]\n \n print('**** done ****')", "def prep(self, deleteraw=False):\n print\n print 'Filtering rawdata to data as masked array...'\n# using 0 as flag\n# self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.rawdata[:self.nints,:, self.chans,:] == 0j)\n# using standard flags\n self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.flags[:self.nints,:, self.chans,:] == 0) # mask of True for flagged data (flags=0 in tpipe, which is flags=False in Miriad and flags=True in MS)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real #dataph is summed and detected to form TP beam at phase center, multi-pol\n self.min = self.dataph.min()\n self.max = self.dataph.max()\n print 'Shape of data:'\n print self.data.shape\n print 'Dataph min, max:'\n print self.min, self.max\n\n if deleteraw:\n del self.rawdata\n del self.flags\n\n self.freq = self.freq_orig[self.chans]\n\n # set up ur tracks (lol)\n self.dmtrack0 = {}\n self.twidths = {}\n self.delay = {}\n for dmbin in xrange(len(self.dmarr)):\n self.dmtrack0[dmbin] = self.dmtrack(self.dmarr[dmbin],0) # track crosses high-freq channel in first integration\n (trackt, trackc) = self.dmtrack0[dmbin]\n if len(trackc)<len(self.chans):\n print 'Computed track for DM=%.1f is too long for the observation; only %d channels are computed' % (self.dmarr[dmbin],len(trackc))\n continue\n \n# old way\n# self.twidths[dmbin] = [len(n.where(trackc == (chan-self.chans[0]))[0]) for chan in self.chans] # width of track for each unflagged channel\n# self.delay[dmbin] = [n.int(trackt[n.where(trackc == (chan-self.chans[0]))[0][0]]) for chan in self.chans] # integration delay for each unflagged channel of a given dm.\n# new way\n\n self.twidths[dmbin] = [len(n.where(n.array(trackc) == chan)[0]) for chan in range(len(self.chans))] # width of track for each unflagged channel\n self.delay[dmbin] = [n.int(trackt[n.where(n.array(trackc) == chan)[0][0]]) for chan in range(len(self.chans))] # integration delay for each unflagged channel of a given dm.\n\n\n print 'Track width in time: '\n for dmbin in self.twidths:\n print 'DM=%.1f, max(twidth)=%d. Iteration could step by %d/2.' 
% (self.dmarr[dmbin], max(self.twidths[dmbin]), max(self.twidths[dmbin]))", "def __init__(self, path, coordkeys = \"time time_bounds TFLAG ETFLAG latitude latitude_bounds longitude longitude_bounds lat lat_bnds lon lon_bnds etam_pressure etai_pressure layer_bounds layer47 layer\".split(), delimiter = ',', names = True, **kwds):\n kwds['names'] = names\n kwds['delimiter'] = delimiter\n data = np.recfromtxt(path, **kwds)\n dimkeys = [dk for dk in data.dtype.names if dk in coordkeys]\n varkeys = [vk for vk in data.dtype.names if not vk in coordkeys]\n for dk in dimkeys:\n dv = np.unique(data[dk])\n dv.sort()\n self.createDimension(dk, len(dv))\n dvar = self.createVariable(dk, dv.dtype.char, (dk,))\n dvar[:] = dv\n \n for vk in varkeys:\n vv = data[vk]\n var = self.createVariable(vk, vv.dtype.char, tuple(dimkeys))\n for idx in np.ndindex(var.shape):\n thisidx = np.sum([data[dk] == self.variables[dk][di] for di, dk in zip(idx, dimkeys)], axis = 0) == len(dimkeys)\n if thisidx.any():\n var[idx] = vv[thisidx]", "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image\n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "def _unpack_drex_rdata(self, data, ngr):\n\n self.g_ol = np.copy(data[0:3*3*ngr].reshape((3,3,ngr)).T)\n self.g_en = np.copy(data[3*3*ngr:2*3*3*ngr].reshape((3,3,ngr)).T)\n self.volfrac_ol = np.copy(data[2*3*3*ngr:2*3*3*ngr+ngr])\n self.volfrac_en = np.copy(data[2*3*3*ngr+ngr:2*3*3*ngr+2*ngr])\n self.fraction_olivine = np.copy(data[3*3*ngr*2+ngr*4+10])", "def pydaptonetCDF4(dataset):\n import pydap.model\n assert isinstance(dataset, pydap.model.DatasetType)\n # in pydap the dataset itself is a dict, in netCDF4 it has a variables dict\n # let's add the variables as well\n dataset.variables = {}\n for variable in dataset.keys():\n if isinstance(dataset[variable], pydap.model.GridType):\n # the pydap client returns grids for arrays with coordinates.\n # \n dataset.variables[variable] = dataset[variable][variable]\n dataset.variables[variable].attributes.update(dataset[variable].attributes)\n else:\n dataset.variables[variable] = dataset[variable]\n for key, value in dataset.attributes['NC_GLOBAL'].items():\n if key not in dataset:\n # I think the __setitem__ might be overwritten, so we'll do it like this\n setattr(dataset, key, value)\n else:\n warnings.warn('Could not set %s to %s because it already exists as a variable' % (key, value))\n return 
dataset", "def write_to_netCDF(nc_filename, data,\n ncformat='NETCDF4_CLASSIC',\n all_variables=False,\n verbose=True):\n ncfile = Dataset(nc_filename,'w', format=ncformat, clobber=True)\n for dd,dim in enumerate(data['dims']):\n ncfile.createDimension(data['dimname'][dd],dim)\n for vv,varname in enumerate(data['varn']):\n if all_variables:\n newvar = ncfile.createVariable(varname,\n data['vardtype'][vv],\n data['vardims'][vv])\n newvar[:] = data['data'][vv]\n newvar.units = data['units'][vv]\n else:\n if varname in core_variables:\n newvar = ncfile.createVariable(varname,\n data['vardtype'][vv],\n data['vardims'][vv],\n fill_value=data['fillValue'])\n newvar[:] = data['data'][vv]\n if verbose:\n print(varname)\n print(newvar[newvar == np.nan])\n newvar[newvar == np.nan] = data['fillValue']\n newvar.units = data['units'][vv]\n ncfile.createDimension('nchars',19)\n newvar[:] = data['time']\n ncfile.description = data['description']\n ncfile.station = data['station']\n ncfile.sensor = data['sensor']\n ncfile.latitude = data['latitude']\n ncfile.longitude = data['longitude']\n ncfile.altitude = data['altitude']\n ncfile.createdon = datetime.now().strftime(standard_datetime_fmt)\n ncfile.createdby = data['author']\n ncfile.close()", "def test_netCDF_variable_dimension(self):\n f = cfdm.Field()\n\n f.nc_set_variable(\"qwerty\")\n self.assertTrue(f.nc_has_variable())\n self.assertEqual(f.nc_get_variable(), \"qwerty\")\n self.assertEqual(f.nc_get_variable(default=None), \"qwerty\")\n self.assertEqual(f.nc_del_variable(), \"qwerty\")\n self.assertFalse(f.nc_has_variable())\n self.assertIsNone(f.nc_get_variable(default=None))\n self.assertIsNone(f.nc_del_variable(default=None))\n\n f.nc_set_variable(\"/ncvar\")\n self.assertEqual(f.nc_get_variable(), \"ncvar\")\n\n f.nc_set_variable(\"/ncvar/qwerty\")\n self.assertEqual(f.nc_get_variable(), \"/ncvar/qwerty\")\n\n for nc_var_name in self.nc_grouped_variable_names:\n with self.assertRaises(ValueError):\n f.nc_set_variable(nc_var_name)\n\n d = cfdm.DomainAxis()\n\n d.nc_set_dimension(\"qwerty\")\n self.assertTrue(d.nc_has_dimension())\n self.assertEqual(d.nc_get_dimension(), \"qwerty\")\n self.assertEqual(d.nc_get_dimension(default=None), \"qwerty\")\n self.assertEqual(d.nc_del_dimension(), \"qwerty\")\n self.assertFalse(d.nc_has_dimension())\n self.assertIsNone(d.nc_get_dimension(default=None))\n self.assertIsNone(d.nc_del_dimension(default=None))\n\n d.nc_set_dimension(\"/ncdim\")\n self.assertEqual(d.nc_get_dimension(), \"ncdim\")\n\n d.nc_set_dimension(\"/ncdim/qwerty\")\n self.assertEqual(d.nc_get_dimension(), \"/ncdim/qwerty\")\n\n for nc_dim_name in self.nc_grouped_dimension_names:\n with self.assertRaises(ValueError):\n d.nc_set_dimension(nc_dim_name)\n\n d = cfdm.Count()\n\n d.nc_set_sample_dimension(\"qwerty\")\n self.assertTrue(d.nc_has_sample_dimension())\n self.assertEqual(d.nc_get_sample_dimension(), \"qwerty\")\n self.assertEqual(d.nc_get_sample_dimension(default=None), \"qwerty\")\n self.assertEqual(d.nc_del_sample_dimension(), \"qwerty\")\n self.assertFalse(d.nc_has_sample_dimension())\n self.assertIsNone(d.nc_get_sample_dimension(default=None))\n self.assertIsNone(d.nc_del_sample_dimension(default=None))\n\n d.nc_set_sample_dimension(\"/ncdim\")\n self.assertEqual(d.nc_get_sample_dimension(), \"ncdim\")\n\n d.nc_set_sample_dimension(\"/ncdim/qwerty\")\n self.assertEqual(d.nc_get_sample_dimension(), \"/ncdim/qwerty\")\n\n for nc_dim_name in self.nc_grouped_dimension_names:\n with self.assertRaises(ValueError):\n 
d.nc_set_sample_dimension(nc_dim_name)\n\n # ------------------------------------------------------------\n # Global attributes\n # ------------------------------------------------------------\n # values keyword\n f = cfdm.Field()\n\n f.nc_set_global_attribute(\"Conventions\", \"CF-1.8\")\n f.nc_set_global_attribute(\"project\")\n f.nc_set_global_attribute(\"foo\")\n f.set_property(\"Conventions\", \"Y\")\n f.set_property(\"project\", \"X\")\n self.assertEqual(\n f.nc_global_attributes(values=True),\n {\"Conventions\": \"CF-1.8\", \"project\": \"X\", \"foo\": None},\n )\n\n f = cfdm.Field()\n self.assertEqual(f.nc_clear_global_attributes(), {})\n\n f.nc_set_global_attribute(\"Conventions\")\n f.nc_set_global_attribute(\"project\", \"X\")\n self.assertEqual(\n f.nc_global_attributes(), {\"Conventions\": None, \"project\": \"X\"}\n )\n\n f.nc_set_global_attribute(\"project\")\n f.nc_set_global_attribute(\"comment\", None)\n self.assertEqual(\n f.nc_global_attributes(),\n {\"Conventions\": None, \"project\": None, \"comment\": None},\n )\n\n self.assertEqual(\n f.nc_clear_global_attributes(),\n {\"Conventions\": None, \"project\": None, \"comment\": None},\n )\n self.assertEqual(f.nc_global_attributes(), {})\n\n f.nc_set_global_attribute(\"Conventions\")\n f.nc_set_global_attribute(\"project\")\n self.assertEqual(\n f.nc_global_attributes(), {\"Conventions\": None, \"project\": None}\n )\n\n _ = f.nc_clear_global_attributes()\n f.nc_set_global_attributes({})\n self.assertEqual(f.nc_global_attributes(), {})\n\n f.nc_set_global_attributes({\"comment\": 123}, copy=False)\n self.assertEqual(f.nc_global_attributes(), {\"comment\": 123})\n\n f.nc_set_global_attributes({\"comment\": None, \"foo\": \"bar\"})\n self.assertEqual(\n f.nc_global_attributes(), {\"comment\": None, \"foo\": \"bar\"}\n )\n\n f = cfdm.Field()\n f.set_properties({\"foo\": \"bar\", \"comment\": \"variable comment\"})\n f.nc_set_variable(\"tas\")\n d = f.set_construct(cfdm.DomainAxis(2))\n f.set_data(cfdm.Data([8, 9]), axes=[d])\n\n f2 = f.copy()\n f2.nc_set_variable(\"ua\")\n\n cfdm.write(\n [f, f2],\n tempfile1,\n file_descriptors={\"comment\": \"global comment\", \"qwerty\": \"asdf\"},\n )\n\n g = cfdm.read(tempfile1)\n self.assertEqual(len(g), 2)\n\n for x in g:\n self.assertEqual(\n x.properties(),\n {\n \"comment\": \"variable comment\",\n \"foo\": \"bar\",\n \"qwerty\": \"asdf\",\n \"Conventions\": \"CF-\" + cfdm.CF(),\n },\n )\n self.assertEqual(\n x.nc_global_attributes(),\n {\n \"comment\": \"global comment\",\n \"qwerty\": None,\n \"Conventions\": None,\n },\n )\n\n cfdm.write(g, tempfile2)\n h = cfdm.read(tempfile2)\n for x, y in zip(h, g):\n self.assertEqual(x.properties(), y.properties())\n self.assertEqual(\n x.nc_global_attributes(), y.nc_global_attributes()\n )\n self.assertTrue(x.equals(y, verbose=3))\n self.assertTrue(y.equals(x, verbose=3))\n\n g[1].nc_set_global_attribute(\"comment\", \"different comment\")\n cfdm.write(g, tempfile3)\n h = cfdm.read(tempfile3)\n for x, y in zip(h, g):\n self.assertEqual(x.properties(), y.properties())\n self.assertEqual(\n x.nc_global_attributes(),\n {\"comment\": None, \"qwerty\": None, \"Conventions\": None},\n )\n self.assertTrue(x.equals(y, verbose=3))\n self.assertTrue(y.equals(x, verbose=3))", "def read_netcdf(ncFile,vars = [],coords = False, verbose = False):\n if verbose:\n print 'Reading input data vars:', vars, ', from file:',ncFile\n f = Dataset(ncFile,'r')\n if vars==[]: vars = f.variables.keys()\n d={}\n a={}\n g={}\n if coords:\n if isinstance(vars,str):\n 
d[vars] = f.variables[vars][coords]\n a[vars] = f.variables[vars].__dict__\n else:\n for var in vars:\n d[var] = f.variables[var][coords]\n a[var] = f.variables[var].__dict__\n else:\n if isinstance(vars,str):\n d[vars] = f.variables[vars][:]\n a[vars] = f.variables[vars].__dict__\n else:\n for var in vars:\n d[var] = f.variables[var][:]\n a[var] = f.variables[var].__dict__\n \n for attr in f.ncattrs():\n g[attr] = getattr(f,attr)\n \n f.close()\n \n return d,a,g", "def __init__(self,fn_dts,fn_contigs,wnd_size):\n\n wnd = int(fn_dts.split(\"/\")[-1].split(\"_bp\")[0])\n assert int(wnd)==wnd_size\n self.wnd_size = wnd_size\n print fn_dts\n self.wnd_DTS = DenseTrackSet(fn_contigs,\n fn_dts,\n overwrite=False,\n openMode='r')\n \n self.contigs = self.wnd_DTS.mContigNameLen\n self.starts=self.wnd_DTS[\"starts\"]\n self.ends=self.wnd_DTS[\"ends\"]\n self.cps=self.wnd_DTS[\"copy\"]", "def getheader(filename):\n # read header and convert to string\n h = np.fromfile(filename, dtype='uint8', count=512)\n header = ''\n for s in h[h > 0]:\n header += chr(s)\n # start reading at 'datatype'\n hd = header[header.lower().find('datatype'):]\n hd = hd.split(':')[0].replace(',', ' ').split()\n # Types: uint8 int16 int32 float32\n typelist = ['u1', 'i2', 'i4', 'f4']\n # extract datatype\n try:\n dtype = typelist[int(hd[0].split('=')[1]) - 1]\n except:\n print(header)\n raise IOError('getheader: datatype invalid or missing')\n # extract endianness\n try:\n if hd[-1].split('=')[0].lower() != 'endian':\n raise IndexError()\n endian = hd[-1].split('=')[1]\n except IndexError:\n print(header)\n raise IOError('getheader: endianess missing.')\n if endian.lower() == 'l':\n dtype = '<' + dtype\n else:\n dtype = '>' + dtype\n # extract dims\n try:\n if hd[2].split('=')[0].lower() != 'dims':\n raise IndexError()\n dims = int(hd[2].split('=')[1])\n if dims not in [2, 3]:\n raise ValueError('Invalid dims=%i (must be 2 or 3)' % dims)\n except IndexError:\n print(header)\n raise IOError('getheader: dims invalid or missing.')\n try:\n if hd[3].split('=')[0].lower() != 'nx':\n raise IndexError()\n nx = int(hd[3].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: nx invalid or missing.')\n try:\n if hd[4].split('=')[0].lower() != 'ny':\n raise IndexError()\n ny = int(hd[4].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: ny invalid or missing.')\n if dims == 3:\n try:\n if hd[5].split('=')[0].lower() != 'nt':\n raise IndexError()\n nt = int(hd[5].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: nt invalid or missing.')\n shape = (nx, ny, nt)\n else:\n shape = (nx, ny)\n return [shape, dtype, header]", "def read_netcdf(self,filename):", "def _parser_netcdf(self, filepath, local_attrs):\n\n fileparts = {}\n\n try:\n fileparts['variable'] = []\n fileparts['start_time'] = []\n fileparts['end_time'] = []\n fileparts['path'] = []\n\n # open file\n d = nc.Dataset(filepath, 'r')\n\n # find what the time (unlimited) dimension is\n dims = list(dict(d.dimensions).keys())\n\n # loop through all variables\n for v in d.variables:\n # add all variables that are not coordinates to the catalog\n if v not in dims:\n fileparts['variable'].append(v)\n fileparts['path'].append(filepath)\n\n if 'time' in d.variables.keys():\n times = d['time']\n fileparts['start_time'].append(times[0])\n fileparts['end_time'].append(times[-1])\n\n # add global attributes\n for g in local_attrs['global'].keys():\n if g not in fileparts.keys():\n fileparts[g] = []\n 
fileparts[g].append(local_attrs['global'][g])\n\n # add the keys that are common just to the particular glob string\n # fileparts.update(local_attrs[filepath])\n for lv in local_attrs[filepath].keys():\n if lv not in fileparts.keys():\n fileparts[lv] = []\n if '<<' in local_attrs[filepath][lv]:\n if hasattr(d.variables[v], lv):\n fileparts[lv].append(getattr(d.variables[v], lv))\n else:\n fileparts[lv].append('NaN')\n elif '<' in local_attrs[filepath][lv]:\n k = local_attrs[filepath][lv].replace('<', '').replace('>', '')\n if hasattr(d, k):\n fileparts[lv].append(getattr(d, k))\n else:\n fileparts[lv].append('NaN')\n else:\n fileparts[lv].append(local_attrs[filepath][lv])\n # close netcdf file\n d.close()\n except Exception:\n pass\n return fileparts", "def read_dimensions(self, path_meta, path_data):\n if path_meta.endswith('.mdd'): path_meta = path_meta.replace('.mdd', '')\n if path_data.endswith('.ddf'): path_data = path_data.replace('.ddf', '')\n self._meta, self._data = r_dimensions(path_meta+'.mdd', path_data+'.ddf')\n self._set_file_info(path_data, path_meta)\n if not self._dimensions_comp == 'ignore':\n d_comp = self._dimensions_comp\n self._meta['info']['dimensions_comp'] = d_comp\n self.set_dim_suffix()\n self.undimensionize()\n if d_comp is True: self.dimensionize()\n self._rename_blacklist_vars()\n return None", "def PseudoIOAPIVariable(parent,name,typecode,dimensions,**kwds):\n\n retval = PseudoNetCDFVariable(parent, name, typecode, dimensions, **kwds)\n\n if not 'units' in kwds:\n warn('IOAPI variables must have units; %s has been initialized with \"None\" units')\n retval.units = 'None'\n \n if not 'long_name' in kwds:\n retval.long_name = name.ljust(16)\n\n if not 'var_desc' in kwds:\n retval.var_desc = name.ljust(80)\n\n return retval", "def load_nc(file,var):\n\tf = netCDF4.Dataset(file,'r+')\n\tdara = f.variables[var][:]\n\tf.close()\n\treturn data", "def __init__(self, filepatt, tshape, itile=None, dtype='>f4', ncs=None, blankval=nan):\n self.filepatt = filepatt\n self.dtype = dtype\n self.tshape = tuple(tshape)\n self.tnx = tshape[-1]\n self.tny = tshape[-2]\n self.tsize = prod(self.tshape)\n self.glob = False\n if '*' in self.filepatt or '?' 
in self.filepatt:\n self.glob = True\n # value for blank tiles\n self.blankval = blankval\n if itile is not None:\n self.itile = array(itile, int)\n elif ncs is not None:\n # standard cube sphere layout from ncs, tshape\n nface = 6\n ntx = ncs/self.tnx\n nty = ncs/self.tny\n self.itile = zeros((nty, nface*ntx), int)\n for yt in range(nty):\n for iface in range(nface):\n for xt in range(ntx):\n self.itile[yt,iface*ntx+xt] = 1+(iface*nty+yt)*ntx+xt\n else:\n print 'error: tiledfile: need either itile or ncs'\n return\n\n self.nty, self.ntx = self.itile.shape\n self.ntile = self.nty*self.ntx\n self.nx = self.ntx*self.tnx\n self.ny = self.nty*self.tny\n self.shape = self.tshape[:-2] + (self.ny,self.nx)\n self.tiles = [None for i in range(self.ntile)]", "def write_flat_netcdf(outFile,time,frac,uh,x,y,xc,yc,inGlobs,inAttrs):\n f = Dataset(outFile, 'w', format='NETCDF4')\n\n # set dimensions\n times = f.createDimension('time', len(time))\n npoints = f.createDimension('npoints', len(frac))\n \n # initialize variables\n times = f.createVariable('time','f8',('time',))\n fracs = f.createVariable('fraction','f8',('npoints',))\n xis = f.createVariable('xi','i4',('npoints',))\n yis = f.createVariable('yi','i4',('npoints',))\n xcs = f.createVariable('xc','f8',('npoints',))\n ycs = f.createVariable('yc','f8',('npoints',))\n uhs = f.createVariable('unit_hydrograph','f8',('time','npoints',))\n \n # deal with attributes\n f.description = 'Flattened uh/fraction grid file'\n f.history = 'Created ' + tm.ctime(tm.time())\n f.velocity = inGlobs['velocity']\n f.diffusion = inGlobs['diffusion']\n f.outlet_lon = inGlobs['outlet_lon']\n f.outlet_lat = inGlobs['outlet_lat']\n f.outlet_y = inGlobs['outlet_y']\n f.outlet_x = inGlobs['outlet_x']\n try:\n f.includes = inGlobs['includes']\n except:\n pass\n \n times.standard_name = inAttrs['time']['standard_name']\n times.units = inAttrs['time']['units']\n times.calendar = inAttrs['time']['calendar']\n \n try:\n fracs.units = inAttrs['fraction']['units']\n except:\n fracs.units = '%'\n fracs.description = inAttrs['fraction']['description']\n \n uhs.units = inAttrs['unit_hydrograph']['units']\n uhs.description = inAttrs['unit_hydrograph']['description']\n \n xis.standard_name = 'x_ind'\n xis.description = 'x index location'\n \n yis.standard_name = 'y_ind'\n yis.description = 'y index location'\n \n xcs.standard_name =inAttrs['xc']['standard_name']\n xcs.long_name = inAttrs['xc']['long_name']\n xcs.units =inAttrs['xc']['units']\n \n ycs.standard_name =inAttrs['yc']['standard_name']\n ycs.long_name = inAttrs['yc']['long_name']\n ycs.units =inAttrs['yc']['units']\n \n times[:] = time\n fracs[:] = frac\n uhs[:,:] = uh\n xis[:] = x\n yis[:] = y\n xcs[:] = xc\n ycs[:] = yc\n\n f.close()\n \n return", "def data_info(data):\n filename = data[\"filename\"]\n X_var = data[\"X_var\"]\n Y_var = data[\"Y_var\"]\n X,Y = read_file(filename,X_var,Y_var)\n input_dim = len(X_var)\n output_dim = len(Y_var)\n return X,Y,input_dim,output_dim", "def analyze_pressure_dump(filename, Lx=200., Ly=200, Lz=900., N=10, bin_divide_flag=False, Natoms=113579):\n myfile = open(filename+'.txt')\n trajectory = []\n traj_pd = []\n frames = []\n\n for _ in range(3):\n next(myfile)\n count = 0\n while EOF(myfile):\n count += 1\n s = next(myfile) # info with the time step\n\n x = np.zeros(N, dtype=[('Chunk',np.float32), ('Coord1',np.float32), ('Ncount',np.float32), ('density',np.float32), ('temp',np.float32), ('vx',np.float32), ('fx',np.float32),('c_pciKE[1]',np.float32), ('c_pciKE[2]',np.float32), 
('c_pciKE[3]',np.float32), ('c_pciVIR[1]',np.float32), ('c_pciVIR[2]',np.float32), ('c_pciVIR[3]',np.float32), ('c_pgelELAS[1]',np.float32), ('c_pgelELAS[2]',np.float32), ('c_pgelELAS[3]',np.float32), ('c_pgelVIR[1]', np.float32), ('c_pgelVIR[2]', np.float32), ('c_pgelVIR[3]', np.float32), ('c_pgelPAIR[1]', np.float32), ('c_pgelPAIR[2]', np.float32), ('c_pgelPAIR[3]', np.float32)])\n\n# Chunk Coord1 Ncount density/number temp vx fx c_pciKE[1] c_pciKE[2] c_pciKE[3] c_pciVIR[1] c_pciVIR[2] c_pciVIR[3] c_pgelELAS[1] c_pgelELAS[2] c_pgelELAS[3] c_pgelVIR[1] c_pgelVIR[2] c_pgelVIR[3] c_pgelPAIR[1] c_pgelPAIR[2] c_pgelPAIR[3]\n\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n frame, _, _ = list_line\n frames.append(int(frame))\n # print( \"reading lines\")\n\n for i in xrange(N):\n count += 1\n s = next(myfile)\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n # print( \"reading line\", i, list_line)\n for il, l in enumerate(list_line):\n x[i][il] = float(l)\n\n trajectory.append(x)\n\n # names = x.dtype.fields.keys()\n # data = x.dtype.fields.values()\n\n df = pd.DataFrame.from_records(x)\n traj_pd.append(df)\n\n myfile.close()\n\n\n\n # # volume = 218.*44.*44.\n volume = Lx*Ly*Lz\n # N_atoms = 113579\n # if bin_divide_flag:\n # bin_volume = volume / float(N)\n # else:\n # bin_volume = 1.\n\n bin_volume = volume / float(N)\n # bin_volume = volume\n # bin_volume /= float(Natoms)\n\n Combine_PD = pd.concat(traj_pd)\n FINAL_PD = pd.DataFrame()\n\n FINAL_PD['Coord1'] = Combine_PD['Coord1']\n FINAL_PD['p_ciKE'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciKE[1]'] + Combine_PD['c_pciKE[2]'] + Combine_PD['c_pciKE[3]'])/(3.*bin_volume)\n FINAL_PD['p_ciVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciVIR[1]'] + Combine_PD['c_pciVIR[2]'] + Combine_PD['c_pciVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelELAS'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelELAS[1]'] + Combine_PD['c_pgelELAS[2]'] + Combine_PD['c_pgelELAS[3]'])/(3.*bin_volume)\n\n FINAL_PD['p_gelVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelVIR[1]'] + Combine_PD['c_pgelVIR[2]'] + Combine_PD['c_pgelVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelPAIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelPAIR[1]'] + Combine_PD['c_pgelPAIR[2]'] + Combine_PD['c_pgelPAIR[3]'])/(3.*bin_volume)\n\n # So now I have to\n # P_bin = (sigma_per_atom_xx + ... 
+ sigma_per_atom_zz)/(bin_volume*3)\n # *N_atoms_per_bin\n # N_atoms_per_bin = number_density*N_atoms\n\n\n df_concat = FINAL_PD\n\n by_row_index = df_concat.groupby(df_concat.index)\n df_means = by_row_index.mean()\n by_row_index_2 = df_concat.groupby(df_concat.index)\n df_stds = by_row_index_2.std()\n\n # print( df_means.head())\n # print( df_stds.head())\n return df_means, df_stds", "def collapse_var(nc, out, name, direction):\n var1 = nc.variables[name]\n N = (len(nc.dimensions[direction]) - 1) / 2\n\n print(\"Processing %s...\" % name)\n dims = var1.dimensions\n if len(dims) > 1: # only collapse spatial fields\n dims = [x for x in dims if x != direction]\n\n try:\n fill_value = var1._FillValue\n var2 = out.createVariable(name, var1.dtype,\n dimensions=dims, fill_value=fill_value)\n except:\n var2 = out.createVariable(name, var1.dtype,\n dimensions=dims)\n\n copy_attributes(var1, var2)\n\n if direction == 'x':\n var2[:] = var1[get_slice(var1.dimensions, x=N)]\n elif direction == 'y':\n var2[:] = var1[get_slice(var1.dimensions, y=N)]", "def createNCDF(self):\n\n rootgrp = Dataset(self.filename_out, 'w', format=self.format, clobber=True)\n\n # Create dimensions.\n if 'dimensions' in self.input_dict:\n for k, v in self.input_dict['dimensions'].items():\n rootgrp.createDimension(k, v)\n else:\n if not self.Quiet:\n print('No netCDF created:')\n print(' No dimension key found (!! has to be \\\"dimensions\\\"!!!)')\n return()\n\n # Create global attributes.\n if 'global attributes' in self.input_dict:\n for k, v in self.input_dict['global attributes'].items():\n rootgrp.setncattr(k, v)\n else:\n if not self.Quiet:\n print(' No global attribute key found (!! has to be \\\"global attributes\\\"!!!)')\n\n # Create variables.\n for k, v in self.input_dict['variables'].items():\n dims = self.input_dict['variables'][k]['dimensions']\n data = v['data']\n # Create correct data type if provided\n if 'data_type' in self.input_dict['variables'][k]:\n data_type = self.input_dict['variables'][k]['data_type']\n else:\n data_type = 'f4'\n # Check whether we've been given a fill value.\n if 'fill_value' in self.input_dict['variables'][k]:\n fill_value = self.input_dict['variables'][k]['fill_value']\n else:\n fill_value = None\n # Create ncdf variable\n if not self.Quiet:\n print(' Creating variable: {} {} {}'.format(k, data_type, dims))\n var = rootgrp.createVariable(k, data_type, dims, fill_value=fill_value)\n if len(dims) > np.ndim(data):\n # If number of dimensions given to netCDF is greater than the\n # number of dimension of the data, then fill the netCDF\n # variable accordingly.\n if 'time' in dims:\n # Check for presence of time dimension (which can be\n # unlimited variable: defined by None).\n try:\n var[:] = data\n except IndexError:\n raise(IndexError(('Supplied data shape {} does not match the specified'\n ' dimensions {}, for variable \\'{}\\'.'.format(data.shape, var.shape, k))))\n else:\n if not self.Quiet:\n print('Problem in the number of dimensions')\n else:\n try:\n var[:] = data\n except IndexError:\n raise(IndexError(('Supplied data shape {} does not match the specified'\n ' dimensions {}, for variable \\'{}\\'.'.format(data.shape, var.shape, k))))\n\n # Create attributes for variables\n if 'attributes' in self.input_dict['variables'][k]:\n for ka, va in self.input_dict['variables'][k]['attributes'].items():\n var.setncattr(ka, va)\n\n rootgrp.close()", "def __init__(self, filepatt, tshape=None, itile=None, dtype='>f4', ncs=None, blankval=nan, its=None, cache=False,rot=None):\n 
self.filepatt = filepatt\n self.dtype = dtype\n if tshape is not None:\n self.tshape = tuple(tshape)\n else:\n metapatt = re.sub(r'\\.data$', '.meta', filepatt)\n if re.search(r'%010d', metapatt):\n metapatt = re.sub(r'%010d', '%010d'%its[0], metpatt)\n metaglob = re.sub(r'%[0-9]*d', '*', metapatt)\n metafiles = glob(metaglob)\n dims,i1s,i2s = readmeta(metafiles[0])\n self.tshape = dims\n\n self.tnx = tshape[-1]\n self.tny = tshape[-2]\n self.tsize = prod(self.tshape)\n self.glob = False\n self.rot = rot\n if '*' in self.filepatt or '?' in self.filepatt:\n self.glob = True\n # value for blank tiles\n self.blankval = blankval\n self.its = its\n if itile is not None:\n self.itile = array(itile, int)\n elif ncs is not None:\n # standard cube sphere layout from ncs, tshape\n nface = 6\n ntx = ncs/self.tnx\n nty = ncs/self.tny\n self.itile = zeros((nty, nface*ntx), int)\n for yt in range(nty):\n for iface in range(nface):\n for xt in range(ntx):\n self.itile[yt,iface*ntx+xt] = 1+(iface*nty+yt)*ntx+xt\n else:\n print 'error: tiledfiles: need either itile or ncs'\n return\n\n self.nty, self.ntx = self.itile.shape\n self.ntile = self.nty*self.ntx\n self.nx = self.ntx*self.tnx\n self.ny = self.nty*self.tny\n self.cache = cache\n if its: # and cache:\n self.nt = len(its)\n else:\n self.nt = 1\n self.shape = (len(its),) + self.tshape[:-2] + (self.ny,self.nx)\n self.tiles = [[None for i in range(self.ntile)] for it in range(self.nt)]\n # current it (if not chache)\n self.it = None", "def _unpack(chid, data, count=None, ftype=None, as_numpy=True):\n\n def array_cast(data, count, ntype, use_numpy):\n \"cast ctypes array to numpy array (if using numpy)\"\n if use_numpy:\n dtype = dbr.NP_Map.get(ntype, None)\n if dtype is not None:\n out = numpy.empty(shape=(count,), dtype=dbr.NP_Map[ntype])\n ctypes.memmove(out.ctypes.data, data, out.nbytes)\n else:\n out = numpy.ctypeslib.as_array(copy(data))\n else:\n out = copy(data)\n return out\n\n def unpack_simple(data, count, ntype, use_numpy):\n \"simple, native data type\"\n if count == 1 and ntype != dbr.STRING:\n return data[0]\n if ntype == dbr.STRING:\n out = []\n for elem in range(min(count, len(data))):\n this = strjoin('', data[elem]).rstrip()\n if NULLCHAR_2 in this:\n this = this[:this.index(NULLCHAR_2)]\n out.append(this)\n if len(out) == 1:\n out = out[0]\n return out\n if count > 1:\n data = array_cast(data, count, ntype, use_numpy)\n return data\n\n def unpack_ctrltime(data, count, ntype, use_numpy):\n \"ctrl and time data types\"\n if count == 1 or ntype == dbr.STRING:\n data = data[0].value\n if ntype == dbr.STRING and NULLCHAR in data:\n data = data[:data.index(NULLCHAR)]\n return data\n # fix for CTRL / TIME array data:Thanks to Glen Wright !\n data = (count*dbr.Map[ntype]).from_address(ctypes.addressof(data) +\n dbr.value_offset[ftype])\n if count > 1:\n data = array_cast(data, count, ntype, use_numpy)\n return data\n\n unpack = unpack_simple\n if ftype >= dbr.TIME_STRING:\n unpack = unpack_ctrltime\n\n if count is None and chid is not None:\n count = element_count(chid)\n if count is None:\n count = 1\n\n if ftype is None and chid is not None:\n ftype = field_type(chid)\n if ftype is None:\n ftype = dbr.INT\n ntype = native_type(ftype)\n use_numpy = (HAS_NUMPY and as_numpy and ntype != dbr.STRING and count > 1)\n return unpack(data, count, ntype, use_numpy)", "def _squeeze_dims(ds):\n ds = ds.squeeze()\n for dim in ['lon', 'lat', 'bnds', 'depth', 'depth_2', 'depth_3']:\n if dim in ds:\n if ds[dim].size <= 1:\n del ds[dim]\n drop = []\n for 
dim in [\n 'hyai', 'hybi', 'hyam', 'hybm', 'time_bnds', 'lat_bnds', 'lon_bnds'\n ]:\n if dim in ds:\n drop.append(dim)\n ds = ds.drop(drop)\n return ds.squeeze()", "def __init__(self):\n\n self.mx = self.my = self.mz = 0\n self.mvar = 0\n self.maux = 0\n self.mglobal = 0\n\n self.precision = 'S'\n self.nghostx = self.nghosty = self.nghostz = 0\n\n self.nprocx = self.nprocy = self.nprocz = 0\n self.iprocz_slowest = 0\n self.ipx = self.ipy = self.ipz = 0\n\n # Add derived quantities to the dim object.\n self.nx = self.ny = self.nz = 0\n self.mw = 0\n self.l1 = self.l2 = 0\n self.m1 = self.m2 = 0\n self.n1 = self.n2 = 0\n\n self.nxgrid = self.nygrid = self.nzgrid = 0\n self.mxgrid = self.mygrid = self.mzgrid = 0", "def prep(self):\n print\n print 'Filtering rawdata to data as masked array...'\n# using 0 as flag\n# self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.rawdata[:self.nints,:, self.chans,:] == 0j)\n# using standard flags\n self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.flags[:self.nints,:, self.chans,:] == 0) # mask of True for flagged data (flags=0 in tpipe, which is flags=False in Miriad and flags=True in MS)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real #dataph is summed and detected to form TP beam at phase center, multi-pol\n self.min = self.dataph.min()\n self.max = self.dataph.max()\n print 'Shape of data:'\n print self.data.shape\n print 'Dataph min, max:'\n print self.min, self.max\n\n self.freq = self.freq_orig[self.chans]\n\n # set up ur tracks (lol)\n self.dmtrack0 = {}\n self.twidths = {}\n for dmbin in xrange(len(self.dmarr)):\n self.dmtrack0[dmbin] = self.dmtrack(self.dmarr[dmbin],0) # track crosses high-freq channel in first integration\n self.twidths[dmbin] = 0\n for k in self.dmtrack0[dmbin][1]:\n self.twidths[dmbin] = max(self.twidths[dmbin], len(n.where(n.array(self.dmtrack0[dmbin][1]) == k)[0]))\n\n print 'Track width in time: '\n for dmbin in self.twidths:\n print 'DM=%.1f, twidth=%d. Iteration could step by %d/2.' 
% (self.dmarr[dmbin], self.twidths[dmbin], self.twidths[dmbin])", "def write_netcdf(ncinfo):\r\n\t# ========== Create new netcdf ==========\r\n\tNAME=nc.netcdf_file(ncinfo.fname,'w')\r\n\t\r\n\t# ========== Set up the Dimensions ==========\r\n\tNAME.createDimension('time', None) #Question: Shouldn't time be unlimited?\r\n\t# NAME.createDimension('lev',11)\r\n\tNAME.createDimension('lat',ncinfo.lat)\r\n\tNAME.createDimension('lon',ncinfo.lon)\r\n\t\r\n\t# ========== Setup the Variables ==========\r\n\ttime=NAME.createVariable('time',np.float64,('time',))\r\n\t# lev=NAME.createVariable('lev',np.int32,('lev',))\r\n\tlat=NAME.createVariable('lat',np.float64,('lat',))\r\n\tlon=NAME.createVariable('lon',np.float64,('lon',))\r\n\t# VAR=NAME.createVariable(str(VAR),np.float64,('time','lev','lat','lon'),)\r\n\tVAR=NAME.createVariable(ncinfo.var_name,np.float64,('time','lat','lon'),)\r\n\t# setting the missing value is super important for the file to be cdo readable\r\n\tsetattr(VAR,'missing_value',ncinfo.fill)\r\n\tsetattr(VAR, 'standard_name', ncinfo.var_lname) \r\n\t\r\n\t# ========== Set the units ==========\r\n\ttime.units= 'day as %Y%m%d'\r\n\t# lev.units = '-'\r\n\tlat.units = 'degrees_north'\r\n\tlon.units = 'degrees_east'\r\n\tVAR.units = ncinfo.units\r\n\r\n\t# ========== Add data ==========\r\n\t\r\n\t# creates time vector using the date_range function\r\n\t# time[:]=[t for t in date_range('20110101.5','20111231.5')] \r\n\t# lev[:]=PFT_vector\r\n\tlat[:] = ncinfo.latitudes\r\n\tlon[:] = ncinfo.longitudes\r\n\t# THis is a Bodge for singe variable data\r\n\tVAR[:] = ncinfo.data\r\n\r\n\t#Add global attributes\r\n\tNAME.description = ncinfo.description\r\n\tNAME.history = ncinfo.history\r\n\r\n\t# WHATS MISSING\r\n\t# metadata a whole bunch of metadata\r\n\t# the standard_name and long_name of the variables\r\n\r\n\t# ========== Close the netcdf ==========\r\n\tNAME.close()", "def load_2D_netCDF(filename, var_name, lat_name, lon_name):\n data = Dataset(filename, 'r')\n var = data[var_name][:]\n lats = data[lat_name][:]\n lons = data[lon_name][:]\n data.close()\n return var, lats, lons", "def read_traj(ncfiles,indkeep=0):\n\n data = nc.Dataset(ncfiles)\n \n xyz = data.variables['coordinates']\n \n xyzn = Quantity(xyz[indkeep:-1], angstroms) \n \n lens = data.variables['cell_lengths']\n lensn = Quantity(lens[indkeep:-1], angstroms)\n\n angs = data.variables['cell_angles']\n angsn = Quantity(angs[indkeep:-1], degrees)\n\n return data, xyzn, lensn, angsn", "def write_netcdf(file,xc,xc_bnd,yc,yc_bnd,times,hydrographs,fractions,loc,Flist,velocity,diffusion,NODATA,verbose):\n \n f = Dataset(file,'w', format='NETCDF4')\n\n # set dimensions\n time = f.createDimension('time', None)\n x = f.createDimension('x',xc.shape[1])\n y = f.createDimension('y',xc.shape[0])\n nv4 = f.createDimension('nv4',4)\n\n # initialize variables\n time = f.createVariable('time','f8',('time',))\n xcs = f.createVariable('xc','f8',('y','x',))\n ycs = f.createVariable('yc','f8',('y','x',))\n xc_bnds = f.createVariable('xc_bnds','f8',('y','x','nv4',))\n yc_bnds = f.createVariable('yc_bnds','f8',('y','x','nv4',))\n fraction = f.createVariable('fraction','f8',('y','x',),fill_value=NODATA)\n UHS = f.createVariable('unit_hydrograph','f8',('time','y','x',),fill_value=NODATA)\n\n # write attributes for netcdf\n f.description = 'Aggregated UH_S and Fraction Vars for full RASM domain'\n f.history = 'Created: {}\\n'.format(tm.ctime(tm.time()))\n f.history += ' '.join(sys.argv) + '\\n'\n f.source = sys.argv[0] # prints the name of script 
used\n f.velocity = velocity\n f.diffusion = diffusion\n f.outlet_lon = loc[0]\n f.outlet_lat = loc[1]\n f.includes = str(len(Flist))+' files'\n\n ycs.long_name = 'latitude of grid cell center'\n ycs.standard_name = 'latitude'\n ycs.units = 'degrees_north'\n ycs._CoordinateAxisType = 'Lat'\n ycs.bounds = 'yc_bnds'\n\n xcs.long_name = 'longitude of grid cell center'\n xcs.standard_name = 'longitude'\n xcs.units = 'degrees_east'\n xcs._CoordinateAxisType = 'Lon'\n xcs.bounds = 'xc_bnds'\n\n time.standard_name = 'time'\n time.units = 'seconds'\n time.description = 'Seconds since initial impulse'\n time.calendar = 'proleptic_gregorian'\n\n UHS.units = 'unitless'\n UHS.description = 'unit hydrograph for each grid cell with respect to basin outlet location'\n \n fraction.units = 'unitless'\n fraction.description = 'fraction of grid cell contributing to guage location'\n\n # write data to variables initialized above\n time[:]= times\n xcs[:,:] = xc\n ycs[:,:] = yc\n xc_bnds[:,:,:] = xc_bnd\n yc_bnds[:,:,:] = yc_bnd\n UHS[:,:,:] = hydrographs\n fraction[:,:]= fractions\n f.close()\n\n return", "def date_info_day(date_str, infile):\n #date_str = str(sys.argv[1])\n #infile = './' + date_str + '.nc'\n\n # prepare date\n year,mon,day = date_str.split('-')\n year_num = int(float(year))\n mon_num = int(float(mon))\n day_num = int(float(day))\n\n\n datesec_calc = []\n val_pr_day = 4\n secstep = 86400/val_pr_day\n sec = [0, 1*secstep, 2*secstep, 3*secstep]\n for j in sec:\n datesec_calc.append(j)\n\n # Open a netCDF file for appending:\n ncfile = Dataset(infile,'a')\n #time_in = ncfile.variables['time'][:]\n #ncfile = Dataset('date_datesec' + date + '.nc','w')\n\n # Create the variable (4 byte integer in this case)\n # first argument is name of variable, second is datatype, third is\n # a tuple with the names of dimensions.\n date_str = ncfile.createVariable('date',dtype('int32').char,('time'))\n datesec = ncfile.createVariable('datesec',dtype('int32').char,('time'))\n\n # Write data to variable:\n date_str[:] = year_num*10000+mon_num*100+day_num\n datesec[:] = datesec_calc\n\n # Add attributes to the variables:\n date_str.long_name = 'current date (YYYYMMDD)'\n datesec.long_name = 'current seconds of current date'\n\n # close the file.\n ncfile.close()\n return", "def test_netCDF_to_memory(self):\n f = cfdm.example_field(4)\n f.data.to_memory() # on non-compressed array\n f.compress(\"indexed_contiguous\", inplace=True)\n f.data.to_memory() # on compressed array", "def test_data_handling_nc_cc():\n\n input_files = [os.path.join(TEST_DATA_DIR, 'ua_test.nc'),\n os.path.join(TEST_DATA_DIR, 'va_test.nc'),\n os.path.join(TEST_DATA_DIR, 'orog_test.nc'),\n os.path.join(TEST_DATA_DIR, 'zg_test.nc')]\n\n with xr.open_mfdataset(input_files) as fh:\n min_lat = np.min(fh.lat.values)\n min_lon = np.min(fh.lon.values)\n target = (min_lat, min_lon)\n plevel = fh.plev[-1]\n ua = np.transpose(fh['ua'][:, -1, ...].values, (1, 2, 0))\n va = np.transpose(fh['va'][:, -1, ...].values, (1, 2, 0))\n\n handler = DataHandlerNCforCC(input_files, features=['U_100m', 'V_100m'],\n target=target, shape=(20, 20),\n val_split=0.0,\n worker_kwargs=dict(max_workers=1))\n\n assert handler.data.shape == (20, 20, 20, 2)\n\n handler = DataHandlerNCforCC(input_files,\n features=[f'U_{int(plevel)}pa',\n f'V_{int(plevel)}pa'],\n target=target, shape=(20, 20),\n val_split=0.0,\n worker_kwargs=dict(max_workers=1))\n if handler.invert_lat:\n handler.data = handler.data[::-1]\n assert handler.data.shape == (20, 20, 20, 2)\n assert np.allclose(ua, 
handler.data[..., 0])\n assert np.allclose(va, handler.data[..., 1])", "def writeNetCDFData(out_nc, hrus, dr_time, hru_type, remapped_data, var_meta, var_attrs, var_encodings, remap_idx):\n\n dataset = xr.Dataset()\n\n for varname, meta in var_meta.items():\n foo = xr.DataArray(remapped_data[varname][:, remap_idx],\n dims=['time', 'basinID'],\n name=varname)\n\n foo.encoding = var_encodings[varname]\n foo.attrs = var_attrs[varname]\n\n dataset[varname] = foo\n\n # HRU ID variables\n dataset['basinID'] = xr.DataArray(hrus[remap_idx], dims=['basinID'])\n dataset['basinID'].encoding = {'dtype': hru_type, '_FillValue': None}\n dataset['basinID'].attrs = {'long_name': 'Basin ID'}\n\n dataset[TIME_DIM_NAME] = dr_time\n\n dataset.to_netcdf(out_nc, unlimited_dims='time')", "def _convert_dataset(self):\n allowed_vars = ['x', 'y', 'z', 'tvu', 'thu']\n dtyp = [(varname, self.data[varname].dtype) for varname in allowed_vars if varname in self.data]\n empty_struct = np.empty(len(self.data['x']), dtype=dtyp)\n for varname, vartype in dtyp:\n empty_struct[varname] = self.data[varname].values\n self.data = empty_struct", "def _read_dx(self, FN):\n if FN.endswith('.dx'):\n F = open(FN, 'r')\n else:\n import gzip\n F = gzip.open(FN, 'r')\n\n # Read the header\n line = F.readline()\n while line.find('object') == -1:\n line = F.readline()\n header = {}\n header['counts'] = [int(x) for x in line.split(' ')[-3:]]\n for name in ['origin', 'd0', 'd1', 'd2']:\n header[name] = [float(x) for x in F.readline().split(' ')[-3:]]\n F.readline()\n header['npts'] = int(F.readline().split(' ')[-3])\n\n # Test to make sure the grid type is okay.\n # These conditions are not absolultely essential,\n # but they reduce the number of subtraction operations.\n if not (header['d0'][1] == 0 and header['d0'][2] == 0\n and header['d1'][0] == 0 and header['d1'][2] == 0\n and header['d2'][0] == 0 and header['d2'][1] == 0):\n raise Exception('Trilinear grid must be in original basis')\n if not (header['d0'][0] > 0 and header['d1'][1] > 0\n and header['d2'][2] > 0):\n raise Exception('Trilinear grid must have positive coordinates')\n\n # Read the data\n vals = np.ndarray(shape=header['npts'], dtype=float)\n index = 0\n while index < header['npts']:\n line = F.readline()[:-1]\n items = [float(item) for item in line.split()]\n vals[index:index + len(items)] = items\n index = index + len(items)\n F.close()\n\n data = {\n 'origin':np.array(header['origin']), \\\n 'spacing':np.array([header['d0'][0],header['d1'][1],header['d2'][2]]), \\\n 'counts':np.array(header['counts']), \\\n 'vals':vals}\n return data", "def __init__(self, x_dimname='lon', y_dimname='lat', time_dimname='time'):\n self.x_dimname = x_dimname\n self.y_dimname = y_dimname\n self.time_dimname = time_dimname", "def dims(filespec, verbose=False):\n with open(filespec, \"rb\") as f:\n if f.read(4) == b\"\\x76\\x2f\\x31\\x01\": # EXR magic number\n version = np.frombuffer(f.read(4), dtype=\"<u4\")[0]\n max_strlen = 256 if (version & 0x400) else 32\n got_channels = False\n got_dims = False\n while not (got_channels and got_dims):\n attr_name = _read_string_nul(f, max_strlen)\n _ = _read_string_nul(f, max_strlen) # attr_type\n attr_size = np.frombuffer(f.read(4), dtype=\"<u4\")[0]\n if attr_name == \"channels\":\n nchan = 0\n isfloat = False\n bitdepth = 16\n while not got_channels:\n name = _read_string_nul(f, max_strlen)\n if len(name) >= 1:\n dtype = np.frombuffer(f.read(16), dtype=\"<u4\")[0]\n isfloat = isfloat or (dtype > 0)\n bitdepth = max(bitdepth, 16 if dtype == 1 else 
32)\n nchan += 1\n else:\n got_channels = True\n elif attr_name == \"dataWindow\":\n box = np.frombuffer(f.read(16), dtype=\"<i4\")\n xmin, ymin, xmax, ymax = box\n width = xmax - xmin + 1\n height = ymax - ymin + 1\n got_dims = True\n else:\n _ = f.seek(attr_size, 1)\n if verbose:\n print(f\"Reading file {filespec} \", end='')\n print(f\"(w={width}, h={height}, c={nchan}, bitdepth={bitdepth})\")\n return width, height, nchan, isfloat, bitdepth\n raise RuntimeError(f\"File {filespec} is not a valid EXR file.\")", "def __init__(self, time_drop_width, time_stripes_num, freq_drop_width, \n freq_stripes_num):\n\n super(SpecAugmentation, self).__init__()\n\n self.time_dropper = DropStripes(dim=2, drop_width=time_drop_width, \n stripes_num=time_stripes_num)\n\n self.freq_dropper = DropStripes(dim=3, drop_width=freq_drop_width, \n stripes_num=freq_stripes_num)", "def netcdf_compatible_array(arry):\n arry = strip_array_wrappers(arry)\n\n if arry.ndim > 0:\n for _ in range(3):\n if arry.dtype.char != \"O\" or arry.ndim == 0:\n break\n\n if arry.shape[0] == 1:\n arry = np.array(arry[0])\n else:\n arry = np.array(tuple(arry))\n\n if \"S\" in arry.dtype.char:\n return np.char.decode(arry, \"ascii\")\n # TODO: ensure no float16, ...\n return arry", "def __init__(self, dimensions=2):\n self.compound = False\n self.dimensions = dimensions\n self.domain = None\n self.mesh = None\n self.problem = None\n self.problem_type = ''\n self.x, self.Lx, self.nx, self.delta_x = [None]*4\n self.y, self.Ly, self.ny, self.delta_y = [None]*4\n self.z, self.z_dealias, self.Lz, self.nz = [None]*4\n return", "def __init__(self, data_array):\n self._data_array = data_array\n self._units = self._data_array.attrs.get('units', 'dimensionless')", "def __init__(self,prefix,verbose=False):\n self.prefix = prefix\n self.hub = dict() #hub-height wind speeds\n self.field = dict() #full NY x NZ field\n self._readTurbSimScalar(prefix,'u',verbose=verbose)\n self._readTurbSimScalar(prefix,'v',verbose=verbose)\n self._readTurbSimScalar(prefix,'w',verbose=verbose)", "def globvardimvals(tmpl, valuesdict,sufs=['.001.001.meta', '.meta']):\n # remove formats: {xx:yy} -> {xx}\n tmpl = re.sub(r'{([^:}]*)(:[^}]*)?}', r'{\\1}', tmpl)\n\n fields = list(set(re.findall(r'{([^}]*)}', tmpl)))\n vardims = [k for k in fields if k.startswith('v')]\n vardims.sort()\n knownvars = dict((k,v) for k,v in valuesdict.items() if k in vardims)\n knownvardims = [ k for k in vardims if k in knownvars ]\n knownvarvals = [ knownvars[k] for k in knownvardims ]\n knownvarlens = [ len(v) for v in knownvarvals ]\n unknownvardims = [ k for k in vardims if not k in knownvars ]\n\n fixdims = [k for k in fields if not k.startswith('v')]\n fixdims.sort()\n\n # just pick actual fields\n known = dict((k,v) for k,v in valuesdict.items() if k in fields)\n knowndims = dict((k,v) for k,v in known.items() if k not in vardims)\n # first known value for each field\n firstdims = dict((k,v[0]) for k,v in knowndims.items())\n\n if 'vars' in valuesdict:\n # list of variable value tuples\n # must be all variables; will ignore other v0=... 
settings\n varvals = valuesdict['vars']\n else:\n knownvarindices = np.indices(knownvarlens)\n varvals = []\n for vi in zip(*[x.flat for x in knownvarindices]):\n varval = tuple(v[i] for v,i in zip(knownvarvals,vi))\n varvals.append(varval)\n\n dimvals = {}\n\n unknown = set(fields) - set(known)\n if unknown:\n replaceknown = dict((k,'{'+k+'}') for k in fields)\n for k,v in firstdims.items():\n replaceknown[k] = v\n\n for knownvarval in varvals:\n vars = dict(zip(knownvardims, knownvarval))\n replaceknown.update(vars)\n\n unknowntmpl = tmpl.format(**replaceknown)\n\n globpatt = re.sub(r'{[^}]*}', '*', unknowntmpl)\n for suf in sufs:\n metafiles = glob(globpatt + suf)\n if len(metafiles):\n break\n else:\n raise IOError(globpatt + suf)\n\n unknowndims = [k for k in unknown if not k.startswith('v')]\n regexp,parts,keys = format2re(unknowntmpl + suf)\n vals = {}\n for metafile in metafiles:\n g = re.match(regexp,metafile).groups()\n d = dict(zip(keys,g))\n varval = tuple(d[k] for k in unknownvardims)\n if varval not in vals:\n vals[varval] = dict((k,set()) for k in unknowndims)\n for k,v in zip(keys,g):\n if not k.startswith('v'):\n vals[varval][k].add(v)\n\n for unknownvarvals,vs in vals.items():\n unknownvars = dict(zip(unknownvardims,unknownvarvals))\n vars.update(unknownvars)\n varval = tuple(vars[k] for k in vardims)\n dimvals[varval] = dict((k,sorted(list(s))) for k,s in vs.items())\n dimvals[varval].update(knowndims)\n else:\n dimvals = dict.fromkeys(varvals, knowndims)\n \n # res: (v0,v1) -> {'d0':['a','b','c'], 'd1':[0,1,2], ...}\n return vardims,fixdims,dimvals", "def test_dimension_mapping(self):\n fh = NetCDF4()\n\n with tempfile.TemporaryDirectory() as tdir:\n tfile = os.path.join(tdir, 'testfile')\n before = xr.Dataset({\n \"var1\": (\"dim1\", np.arange(5)),\n \"group1/var1\": (\"group1/dim1\", np.arange(5)),\n \"group1/var2\": (\"group1/dim2\", np.arange(5)),\n \"group1/subgroup1/var1\":\n (\"group1/subgroup1/dim1\", np.arange(5)),\n \"group1/subgroup1/var2\":\n (\"group1/subgroup1/dim2\", np.arange(5)),\n \"group2/var1\": (\"group2/dim1\", np.arange(5)),\n \"group2/subgroup1/var1\":\n (\"group1/subgroup1/dim1\", np.arange(5)),\n \"group3/var1\": (\"group3/dim1\", np.arange(10)),\n }, coords={\n \"dim1\": (\"dim1\", np.arange(5)),\n \"group1/dim1\": (\"group1/dim1\", np.arange(5))\n })\n\n # Save the dataset and load it again:\n fh.write(before, tfile)\n after = fh.read(tfile)\n\n # How it should be after loading:\n check = xr.Dataset({\n \"var1\": (\"dim1\", np.arange(5)),\n \"group1/var1\": (\"group1/dim1\", np.arange(5)),\n \"group1/var2\": (\"group1/dim2\", np.arange(5)),\n \"group1/subgroup1/var1\": (\"group1/dim1\", np.arange(5)),\n \"group1/subgroup1/var2\": (\"group1/dim2\", np.arange(5)),\n \"group2/var1\": (\"dim1\", np.arange(5)),\n \"group2/subgroup1/var1\": (\"dim1\", np.arange(5)),\n \"group3/var1\": (\"group3/dim1\", np.arange(10)),\n }, coords={\n \"dim1\": (\"dim1\", np.arange(5)),\n \"group1/dim1\": (\"group1/dim1\", np.arange(5))\n })\n\n assert after.equals(check)", "def in_cstruc(traj, *argv, **kwargs) :\n data = traj.data\n v = traj.var(\"q_cloud_liquid_mass\")\n w_v = traj.var(\"w\")\n z_p = traj.var(\"tracer_traj_zr\") # This seems to be a hack - i.e. 
wrong.\n tr1_p = traj.var(\"tracer_rad1\")\n\n if 'thresh' in kwargs:\n thresh = kwargs['thresh']\n else :\n thresh = 1.0E-5\n\n if len(argv) == 2 :\n (tr_time, obj_ptrs) = argv\n qcl = data[ tr_time, obj_ptrs, v]\n w = data[ tr_time, obj_ptrs, w_v]\n tr1 = data[ tr_time, obj_ptrs, tr1_p]\n else :\n qcl = data[..., v]\n w = data[..., w_v]\n zpos = data[..., z_p]\n tr1 = data[ ..., tr1_p]\n\n mask = cstruct_select(qcl, w, zpos, tr1, thresh=thresh, ver=6)\n return mask, qcl", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def read_nc_var(nc_file, var_name, var_date_name):\n \n fh = Dataset(nc_file, mode='r')\n var = fh.variables[var_name][:]\n var_dates = fh.variables[var_date_name][:]\n \n print(var_name,': ', var.shape)\n print(var_date_name,': ', var_dates.shape)\n \n return var, var_dates;", "def get_data(datauri): \n print(datauri)\n nc = netCDF4.Dataset(datauri)\n time = nc.variables['JULD']\n depth = nc.variables['PRES']\n\n checkdepth = 0\n findepth = np.zeros(time.shape[0])\n for i in range (0, depth.shape[0]):\n maxdepth = np.amax(depth[i])\n findepth[i] = maxdepth\n if (maxdepth > checkdepth):\n dd=i\n checkdepth = maxdepth\n maxdepth = findepth[dd]\n \n temperature = nc.variables['TEMP'][dd] \n tempadj=nc.variables['TEMP_ADJUSTED'][dd]\n depthnew = nc.variables['PRES'][dd] \n depthadj = nc.variables['PRES_ADJUSTED'][dd] \n\n latitude = nc.variables['LATITUDE'][dd]\n longitude = nc.variables['LONGITUDE'][dd]\n\n lonm=nc.variables['LONGITUDE'][dd].mask\n latm=nc.variables['LATITUDE'][dd].mask\n timm=nc.variables['JULD'][dd].mask\n\n if (lonm == True or latm == True):\n longitude=-999.9\n latitude=-999.9\n\n\n out = {}\n out['latitude'] = nc.variables.pop('LATITUDE')[dd]\n out['longitude'] = nc.variables.pop('LONGITUDE')[dd]\n out['temperature'] = nc.variables.pop('TEMP')[dd]\n out['temperatureadj'] = nc.variables.pop('TEMP_ADJUSTED')[dd]\n out['salinity'] = nc.variables.pop('PSAL')[dd]\n out['salinityadj'] = nc.variables.pop('PSAL_ADJUSTED')[dd]\n out['depth'] = nc.variables.pop('PRES')[dd]\n out['depthadj'] = nc.variables.pop('PRES_ADJUSTED')[dd]\n \n return out", "def woa_subset(llcrnrlon=2.5, urcrnrlon=357.5, llcrnrlat=-87.5, urcrnrlat=87.5,\n var='temperature', clim_type='monthly', resolution='1deg',\n levels=slice(0, 40)):\n\n uri = \"http://data.nodc.noaa.gov/thredds/dodsC/woa/WOA09/NetCDFdata\"\n fname = \"%s_%s_%s.nc\" % (var, clim_type, resolution)\n url = '%s/%s' % (uri, fname)\n nc = Dataset(url)\n\n v = dict(temperature='t', dissolved_oxygen='o', salinity='s',\n oxygen_saturation='O', apparent_oxygen_utilization='A',\n phosphate='p', silicate='p', nitrate='n')\n\n d = dict({'%s_an' % v[var]: 'OA Climatology',\n '%s_mn' % v[var]: 'Statistical Mean',\n '%s_dd' % v[var]: 'N. of Observations',\n '%s_se' % v[var]: 'Std Error of the Statistical Mean',\n '%s_sd' % v[var]: 'Std Deviation from Statistical Mean',\n '%s_oa' % v[var]: 'Statistical Mean minus OA Climatology',\n '%s_ma' % v[var]: 'Seasonal/Monthly minus Annual Climatology',\n '%s_gp' % v[var]: 'N. 
of Mean Values within Influence Radius'})\n\n depths = [0, 10, 20, 30, 50, 75, 100, 125, 150, 200, 250, 300, 400, 500,\n 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1750,\n 2000, 2500, 3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500, 7000,\n 7500, 8000, 8500, 9000][levels]\n\n llcrnrlon, urcrnrlon = map(wrap_lon360, (llcrnrlon, urcrnrlon))\n lon = wrap_lon360(nc.variables.pop('lon')[:])\n lat = nc.variables.pop('lat')[:]\n depth = nc.variables.pop('depth')[:]\n times = nc.variables.pop('time')\n times = num2date(times[:], times.units, calendar='365_day')\n times = [time.strftime('%b') for time in times]\n\n if clim_type == 'annual':\n times = clim_type\n\n # Select data subset.\n maskx = np.logical_and(lon >= llcrnrlon, lon <= urcrnrlon)\n masky = np.logical_and(lat >= llcrnrlat, lat <= urcrnrlat)\n maskz = np.array([z in depths for z in depth])\n\n lon, lat, depth = lon[maskx], lat[masky], depth[maskz]\n\n start = '%s_' % v[var]\n variables = dict()\n for variable in nc.variables.keys():\n if variable.startswith(start):\n subset = nc.variables[variable][..., maskz, masky, maskx]\n data = Panel4D(subset, major_axis=lat, minor_axis=lon,\n labels=np.atleast_1d(times),\n items=np.atleast_1d(depth))\n variables.update({d[variable]: data})\n return variables", "def drifterParse(ncfile, drift, idx=None, debug=False):", "def load_3D_netCDF(filename, var_name = \"prcp\", lat_name = \"lats\", lon_name = \"lons\", time_name = \"times\"):\n data = Dataset(filename, 'r')\n var = data[var_name][:]\n lats = data[lat_name][:]\n lons = data[lon_name][:]\n times = data[time_name][:]\n data.close()\n return var, lats, lons, times", "def unusedFromKDOTDataPreparation():", "def _read_nc(self, FN):\n from netCDF4 import Dataset\n grid_nc = Dataset(FN, 'r')\n data = {}\n for key in list(grid_nc.variables):\n data[key] = np.array(grid_nc.variables[key][:][0][:])\n grid_nc.close()\n return data", "def nDto2D_TensorField(nD_Tensor):\n # Ensure Numpy array\n data = np.array(nD_Tensor)\n # If data is 5D, then assume 3D tensor field, nX x nY x nZ x 3 x 3\n if len(data.shape) == 5:\n data = data.reshape((data.shape[0]*data.shape[1]*data.shape[2], 9))\n # Else if data is 4D\n elif len(data.shape) == 4:\n # If last D has 9/6 (symmetric tensor) component, then assume 3D tensor field, nX x nY x nZ x nComponent\n if data.shape[3] in (6, 9):\n data = data.reshape((data.shape[0]*data.shape[1]*data.shape[2], data.shape[3]))\n # Else if 3 in 4th D\n elif data.shape[3] == 3:\n # If 3 in 3rd D, then assume 2D tensor field, nX x nY x 3 x 3\n if data.shape[2] == 3:\n data = data.reshape((data.shape[0]*data.shape[1], 9))\n # Otherwise assume 3D vector field, nX x nY x nZ x 3\n else:\n data = data.reshape((data.shape[0]*data.shape[1]*data.shape[2], 3))\n\n # Else if 3D data\n elif len(data.shape) == 3:\n # If 3rd D is 6/9, then assume 2D (symmetric) tensor field, nX x nY x nComponent\n if data.shape[2] in (6, 9):\n data = data.reshape((data.shape[0]*data.shape[1], data.shape[2]))\n # Else if 3rd D is 3\n elif data.shape[2] == 3:\n # If 2nd D is 3, then assume 1D tensor, nPoint x 3 x 3\n if data.shape[1] == 3:\n data = data.reshape((data.shape[0], 9))\n # Otherwise assume 2D vector field, nX x nY x 3\n else:\n data = data.reshape((data.shape[0]*data.shape[1], 3))\n\n return data", "def _load_raw(self, filename, scalarize=lambda x: x[0]):\n\n # Load all the data in this object.\n data = list()\n with open(filename, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n for row in reader:\n data += [list(row)]\n\n # Attempt to 
parse all the data into their respective variables.\n try:\n # Load the header information.\n self.n = int(data[0][0])\n self.ns = int(data[0][1])\n self.m = int(data[0][2])\n\n k = int(data[0][3])\n self.s0 = int(data[0][4])\n self.ng = int(data[0][5])\n\n self.horizon = int(data[0][6])\n self.gamma = float(data[0][7])\n self.epsilon = float(0.01)\n\n # Functions to convert flattened NumPy arrays to C arrays.\n array_type_ng_uint = ct.c_uint * (self.ng)\n array_type_nmns_int = ct.c_int * (self.n * self.m * self.ns)\n array_type_nmns_float = ct.c_float * (self.n * self.m * self.ns)\n array_type_nm_float = ct.c_float * (self.n * self.m)\n\n # Load each of the larger data structures into memory and immediately\n # convert them to their C object type to save memory.\n rowOffset = 1\n self.goals = array_type_ng_uint(*np.array([int(data[rowOffset][s]) for s in range(self.ng)]).flatten())\n\n rowOffset = 2\n self.S = array_type_nmns_int(*np.array([[[int(data[(self.n * a + s) + rowOffset][sp]) \\\n for sp in range(self.ns)] \\\n for a in range(self.m)] \\\n for s in range(self.n)]).flatten())\n\n rowOffset = 2 + self.n * self.m\n self.T = array_type_nmns_float(*np.array([[[float(data[(self.n * a + s) + rowOffset][sp]) \\\n for sp in range(self.ns)] \\\n for a in range(self.m)] \\\n for s in range(self.n)]).flatten())\n\n rowOffset = 2 + self.n * self.m + self.n * self.m\n self.R = array_type_nm_float(*scalarize(np.array([[[float(data[(self.m * i + a) + rowOffset][s])\n for a in range(self.m)] \\\n for s in range(self.n)] \\\n for i in range(k)])).flatten())\n\n self.Rmax = max([self.R[i] for i in range(self.n * self.m)])\n self.Rmin = min([self.R[i] for i in range(self.n * self.m)])\n\n except Exception:\n print(\"Failed to load file '%s'.\" % (filename))\n raise Exception()", "def read_short(data):\n s_type = \"=%s\" % get_type(\"short\")\n return struct.unpack(s_type, data.read(2))[0]", "def func3(string:str):\n with open(string,\"r\") as file:\n data = file.read()\n data = data.split(\"bandwidths [1]:\")[0]\n size = int(data.split(\"points: size =\")[1].split(\"\\n\")[0])\n print(\"size is:\",size)\n final = np.zeros((2,2,size))\n my_list = data.split(\"formants\")[2:5]\n for i in range(2):\n final[i,:,:] = list(map(pars_value,my_list[i].split(\"points \")[1:])),list(map(pars_number,my_list[i].split(\"points \")[1:]))\n temp = final[0,0,:]>1100\n final[0,0,temp] = None\n return final", "def CreateDataArray(name, shape, cDims, type):\n # Create a numpy array of ones to hold our data\n num_array = np.ndarray(shape, dtype=type, order=\"C\")\n\n z = np.asarray(num_array)\n if not z.flags.contiguous:\n z = np.ascontiguousarray(z)\n z.fill(0)\n\n shape = z.shape\n assert z.flags.contiguous, 'Only contiguous arrays are supported.'\n assert not np.issubdtype(z.dtype, np.complex128), \\\n \"Complex numpy arrays cannot be converted to vtk arrays.\"\\\n \"Use real() or imag() to get a component of the array before\"\\\n \" passing it to vtk.\"\n\n # Get the Pointer to the numpy array\n z_flat = np.ravel(z)\n \n #np.info(z)\n \n # Declare the number of components for the array\n if type == np.int8:\n array = simpl.Int8ArrayType(z_flat, cDims, name, False)\n elif type == np.uint8:\n array = simpl.UInt8ArrayType(z_flat, cDims, name, False)\n elif type == np.int16:\n array = simpl.Int16ArrayType(z_flat, cDims, name, False)\n elif type == np.uint16:\n array = simpl.UInt16ArrayType(z_flat, cDims, name, False)\n elif type == np.int32:\n array = simpl.Int32ArrayType(z_flat, cDims, name, False)\n elif type == 
np.uint32:\n array = simpl.UInt32ArrayType(z_flat, cDims, name, False)\n elif type == np.int64:\n array = simpl.Int64ArrayType(z_flat, cDims, name, False)\n elif type == np.uint64:\n array = simpl.UInt64ArrayType(z_flat, cDims, name, False)\n elif type == np.float32:\n array = simpl.FloatArrayType(z_flat, cDims, name, False)\n elif type == np.double:\n array = simpl.DoubleArrayType(z_flat, cDims, name, False) \n \n # we need to return the 'z' numpy array so it does not go out of scope.\n return (z, array)", "def extrude(ncpath, times):\n d = netCDF4.Dataset(ncpath, 'r+')\n\n # Get time\n t_dim = d.dimensions['time']\n t = d.variables['time']\n t_len = t.shape[0]\n assert t.axis == 'T'\n assert t.dimensions == ('time', )\n\n # For each axis containing time\n for varname in d.variables:\n if varname == 'time':\n continue\n var = d.variables[varname]\n\n if 'time' not in var.dimensions:\n continue\n\n if 'time' != var.dimensions[0]:\n raise ValueError('Only supports time as first dimension')\n\n for i in range(times):\n n2 = var.shape[0]\n var[n2:n2+t_len] = var[:t_len]\n\n # For the time dimension\n # Make some simple assumptions about it's monotonicity\n a = t[:t_len]\n dt = t[1] - t[0]\n for i in range(times):\n a = a + (t[-1] + dt)\n n2 = t.shape[0]\n t[n2:n2+t_len] = a\n\n d.close()", "def write_netcdf(file, lons, lats, times, hydrographs, fractions, loc, grid_id,\n inds, Flist, velocity, diffusion, fill_value, verbose):\n f = Dataset(file,'w', format='NETCDF4')\n\n # set dimensions\n time = f.createDimension('time', None)\n lon = f.createDimension('lon', (len(lons)))\n lat = f.createDimension('lat', (len(lats)))\n\n # initialize variables\n time = f.createVariable('time','f8',('time',))\n lon = f.createVariable('lon','f8',('lon',))\n lat = f.createVariable('lat','f8',('lat',))\n fraction = f.createVariable('fraction','f8',('lat','lon',),fill_value=fill_value)\n UHS = f.createVariable('unit_hydrograph','f8',('time','lat','lon',),fill_value=fill_value)\n\n # write attributes for netcdf\n f.description = 'Aggregated UH_S and Fraction Vars'\n f.history = 'Created: {}\\n'.format(tm.ctime(tm.time()))\n f.history += ' '.join(sys.argv) + '\\n'\n f.source = sys.argv[0] # prints the name of script used\n f.velocity = velocity\n f.diffusion = diffusion\n f.outlet_id = str(grid_id.astype(np.int64))\n f.outlet_y= str(inds[0].astype(np.int64))\n f.outlet_x = str(inds[1].astype(np.int64)) # this is change is a cdo work around. Othewise cdo removes the attribute. 
\n f.outlet_lat = loc[0]\n f.outlet_lon = loc[1]\n f.includes = ', '.join(Flist)\n\n lat.long_name = 'latitude coordinate'\n lat.standard_name = 'latitude'\n lat.units = 'degrees_north'\n\n lon.long_name = 'longitude coordinate'\n lon.standard_name = 'longitude'\n lon.units = 'degrees_east'\n\n time.units = 'seconds since 0001-1-1 0:0:0'\n time.calendar = 'noleap'\n time.longname = 'time'\n time.type_prefered = 'float'\n time.description = 'Seconds since initial impulse'\n\n UHS.units = 'unitless'\n UHS.description = 'unit hydrograph for each grid cell with respect to downstream grid location'\n \n fraction.units = 'unitless'\n fraction.description = 'fraction of grid cell contributing to guage location'\n\n # write data to variables initialized above\n time[:]= times\n lon[:] = lons\n lat[:] = lats\n UHS[:,:,:] = hydrographs\n fraction[:,:]= fractions\n f.close()", "def _parse_slices_V4X(par, parfile):\n logger = logging.getLogger('raw2nii')\n par.slices = np.loadtxt(parfile, dtype=par.fields, ndmin=1).view(\n np.recarray)\n if not par.slices.shape:\n logger.warning('par.slices has wrong shape: {0}, reshaping...'.format(\n par.slices.shape))\n par.slices = np.reshape(par.slices, (1,))\n if len(par.slices[0]) != par.field_len:\n raise ValueError('Slice tag format does not match the number of '\n 'entries')\n #Determine number of interleaved image sequences (was:types,\n #name kept for historic reasons) (e.g. angio)\n par.nr_mrtypes = np.unique(par.slices.scanning_sequence).shape[0]\n #Determine number of interleaved echos\n par.nr_echos = np.unique(par.slices.echo_number).shape[0]\n #Determine number of interleaved image types (e.g. angio)\n par.nr_realmrtypes = np.unique(par.slices.image_type_mr).shape[0]\n #Determine number of diffusion gradients (e.g. DTI)\n par.nr_diffgrads = np.unique(par.slices.gradient_orientation_number\n ).shape[0]\n #Determine number of dynamics(directly from slice lines in\n #PAR file instead of PAR file header info!)\n par.nr_dyn = np.unique(par.slices.dynamic_scan_number).shape[0]\n if par.nr_dyn != par.gen_info.max_number_of_dynamics:\n logger.warning('Number of dynamics in header of PAR file does not '\n 'match number of dynamics in the body')\n par.nr_bvalues = np.unique(par.slices.diffusion_b_value_number).shape[0]\n #Check if multishell\n par.is_multishell = par.nr_bvalues > 2\n #Sort the slices\n sort_order = (par.slices.slice_number, par.slices.dynamic_scan_number,\n par.slices.diffusion_b_value_number,\n par.slices.gradient_orientation_number, par.slices.echo_number,\n par.slices.image_type_mr, par.slices.scanning_sequence)\n if par.is_multishell:\n sort_order = sort_order[:2] + (sort_order[3], sort_order[2]) + (\n sort_order[4:]) # Swap diffusion b value and gradient orientation\n else:\n pass # B0 and B1 diffusion weighting\n par.slices_sorted = par.slices[np.lexsort(sort_order)]\n return par.slices", "def readExperi(directory,varid,experi,level):\n print('\\n>>> Using readExperi function! 
\\n')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n \n ### Call files\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n if any([experi == 'FPOL',experi == 'FSUB']):\n directory = '/home/zlabe/green/simu/'\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n ### Read in Data\n if level == 'surface': # 3d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'T2M_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = 'surface'\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n elif level == 'profile': # 4d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'TEMP_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = dataq.variables['level'][:]\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Read data for *%s* : %s!' % (experi[:4],varid))\n \n ### Reshape to split years and months\n months = 12\n if level == 'surface': # 3d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,\n int(lat.shape[0])))\n elif level == 'profile': # 4d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,int(lev.shape[0]),\n int(lat.shape[0])))\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Reshaped %s array!' % (varid))\n \n ### Convert units\n if varid in ('TEMP','T2M'):\n var = var - 273.15 # Kelvin to degrees Celsius \n print('Completed: Changed units (K to C)!')\n\n print('\\n*Completed: Finished readExperi function!')\n return lat,lon,time,lev,var", "def read_xtidefile(fid):\n l = fgetl_nocom(fid)\n ncon = sscanf(l, '\\n %d')\n xtide = type('struct', (), {})()\n for k in range(1, (ncon +1)):\n l = fgetl_nocom(fid)\n xtide.name(k, :) = l[0:8]\n xtide.speed(k) = sscanf(l[8:l.shape[0]], '\\n %f')\n xtide.startyear = sscanf(fgetl_nocom(fid), '\\n %d')\n nyear = sscanf(fgetl_nocom(fid), '\\n %d')\n for k in range(1, (ncon +1)):\n l = fgetl(fid)\n xtide.equilibarg(k, :) = fscanf(fid, '\\n %f', nyear)\n l = fgetl(fid)\n l = fgetl(fid)\n # Skip *END*\n nyear = sscanf(fgetl_nocom(fid), '\\n %d')\n for k in range(1, (ncon +1)):\n l = fgetl(fid)\n xtide.nodefactor(k, :) = fscanf(fid, '\\n %f', nyear)\n l = fgetl(fid)\n l = fgetl(fid)\n # Skip *END*\n # Now read in all harmonic data\n #nsta=1754; \n # This is number of stations in harmonics (1998-07-18)\n #nsta=3351; \n # This is number of stations in v1.42 or harmonics file\n nsta = 3316\n # This is number in v1.51\n xharm = type('struct', (), {})()\n nh = 0\n while max(l.shape) > 0 & l[0] != - 1:\n\n l = l + ' '\n nh = nh + 1\n while not l[0:3] == '# !':\n\n l = fgetl(fid) + ' '\n\n while l[0:3] == '# !':\n\n if 'unit' == l[((3:7 -1) -1)]:\n tmp = deblank(l[(findstr(l, ':') + 2 -1):l.shape[0]])\n xharm.units(nh, range(1, (max(tmp.shape) +1))) = tmp\n else:\n if 'long' == l[((3:7 -1) -1)]:\n xharm.longitude(nh) = sscanf(l[(findstr(l, ':') + 1 -1):l.shape[0]], '\\n %f')\n else:\n if 'lati' == l[((3:7 -1) -1)]:\n xharm.latitude(nh) = sscanf(l[(findstr(l, ':') + 1 -1):l.shape[0]], '\\n %f')\n l = fgetl(fid)\n\n tmp = deblank(l)\n if tmp[0] != '#':\n # Not commented out\n 
xharm.station(nh, range(1, (max(tmp.shape) +1))) = tmp\n tmp = fgetl(fid)\n k = np.min(findstr(tmp, ':'))\n tim = sscanf(tmp[0:k - 1], '\\n %d') + sscanf(tmp[(k + np.array([range(1, 3)]).reshape(1, -1) -1)], '\\n %d') / 60\n xharm.timezone(nh) = tim\n tmp = fgetl(fid)\n xharm.datum(nh) = sscanf(tmp, '\\n %f')\n for k in range(1, (ncon +1)):\n l = fgetl(fid)\n if l[0] != 'x':\n ll = np.min(np.array([findstr(' ', l), np.flatnonzero(abs(l) == 9)]).reshape(1, -1))\n # space or tab\n tmp = sscanf(l[(ll + 1 -1):l.shape[0]], '\\n %f', 2)\n xharm.A(nh, k) = tmp[0]\n xharm.kappa(nh, k) = tmp[1]\n l = fgetl(fid)\n else:\n nh = nh - 1\n if rem(nh, 50) == 0:\n fprintf('.')\n\n fprintf('\\\\n')\n # Convert internally to sparse matrix storage (much smaller).\n xharm.A = sparse(xharm.A)\n xharm.kappa = sparse(xharm.kappa)\n return xtide, xharm\n # \n return xtide, xharm", "def read(self, file, nints, nskip, nocal, nopass, selectpol):\n\n self.file = file\n self.nints = nints\n vis = miriad.VisData(self.file,)\n\n # read data into python arrays\n i = 0\n for inp, preamble, data, flags in vis.readLowlevel ('dsl3', False, nocal=True, nopass=True):\n # Loop to skip some data and read shifted data into original data arrays\n if i == 0:\n # get few general variables\n self.nants0 = inp.getScalar ('nants', 0)\n self.inttime0 = inp.getScalar ('inttime', 10.0)\n self.nspect0 = inp.getScalar ('nspect', 0)\n self.nwide0 = inp.getScalar ('nwide', 0)\n self.sdf0 = inp.getScalar ('sdf', self.nspect0)\n self.nschan0 = inp.getScalar ('nschan', self.nspect0)\n self.ischan0 = inp.getScalar ('ischan', self.nspect0)\n self.sfreq0 = inp.getScalar ('sfreq', self.nspect0)\n self.restfreq0 = inp.getScalar ('restfreq', self.nspect0)\n self.pol0 = inp.getScalar ('pol')\n # DLK 2013-04-04\n # get the initial phase center\n self.ra0=inp.getScalar('ra')\n self.dec0=inp.getScalar('dec')\n\n self.sfreq = self.sfreq0\n self.sdf = self.sdf0\n self.nchan = len(data)\n print 'Initializing nchan:', self.nchan\n bls = []\n\n # build complete list of baselines\n bls.append(preamble[4])\n # end here. assume at least one instance of each bl occurs before ~six integrations (accommodates MWA)\n if len(bls) == 6*len(n.unique(bls)):\n blarr = []\n for bl in n.unique(bls):\n blarr.append(mirtask.util.decodeBaseline (bl))\n self.blarr = n.array(blarr)\n bldict = dict( zip(n.unique(bls), n.arange(len(blarr))) )\n break\n\n i = i+1\n\n # find number of pols in data\n uvd = mirtask.UVDataSet(self.file, 'rw')\n self.npol_orig = uvd.getNPol()\n pols = []\n for i in xrange(20): # loop over the first few spectra to find all polarizations in the data\n pols.append(uvd.getPol())\n uvd.next()\n uvd.close()\n upols = n.unique(pols) # get unique pols in first few spectra\n polstr = mirtask.util.polarizationName(upols[0])\n if len(upols) > 1:\n for pol in upols[1:]:\n polstr = polstr + ', ' + mirtask.util.polarizationName(pol)\n self.npol = len(selectpol)\n if self.npol > self.npol_orig:\n raise ValueError('Trying to select %d pols from %d available.' % (self.npol, self.npol_orig))\n for pol in selectpol:\n if not pol in polstr:\n raise ValueError('Trying to select %s, but %s available.' 
% (pol, polstr))\n print 'Initializing npol: %d (of %d, %s)' % (self.npol, self.npol_orig, polstr)\n\n # Initialize more stuff...\n self.freq_orig = self.sfreq + self.sdf * n.arange(self.nchan)\n self.freq = self.freq_orig[self.chans]\n\n # good baselines\n self.nbl = len(self.blarr)\n print 'Initializing nbl:', self.nbl\n self.ants = n.unique(self.blarr)\n self.nants = len(self.ants)\n print 'Initializing nants:', self.nants\n self.nskip = int(nskip*self.nbl) # number of iterations to skip (for reading in different parts of buffer)\n nskip = int(self.nskip)\n\n # define data arrays\n self.rawdata = n.zeros((nints, self.nbl, self.nchan, self.npol),dtype='complex64')\n self.flags = n.zeros((nints, self.nbl, self.nchan, self.npol),dtype='bool')\n self.u = n.zeros((nints,self.nbl),dtype='float64')\n self.v = n.zeros((nints,self.nbl),dtype='float64')\n self.w = n.zeros((nints,self.nbl),dtype='float64')\n self.preamble = n.zeros((nints*self.nbl,5),dtype='float64')\n\n # go back and read data into arrays\n for polnum in range(self.npol):\n stokes = selectpol[polnum]\n i = 0\n for inp, preamble, data, flags in vis.readLowlevel ('dsl3', False, nocal=nocal, nopass=nopass, stokes=stokes):\n # Loop to skip some data and read shifted data into original data arrays\n\n if i < nskip:\n i = i+1\n continue \n\n # assumes ints in order, but may skip. after nbl iterations, it fills next row, regardless of number filled.\n if (i-nskip) < nints*self.nbl:\n self.preamble[i-nskip] = preamble\n self.rawdata[(i-nskip)//self.nbl, bldict[preamble[4]], :, polnum] = data\n self.flags[(i-nskip)//self.nbl, bldict[preamble[4]], :, polnum] = flags\n # uvw stored in preamble index 0,1,2 in units of ns\n # Assumes miriad files store uvw in ns. Set to lambda by multiplying by freq of first channel.\n self.u[(i-nskip)//self.nbl, bldict[preamble[4]]] = preamble[0] * self.freq_orig[0]\n self.v[(i-nskip)//self.nbl, bldict[preamble[4]]] = preamble[1] * self.freq_orig[0]\n self.w[(i-nskip)//self.nbl, bldict[preamble[4]]] = preamble[2] * self.freq_orig[0]\n else:\n break # stop at nints\n\n if not (i % (self.nbl*100)):\n print 'Read spectrum ', str(i)\n\n i = i+1\n\n time = self.preamble[::self.nbl,3]\n\n if ((not n.any(self.rawdata)) & (not n.any(time))):\n raise ValueError('rawdata and time arrays at default values. No data read?')\n\n # limit the data to actually real data (DLK)\n maxgoodtime=max(n.where(time>0)[0])\n if maxgoodtime+1 < nints:\n print 'Requested to read %d integrations, but only found %d good integrations' % (nints,\n maxgoodtime)\n # need to trim off some of the data\n time=time[:maxgoodtime]\n self.nints=len(time)\n self.u=self.u[:maxgoodtime]\n self.v=self.v[:maxgoodtime]\n self.w=self.w[:maxgoodtime]\n self.rawdata=self.rawdata[:maxgoodtime]\n self.flags=self.flags[:maxgoodtime]\n \n self.reltime = 24*3600*(time - time[0]) # relative time array in seconds. 
evla times change...?\n # preserve absolute time (DLK)\n self.time=time\n self.inttime = n.array([self.reltime[i+1] - self.reltime[i] for i in xrange(len(self.reltime)/5,len(self.reltime)-1)]).mean()\n\n # define relative phase center for each integration\n self.l0 = n.zeros(self.nints)\n self.m0 = n.zeros(self.nints)\n\n # print summary info\n print\n print 'Shape of raw data, time:'\n print self.rawdata.shape, self.reltime.shape", "def _write_nc(self, FN, data):\n n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]\n from netCDF4 import Dataset\n grid_nc = Dataset(FN, 'w', format='NETCDF4')\n grid_nc.createDimension('one', 1)\n grid_nc.createDimension('n_cartesian', 3)\n grid_nc.createDimension('n_points', n_points)\n grid_nc.createVariable('origin', 'f8', ('one', 'n_cartesian'))\n grid_nc.createVariable('counts', 'i8', ('one', 'n_cartesian'))\n grid_nc.createVariable('spacing', 'f8', ('one', 'n_cartesian'))\n grid_nc.createVariable('vals', 'f8', ('one', 'n_points'), zlib=True)\n for key in data.keys():\n grid_nc.variables[key][:] = data[key]\n grid_nc.close()", "def read_from_file(self,grd_fn):\n self.grd_fn = grd_fn\n self.fp = open(self.grd_fn,'rt')\n hdr = self.fp.readline().strip() #header &GRD_2008 or &LISTGRD\n\n if hdr == self.hdr_08:\n print( \"Will read 2008 format for grid\" )\n n_parms = 11\n elif hdr == self.hdr_old:\n print( \"Will read old UnTRIM grid format\" )\n n_parms = 10\n\n for i in range(n_parms): # ignore TNE and TNS in new format files\n l = self.fp.readline()\n lhs,rhs = l.split('=')\n val = rhs.strip().strip(',')\n varname = lhs.strip()\n print( \"%s=%s\"%(varname,val) )\n\n if varname=='NV':\n Nvertices = int(val)\n elif varname=='NE':\n Npolys = int(val)\n elif varname=='NS':\n Nsides = int(val)\n elif varname=='NBC':\n Nboundary_poly = int(val)\n elif varname=='NSI':\n Ninternal_sides = int(val)\n elif varname=='NSF':\n Nflow_sides = int(val)\n elif varname=='NBC':\n Nbc = int(val)\n elif varname=='ANGLE':\n self.angle = float(val)\n elif varname=='LOCATION':\n self.location = val\n elif varname=='NR': ## these are read, but not used\n Nred = int(val)\n elif varname=='TNE':\n TNE=int(val)\n elif varname=='TNS':\n TNS=int(val)\n # others: HLAND for older fmt.\n \n while 1:\n s = self.fp.readline().strip() # header: /\n if s == '/':\n break\n\n # We know the size of everything, and can ask UnstructuredGrid to allocate\n # arrays now, with the 'special' meaning that passing an integer means allocate\n # the array of that size, full of zeros.\n # this allocates\n # self.nodes, self.edges, self.cells\n self.from_simple_data(points = Nvertices,edges = Nsides, cells = Npolys)\n\n for v in range(Nvertices):\n Cv = self.fp.readline().split()\n if hdr == self.hdr_08:\n vertex_num = int(Cv.pop(0))\n if vertex_num != v+1:\n print( \"Mismatched vertex numbering: %d != %d\"%(vertex_num,v+1) )\n self.nodes['x'][v,0] = float(Cv[0])\n self.nodes['x'][v,1] = float(Cv[1])\n \n print( \"Npolys\",Npolys )\n self.cells['edges'] = self.UNKNOWN # initialize all\n self.cells['nodes'] = self.UNKNOWN\n \n for c in range(Npolys):\n l = self.fp.readline()\n Cp = l.split()\n if hdr == self.hdr_08:\n poly_num = int(Cp.pop(0))\n if poly_num-1 != c:\n print( \"Mismatched polygon id: %fd != %d\"%(poly_num,c+1) )\n \n numsides = int(Cp[0])\n\n self.cells['_center'][c,0] = float(Cp[1])\n self.cells['_center'][c,1] = float(Cp[2])\n\n if hdr == self.hdr_old:\n # vertex index is Cp[3,5,7,9]\n # the others, 4,6,8,10, are edges, right?\n # convert to 0 based indices here\n\n # This 
is probably wrong! I think it's actually reading the\n # sides\n self.cells['edges'][c,0] = int(Cp[4]) - 1\n self.cells['edges'][c,1] = int(Cp[6]) - 1 \n self.cells['edges'][c,2] = int(Cp[8]) - 1\n if numsides == 4:\n self.cells['edges'][c,3] = int(Cp[10]) - 1 \n else:\n self.cells['edges'][c,3]=self.UNDEFINED\n #HERE - need to copy that to self.cells['nodes']\n else:\n for ei in range(numsides):\n self.cells['nodes'][c,ei] = int(Cp[3+ei]) - 1\n self.cells['edges'][c,ei] = int(Cp[3+numsides+ei]) - 1\n self.cells['nodes'][c,numsides:]=self.UNDEFINED\n self.cells['edges'][c,numsides:]=self.UNDEFINED\n \n # choose some large, above-sea-level depth\n self.cells['depth_mean'] = -1000 # not sure this is doing anything...\n\n for e in range(Nsides):\n Cs = self.fp.readline().split()\n if hdr == self.hdr_08:\n # side num = int(Cs.pop(0))\n Cs.pop(0)\n elif hdr == self.hdr_old:\n # side depth?\n edge_depth = self.edges['depth_mean'][e] = float(Cs.pop(0))\n \n self.edges['nodes'][e,0] = int(Cs[0])-1 # vertex indices\n self.edges['nodes'][e,1] = int(Cs[1])-1\n \n self.edges['cells'][e,0] = int(Cs[2])-1 # cell neighbors\n self.edges['cells'][e,1] = int(Cs[3])-1\n\n if hdr == self.hdr_old:\n for nc in self.edges['cells'][e]:\n if nc >= 0 and edge_depth > self.cells['depth_mean'][nc]:\n self.cells['depth_mean'][nc] = edge_depth\n\n if hdr==self.hdr_old:\n # old format - have to infer cell nodes from edges\n self.make_cell_nodes_from_edge_nodes()\n\n # Try to make sense of the marks and red/black:\n self.cells['red'][:Nred] = True\n self.cells['mark'][:Nboundary_poly] = self.BOUNDARY\n self.edges['mark'][:Ninternal_sides] = 0\n self.edges['mark'][Ninternal_sides:Nflow_sides] = self.FLOW\n self.edges['mark'][Nflow_sides:] = self.LAND\n\n # Bathymetry:\n if hdr == self.hdr_08:\n # make a cheap tokenizer to read floats across lines\n # note that it's up to the user to know that all values from\n # the line are read, and not to get the iterator until you're\n # ready for some values to be read\n def tokenizer():\n while True:\n for item in self.fp.readline().split():\n yield item\n for c in range(Npolys):\n check_c,nis = [int(s) for s in self.fp.readline().split()]\n if check_c != c+1:\n print(\"ERROR: while reading cell subgrid, cell index mismatch: %s vs. %d\"%(c+1,check_c))\n \n next_token = tokenizer().next\n areas = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n \n self.cells['depth_mean'][c] = np.sum(areas*depths) / np.sum(areas)\n self.cells['_area'][c] = np.sum(areas)\n self.cells['depth_max'][c] = depths.max()\n self.cells['subgrid'][c] = (areas,depths)\n for e in range(Nflow_sides):\n l = self.fp.readline()\n # print \"%d/%d - Read line: %s\"%(e,self.Nsides,l)\n check_e,nis = [int(s) for s in l.split()]\n if check_e != e+1:\n print( \"ERROR: While reading edge subgrid, edge index mismatch: %s vs. 
%s\"%(e+1,check_e) )\n next_token = tokenizer().next\n lengths = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n if sum(lengths)<=0:\n print( \"edge %d has bad lengths\"%e )\n self.edges['depth_mean'][e] = np.sum(lengths*depths) / sum(lengths)\n self.edges['depth_max'][e] = depths.max()\n self.edges['subgrid'][e] = (lengths,depths)\n # and land boundaries get zeros.\n for e in range(Nflow_sides,Nsides):\n self.edges['depth_mean'][e] = 0.0\n self.edges['depth_max'][e] = 0.0\n self.edges['subgrid'][e] = ([],[])", "def _read_netCDF(filename):\n if any(fn in os.path.basename(filename) for fn in L1B_MATCHES):\n with h5py.File(filename, \"r\") as afile:\n data = afile[\"RAD\"][:]\n\n blank = afile[\"RAD\"].attrs[\"_FillValue\"][0]\n bzero = afile[\"RAD\"].attrs[\"add_offset\"][0]\n bscale = afile[\"RAD\"].attrs[\"scale_factor\"][0]\n bunit = afile[\"RAD\"].attrs[\"units\"].tobytes().decode(\"utf-8\").rstrip(\"\\x00\")\n\n data = data * bscale + bzero\n dqf = afile[\"DQF\"][:]\n\n header_info = dict((key, afile[key][...]) for key in afile.keys())\n header = _make_cdf_header(header_info)\n # Deal with this here as we require the file.\n for att, val in afile.attrs.items():\n if att in TAG_MAPPING:\n header[TAG_MAPPING[att]] = (\n val.tobytes().decode(\"utf-8\").rstrip(\"\\x00\")\n )\n header[\"NAXIS1\"] = data.shape[0]\n header[\"NAXIS2\"] = data.shape[1]\n header[\"BLANK\"] = blank\n header[\"BSCALE\"] = bscale\n header[\"BZERO\"] = bzero\n header[\"BUNIT\"] = bunit\n else:\n raise ValueError(f\"File {filename} does not look like a SUVI L1b netCDF file.\")\n return header, data, dqf", "def _prepareMCMCIn(self, D_COMP, D_FIELD, cmbYMethodNArgs, ydim):\n\t\t# Extract y and xf\n\t\ty = D_FIELD[:,0:ydim]\n\t\txf = D_FIELD[:,ydim:]\n\t\t(n,p) = xf.shape\n\t\t# Extract eta, xc, and tc\n\t\teta = D_COMP[:,0:ydim]\n\t\txc = D_COMP[:,ydim:(ydim+p)]\n\t\ttc = D_COMP[:,(p+ydim):]\n\t\tx = np.concatenate((xf,xc), axis=0)\n\t\t(m,q) = tc.shape\n\t\t# Mix max normalization x eta y and tc\n\t\tself._logger.debug('Data shape before norm of y xf eta xc tc: %s %s %s %s %s'%(y.shape, \n\t\t\t\t\t\t\txf.shape, eta.shape, xc.shape, tc.shape));\n\t\tx = self._getMinMaxNormalized(x);\n\t\ttc = self._getMinMaxNormalized(tc);\n\t\tif ydim > 1:\n\t\t\teta = self._getMinMaxNormalized(eta);\n\t\t\ty = self._getMinMaxNormalized(y); \n\t\t\tself._logger.debug('Data shape after norm of y eta x: %s %s %s'%(y.shape, eta.shape, x.shape));\n\t\t# Reduce dimension of z to one, if not one\n\t\tz = np.concatenate((y,eta), axis=0);\n\t\tself._logger.debug('Data shape before dim reduction of z: %s',z.shape);\n\t\tif len(cmbYMethodNArgs) > 0:\n\t\t\tz = cmbYMtdMapping[cmbYMethodNArgs[0]](z, *cmbYMethodNArgs[1:]);\n\t\tif len(z.shape) > 1:\n\t\t\tz = np.reshape(z, (-2,)) # Make z to be one-dim array\n\t\t# Standardize the z\n\t\tself._logger.debug('z shape before standardization %s', z.shape);\n\t\t(z_y_stand, z_eta_stand) = self._getStandardizedByEta(z[0:n], z[n:]);\n\t\tz = np.append(z_y_stand, z_eta_stand);\n\t\tself._logger.debug('z shape after standardization %s', z.shape);\n\t\t# Extract xf and xc\n\t\txf = x[0:n,:]\n\t\txc = x[n:,:]\n\t\treturn (z, xf, xc, tc)", "def prep(self):\n print\n print 'Filtering rawdata to data as masked array...'\n# using 0 as flag\n# self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.rawdata[:self.nints,:, self.chans,:] == 0j)\n# using standard flags\n self.data = 
n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.flags[:self.nints,:, self.chans,:] == 0) # mask of True for flagged data (flags=0 in tpipe, which is flags=False in Miriad and flags=True in MS)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real #dataph is summed and detected to form TP beam at phase center, multi-pol\n self.min = self.dataph.min()\n self.max = self.dataph.max()\n print 'Shape of data:'\n print self.data.shape\n print 'Dataph min, max:'\n print self.min, self.max\n\n self.freq = self.freq_orig[self.chans]\n\n self.track0 = self.track(0.)\n self.twidth = 0\n for k in self.track0[1]:\n self.twidth = max(self.twidth, len(n.where(n.array(self.track0[1]) == k)[0]))\n\n print 'Track width in time: %d. Iteration could step by %d/2.' % (self.twidth, self.twidth)", "def read_nc(*args, **kwargs):\n a = read_nc_da(*args, **kwargs)\n return GeoArray(a)", "def __init__(self, config_data, in_dims, layer_num):\n self.layer_num = layer_num\n self.in_dims = in_dims\n self.out_dims = in_dims[\"width\"] * in_dims[\"height\"] * in_dims[\"channels\"]\n self.layer_type = config_data[\"type\"]\n self.name = config_data[\"name\"]\n self.params = []", "def extract_chunks(the_files, the_bands=None):\n ds_config = {}\n gdal_ptrs = []\n datatypes = []\n for the_file in the_files:\n g = gdal.Open(the_file)\n gdal_ptrs.append(gdal.Open(the_file))\n datatypes.append(GDAL2NUMPY[g.GetRasterBand(1).DataType])\n\n block_size = g.GetRasterBand(1).GetBlockSize()\n nx = g.RasterXSize\n ny = g.RasterYSize\n if the_bands is None:\n the_bands = np.arange(g.RasterCount) + 1\n proj = g.GetProjectionRef()\n geoT = g.GetGeoTransform()\n ds_config['nx'] = nx\n ds_config['ny'] = ny\n ds_config['nb'] = g.RasterCount\n ds_config['geoT'] = geoT\n ds_config['proj'] = proj\n block_size = [block_size[0]*2, block_size[1]*2]\n print(\"Blocksize is (%d,%d)\" % (block_size[0], block_size[1]))\n # block_size = [ 256, 256 ]\n # store these numbers in variables that may change later\n nx_valid = block_size[0]\n ny_valid = block_size[1]\n # find total x and y blocks to be read\n nx_blocks = (int)((nx + block_size[0] - 1) / block_size[0])\n ny_blocks = (int)((ny + block_size[1] - 1) / block_size[1])\n buf_size = block_size[0] * block_size[1]\n ################################################################\n # start looping through blocks of data\n ################################################################\n # loop through X-lines\n for X in range(nx_blocks):\n # change the block size of the final piece\n if X == nx_blocks - 1:\n nx_valid = nx - X * block_size[0]\n buf_size = nx_valid * ny_valid\n\n # find X offset\n this_X = X * block_size[0]\n\n # reset buffer size for start of Y loop\n ny_valid = block_size[1]\n buf_size = nx_valid * ny_valid\n\n # loop through Y lines\n for Y in range(ny_blocks):\n # change the block size of the final piece\n if Y == ny_blocks - 1:\n ny_valid = ny - Y * block_size[1]\n buf_size = nx_valid * ny_valid\n\n # find Y offset\n this_Y = Y * block_size[1]\n data_in = []\n for ig, ptr in enumerate(gdal_ptrs):\n buf = ptr.ReadRaster(this_X, this_Y, nx_valid, ny_valid,\n buf_xsize=nx_valid, buf_ysize=ny_valid,\n band_list=the_bands)\n a = np.frombuffer(buf, dtype=datatypes[ig])\n data_in.append(a.reshape((\n len(the_bands), ny_valid, nx_valid)).squeeze())\n\n yield (ds_config, this_X, this_Y, nx_valid, ny_valid,\n data_in)", "def get_nix(self):\n return self.dim", "def builddataframe(brick, path = \"..\", cutstring = \"1\", major = 0, minor = 0, newzprojection = None, charmsim 
= False):\n nplate =0\n\n print(\"Reading ScanSet at path \",path)\n\n #reading scanset\n sproc = r.EdbScanProc()\n sproc.eProcDirClient=path\n id = r.EdbID(brick,nplate,major,minor)\n ss = sproc.ReadScanSet(id)\n ss.Brick().SetID(brick)\n \n #preparing patterns\n npl = ss.eIDS.GetEntries()\n\n cut = r.TCut(cutstring)\n\n #intial empty arrays\n IDall = np.zeros(0,dtype=int)\n PIDall = np.zeros(0,dtype=int)\n\n xall = np.zeros(0,dtype=np.float32)\n yall = np.zeros(0,dtype=np.float32)\n zall = np.zeros(0,dtype=np.float32)\n TXall = np.zeros(0,dtype=np.float32)\n TYall = np.zeros(0,dtype=np.float32)\n\n MCEvtall = np.zeros(0,dtype=int)\n MCTrackall = np.zeros(0,dtype=int)\n Pall = np.zeros(0,dtype=np.float32)\n Flagall = np.zeros(0,dtype=int)\n\n print (\"Cut on couples \")\n cut.Print()\n\n print(\"Try to open folders at path \",path+\"/b00000\"+str(brick))\n for i in range(npl):\n idplate = ss.GetID(i)\n \n nplate = idplate.ePlate\n plate = ss.GetPlate(idplate.ePlate)\n #read pattern information\n p = r.EdbPattern()\n\n ect = r.EdbCouplesTree()\n if (nplate) <10:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p00{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n else:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p0{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n\n #addingcut\n ect.eCut = cut \n cutlist = ect.InitCutList()\n \n nsegcut = cutlist.GetN()\n nseg = ect.eTree.GetEntries()\n\n IDarray_plate = np.zeros(nsegcut,dtype=int)\n PIDarray_plate = np.zeros(nsegcut,dtype=int)\n\n xarray_plate = np.zeros(nsegcut,dtype=np.float32)\n yarray_plate = np.zeros(nsegcut,dtype=np.float32)\n zarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TXarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TYarray_plate = np.zeros(nsegcut,dtype=np.float32)\n \n MCEvtarray_plate = np.zeros(nsegcut,dtype=int)\n MCTrackarray_plate = np.zeros(nsegcut,dtype=int)\n Parray_plate = np.zeros(nsegcut,dtype=np.float32)\n Flagarray_plate = np.zeros(nsegcut,dtype=int)\n\n print (\"loop on {} segments over {} for plate {}\".format(nsegcut, nseg,nplate))\n for ientry in range(nsegcut):\n iseg = cutlist.GetEntry(ientry)\n ect.GetEntry(iseg)\n \n seg=ect.eS\n #//setting z and affine transformation\n seg.SetZ(plate.Z())\n seg.SetPID(i)\n seg.Transform(plate.GetAffineXY())\n\n if(newzprojection is not None):\n seg.PropagateTo(newzprojection[i])\n\n IDarray_plate[ientry] = seg.ID()\n PIDarray_plate[ientry] = seg.PID()\n \n xarray_plate[ientry] = seg.X()\n yarray_plate[ientry] = seg.Y()\n zarray_plate[ientry] = seg.Z()\n TXarray_plate[ientry] = seg.TX()\n TYarray_plate[ientry] = seg.TY()\n\n MCEvtarray_plate[ientry] = seg.MCEvt()\n MCTrackarray_plate[ientry] = seg.MCTrack()\n Parray_plate[ientry] = seg.P() \n if charmsim: #different place where pdgcode is stored\n Flagarray_plate[ientry] = seg.Vid(0)\n else:\n Flagarray_plate[ientry] = seg.Flag() \n\n #end of loop, storing them in global arrays\n IDall = np.concatenate((IDall,IDarray_plate),axis=0)\n PIDall = np.concatenate((PIDall,PIDarray_plate),axis=0)\n\n xall = np.concatenate((xall,xarray_plate),axis=0)\n yall = np.concatenate((yall,yarray_plate),axis=0)\n zall = np.concatenate((zall,zarray_plate),axis=0)\n TXall = np.concatenate((TXall,TXarray_plate),axis=0)\n TYall = np.concatenate((TYall,TYarray_plate),axis=0)\n MCEvtall = np.concatenate((MCEvtall,MCEvtarray_plate),axis=0)\n MCTrackall = np.concatenate((MCTrackall,MCTrackarray_plate),axis=0)\n Pall = 
np.concatenate((Pall,Parray_plate),axis=0)\n Flagall = np.concatenate((Flagall,Flagarray_plate),axis=0)\n\n data = {'ID':IDall,'PID':PIDall,'x':xall,'y':yall,'z':zall,'TX':TXall,'TY':TYall,'MCEvent':MCEvtall,'MCTrack':MCTrackall,'P':Pall,'Flag':Flagall}\n df = pd.DataFrame(data, columns = ['ID','PID','x','y','z','TX','TY','MCEvent','MCTrack','P','Flag'] )\n\n return df", "def convert_variable_type_n(df):\n # available columns\n \"\"\"\n 'source_file', 'source_id', 'report_id', 'observation_id',\n 'record_timestamp', 'iday', 'station_id', 'lat@hdr', 'lon@hdr',\n 'vertco_reference_1@body', 'obsvalue@body', 'varno@body', 'units',\n 'number_of_pressure_levels'\n \"\"\"\n dic_var_type = { 'int32' : ['varno@body', 'number_of_pressure_levels' , 'units', 'z_coordinate_type' , 'vertco_type@body' ] ,\n 'float32' : ['lat@hdr', 'lon@hdr' , 'vertco_reference_1@body', 'obsvalue@body', 'iday' ] ,\n 'string' : ['source_id' , 'station_id' , 'source_file' , 'report_id', 'observation_id', ] ,\n 'int64' : ['report_timestamp' , 'date_time', 'record_timestamp'] } \n \n convert = { 'int32' : np.int32 , \n 'string' : np.bytes_ ,\n 'float32' : np.float32 ,\n 'float64' : np.float64\n \n }\n # creating a dictionary variable - nptype \n mapping = {}\n for k in dic_var_type.keys():\n for l in dic_var_type[k]:\n mapping[l] = k \n\n for c in df.columns:\n try:\n #print('converting ' , c , ' to type ' , mapping[c] )\n df[c] = df[c].astype( convert[mapping[c]] )\n #print('converted: ', c )\n \n except:\n #print('could not convert type column ' , c )\n pass \n \n return df", "def c_shorts(x):\r\n return (c_short * len(x))(*x)", "def execute(self, in_nc, in_weight_table, out_nc, grid_name, conversion_flag, in_time_interval=\"6hr\"): # modified this line CJB 20190218\r\n # MJS I might consider netCDF4.Dataset.variables['RO'].units\r\n # and upstream correction of the cdo grid conversion units attribute.\r\n\r\n # Validate the netcdf dataset\r\n vars_oi_index = self.dataValidation(in_nc)\r\n \r\n \"\"\"get conversion factor the flag is used to differentiate forecasts converted \r\n to netCDF from GRIB and the original netCDF. 
They both use the same weight tables\r\n but the original netCDF is in mm whereas the stock GRIB forecasts are in meters.\r\n Set the conversion_flag in the run.py configuration file.\r\n \"\"\"\r\n if conversion_flag: # Line Added CJB 20190218\r\n conversion_factor = 1.0 #Line Modified CJB 20190218\r\n elif grid_name == 'ecmwf_t1279' or grid_name == 'ecmwf_tco639': # Line Modified CJB 20190218\r\n #if grid_name == 'ecmwf_HRES_F' or grid_name == 'ecmwf_ENS_F': # Line Added/Modified CJB 20190108\r\n #new grids in mm instead of m\r\n conversion_factor = 0.001\r\n else: #set the conversion factor to 1 for everything else (data is in m but legacy installations do not have a flag) Line Added CJB 20190218\r\n conversion_factor = 1.0 # Line Added CJB 20190218\r\n # MJS I might consider netCDF4.Dataset.variables['RO'].units\r\n # and upstream correction of the cdo grid conversion units attribute.\r\n # identify if the input netcdf data is the High Resolution data with three different time intervals\r\n id_data = self.dataIdentify(in_nc)\r\n if id_data is None:\r\n raise Exception(self.errorMessages[3])\r\n\r\n ''' Read the netcdf dataset'''\r\n data_in_nc = NET.Dataset(in_nc)\r\n time = data_in_nc.variables['time'][:]\r\n\r\n # Check the size of time variable in the netcdf data\r\n if len(time) == 0: # *** MJS This change seems like it is too loose an error trap; should it account for instances when nc file time var is != in length with id_data lenght?\r\n raise Exception(self.errorMessages[3])\r\n #if len(time) != self.length_time[id_data]:\r\n # raise Exception(self.errorMessages[3])\r\n\r\n ''' Read the weight table '''\r\n print(\"Reading the weight table...\", in_weight_table)\r\n dict_list = {self.header_wt[0]:[], self.header_wt[1]:[], self.header_wt[2]:[],\r\n self.header_wt[3]:[], self.header_wt[4]:[]}\r\n\r\n with open(in_weight_table, \"r\") as csvfile:\r\n reader = csv.reader(csvfile)\r\n count = 0\r\n for row in reader:\r\n if count == 0:\r\n #check number of columns in the weight table\r\n if len(row) < len(self.header_wt):\r\n raise Exception(self.errorMessages[4])\r\n #check header\r\n if row[1:len(self.header_wt)] != self.header_wt[1:]:\r\n raise Exception(self.errorMessages[5])\r\n count += 1\r\n else:\r\n for i in range(len(self.header_wt)):\r\n dict_list[self.header_wt[i]].append(row[i])\r\n count += 1\r\n\r\n ''' Calculate water inflows\r\n as a reminder, the first 91 time steps are T=0 to T=90 and are 1-hourly for HRES\r\n\t\t the next 18 time steps for HRES are T=93 to T=144 at 3-hourly\r\n then the final 16 time steps are T=150 to T=240 at 6-hourly for a total of 125 records\r\n\t\t\tFor ENS, the first 49 time steps are T=0 to T=144 at 3-hourly\r\n\t\t\tthe final 35 time steps are T=150 to T=360 at 6-hourly for a total of 84 records\r\n '''\r\n\t\t\t\r\n print(\"Calculating water inflows...\")\r\n\t\t\r\n ''' \r\n added the next section CJB 20180122 \r\n '''\r\n\r\n\t\t# Get the overall number of time steps\r\n size_time = self.getTimeSize(in_nc) #CJB 20180122\r\n # Determine the size of time steps in each group (1-hourly, 3-hourly, and/or 6-hourly)\r\n if id_data == \"HRES1\": # T <= 90 \r\n time_size = (size_time - 1)\r\n elif id_data == \"HRES13\": # 93 <= T <= 144\r\n if in_time_interval == \"1hr\":\r\n time_size = self.length_time_opt[\"HighRes-1hr\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"HighRes-1hr\"] - 1)\r\n elif id_data == \"HRES136\": # 150 <= T <= 240\r\n if in_time_interval == \"1hr\":\r\n time_size = 
self.length_time_opt[\"HighRes-1hr\"]\r\n elif in_time_interval == \"3hr\": # MJS Doesn't seem to be a case used currently, but added just in case later need.\r\n time_size = self.length_time_opt[\"HighRes-3hr-sub\"] # MJS This is HRES136, i.e., if for some reason in ecmwf_rapid_multi a 3 hr is asked for for this case, it should still have the 3hr_sub number of times\r\n elif in_time_interval == \"3hr_subset\":\r\n time_size = self.length_time_opt[\"HighRes-3hr-Sub\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"HighRes-1hr\"] - self.length_time_opt[\"HighRes-3hr-Sub\"] - 1)\r\n elif id_data == \"ENS3\": # T <= 144\r\n time_size = (size_time - 1)\r\n elif id_data == \"ENS36\": # 150 <= T <= 360\r\n if in_time_interval == \"3hr_subset\":\r\n time_size = self.length_time_opt[\"LowResFull-3hr-Sub\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"LowResFull-3hr-Sub\"] - 1)\r\n else: # id_data == \"ENS6\": # T <= 360 but all 6-hourly\r\n time_size = (size_time - 1)\r\n #else: # something is wrong and need to throw an error message - likely a corrupt forecast file\r\n # raise Exception(self.errorMessages[3])\r\n #''' end of added section CJB 20180122 \r\n #'''\r\n\r\n #if id_data == \"LowRes\":\r\n # size_time = self.length_time_opt[\"LowRes-6hr\"]\r\n #elif id_data == \"Low3HrRes\":\r\n # size_time = self.length_time_opt[\"LowRes-3hr\"]\r\n #elif id_data == \"LowResFull\":\r\n # if in_time_interval == \"3hr_subset\":\r\n # size_time = self.length_time_opt[\"LowResFull-3hr-Sub\"]\r\n # elif in_time_interval == \"6hr_subset\":\r\n # size_time = self.length_time_opt[\"LowResFull-6hr-Sub\"]\r\n # else:\r\n # size_time = self.length_time_opt[\"LowRes-6hr\"]\r\n #else: #HighRes\r\n # if in_time_interval == \"1hr\":\r\n # size_time = self.length_time_opt[\"HighRes-1hr\"]\r\n # elif in_time_interval == \"3hr\":\r\n # size_time = self.length_time_opt[\"HighRes-3hr\"]\r\n # elif in_time_interval == \"3hr_subset\":\r\n # size_time = self.length_time_opt[\"HighRes-3hr-Sub\"]\r\n # elif in_time_interval == \"6hr_subset\":\r\n # size_time = self.length_time_opt[\"HighRes-6hr-Sub\"]\r\n # else:\r\n # size_time = self.length_time_opt[\"HighRes-6hr\"]\r\n\r\n size_streamID = len(set(dict_list[self.header_wt[0]]))\r\n\r\n # Create output inflow netcdf data\r\n # data_out_nc = NET.Dataset(out_nc, \"w\") # by default format = \"NETCDF4\"\r\n data_out_nc = NET.Dataset(out_nc, \"w\", format = \"NETCDF3_CLASSIC\")\r\n #dim_Time = data_out_nc.createDimension('Time', size_time)\r\n dim_Time = data_out_nc.createDimension('Time', time_size)\r\n dim_RiverID = data_out_nc.createDimension('rivid', size_streamID)\r\n var_m3_riv = data_out_nc.createVariable('m3_riv', 'f4', \r\n ('Time', 'rivid'),\r\n fill_value=0)\r\n \r\n #data_temp = NUM.empty(shape = [size_time, size_streamID])\r\n data_temp = NUM.empty(shape = [time_size, size_streamID])\r\n\r\n lon_ind_all = [int(i) for i in dict_list[self.header_wt[2]]]\r\n lat_ind_all = [int(j) for j in dict_list[self.header_wt[3]]]\r\n\r\n # Obtain a subset of runoff data based on the indices in the weight table\r\n min_lon_ind_all = min(lon_ind_all)\r\n max_lon_ind_all = max(lon_ind_all)\r\n min_lat_ind_all = min(lat_ind_all)\r\n max_lat_ind_all = max(lat_ind_all)\r\n\r\n # self.vars_oi[vars_oi_index][3] = RO; get that variable's 3D structure (time, lat_index, lon_index) ready to reshape into 2D (time, lat_index x lon_index)\r\n data_subset_all = data_in_nc.variables[self.vars_oi[vars_oi_index][3]][:, min_lat_ind_all:max_lat_ind_all+1, 
min_lon_ind_all:max_lon_ind_all+1]\r\n len_time_subset_all = data_subset_all.shape[0]\r\n len_lat_subset_all = data_subset_all.shape[1]\r\n len_lon_subset_all = data_subset_all.shape[2]\r\n data_subset_all = data_subset_all.reshape(len_time_subset_all, (len_lat_subset_all * len_lon_subset_all))\r\n\r\n # compute new indices based on the data_subset_all\r\n index_new = []\r\n for r in range(0,count-1):\r\n ind_lat_orig = lat_ind_all[r]\r\n ind_lon_orig = lon_ind_all[r]\r\n index_new.append((ind_lat_orig - min_lat_ind_all)*len_lon_subset_all + (ind_lon_orig - min_lon_ind_all))\r\n\r\n # obtain a new subset of data\r\n data_subset_new = data_subset_all[:,index_new]*conversion_factor\r\n\r\n # start compute inflow\r\n pointer = 0\r\n for s in range(0, size_streamID):\r\n npoints = int(dict_list[self.header_wt[4]][pointer])\r\n # Check if all npoints points correspond to the same streamID\r\n if len(set(dict_list[self.header_wt[0]][pointer : (pointer + npoints)])) != 1:\r\n print(\"ROW INDEX {0}\".format(pointer))\r\n print(\"RIVID {0}\".format(dict_list[self.header_wt[0]][pointer]))\r\n raise Exception(self.errorMessages[2])\r\n\r\n area_sqm_npoints = [float(k) for k in dict_list[self.header_wt[1]][pointer : (pointer + npoints)]]\r\n area_sqm_npoints = NUM.array(area_sqm_npoints)\r\n area_sqm_npoints = area_sqm_npoints.reshape(1, npoints)\r\n data_goal = data_subset_new[:, pointer:(pointer + npoints)]\r\n \r\n \r\n #remove noise from data\r\n data_goal[data_goal<=0.00001] = 0\r\n\r\n ''' IMPORTANT NOTE: runoff variable in ECMWF dataset is cumulative instead of incremental through time\r\n '''\r\n # For data with Low Resolution, there's only one time interval 6 hrs\r\n if id_data == \"ENS6\": # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints\r\n elif id_data == \"ENS3\": # there's only one time interval 3 hrs # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108\r\n elif id_data == \"HRES1\": # there's only one time interval 1 hrs # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108\t\r\n #For data with the full version of Low Resolution, from Hour 0 to 144 (the first 49 time points) are of 3 hr time interval,\r\n # then from Hour 144 to 360 (36 time points) are of 6 hour time interval\r\n elif id_data == \"ENS36\": # Line Added/Modified CJB 20190108\r\n if in_time_interval == \"3hr_subset\":\r\n #use only the 3hr time interval\r\n ro_stream = NUM.subtract(data_goal[1:49,], data_goal[:48,]) * area_sqm_npoints\r\n elif in_time_interval == \"6hr_subset\":\r\n #use only the 6hr time interval\r\n ro_stream = NUM.subtract(data_goal[49:,], data_goal[48:-1,]) * area_sqm_npoints\r\n else: #\"LowRes-6hr\"\r\n ######################################################\r\n # MJS Always assume this case will have a full ECMWF 240\r\n # hour forecast to work with. 
It's actually never re-\r\n # quested by ecmwf_rapid_multiprocess anyhow.\r\n ######################################################\r\n #convert all to 6hr\r\n # calculate time series of 6 hr data from 3 hr data\r\n ro_6hr_a = NUM.subtract(data_goal[2:49:2,], data_goal[:48:2,])\r\n # get the time series of 6 hr data\r\n ro_6hr_b = NUM.subtract(data_goal[49:,], data_goal[48:-1,])\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b]) * area_sqm_npoints\r\n #For data with High Resolution, from Hour 0 to 90 (the first 91 time points) are of 1 hr time interval,\r\n # then from Hour 90 to 144 (18 time points) are of 3 hour time interval, and from Hour 144 to 240 (16 time points)\r\n # are of 6 hour time interval\r\n ##########################################################\r\n # MJS The following should handle id_data = HRES13 and HRES136\r\n ##########################################################\r\n else:\r\n if in_time_interval == \"1hr\":\r\n #ro_stream = NUM.subtract(data_goal[1:91,],data_goal[:90,]) * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:1+time_size,],data_goal[:time_size,]) * area_sqm_npoints # Line Added/Modified CJB, MJS 20190108\r\n elif in_time_interval == \"3hr\": # MJS HRES 3hr not currently used\r\n # calculate time series of 3 hr data from 1 hr data\r\n ro_3hr_a = NUM.subtract(data_goal[3:91:3,],data_goal[:88:3,])\r\n # get the time series of 3 hr data\r\n #ro_3hr_b = NUM.subtract(data_goal[91:109,], data_goal[90:108,])\r\n ro_3hr_b = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) # MJS modified again; seems no case for this, but just in case later... Line Added/Modified CJB 20190108\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_3hr_a, ro_3hr_b]) * area_sqm_npoints\r\n elif in_time_interval == \"3hr_subset\":\r\n #use only the 3hr time interval\r\n #ro_stream = NUM.subtract(data_goal[91:109,], data_goal[90:108,]) * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) * area_sqm_npoints # MJS modified again; needs to handle HRES13 that might not have complete 3hr set... Line Added/Modified CJB 20190108\r\n elif in_time_interval == \"6hr_subset\":\r\n #use only the 6hr time interval\r\n ro_stream = NUM.subtract(data_goal[109:,], data_goal[108:-1,]) * area_sqm_npoints\r\n ######################################################\r\n # MJS Always assume this case will have a full ECMWF 240 \r\n # hour forecast to work with. 
It's actually never re-\r\n # quested by ecmwf_rapid_multiprocess anyhow.\r\n ######################################################\r\n else: # in_time_interval == \"6hr\"\r\n #arcpy.AddMessage(\"6hr\")\r\n # calculate time series of 6 hr data from 1 hr data\r\n ro_6hr_a = NUM.subtract(data_goal[6:91:6,], data_goal[:85:6,])\r\n # calculate time series of 6 hr data from 3 hr data\r\n ro_6hr_b = NUM.subtract(data_goal[92:109:2,], data_goal[90:107:2,])\r\n # get the time series of 6 hr data\r\n ro_6hr_c = NUM.subtract(data_goal[109:,], data_goal[108:-1,])\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b, ro_6hr_c]) * area_sqm_npoints\r\n \r\n #remove negative values\r\n ro_stream[ro_stream<0] = 0\r\n data_temp[:,s] = ro_stream.sum(axis = 1)\r\n\r\n pointer += npoints\r\n\r\n\r\n '''Write inflow data'''\r\n print(\"Writing inflow data...\")\r\n var_m3_riv[:] = data_temp\r\n # close the input and output netcdf datasets\r\n data_in_nc.close()\r\n data_out_nc.close()", "def run(self):\n if self.part == 'a':\n cond_latitude = \"b < 0\"\n else:\n cond_latitude = \"b >= 0\"\n\n if self.mode == 'full':\n extracmd = \"\"\"delcols \"pSaturated \\\n rErrBits iErrBits haErrBits errBits \\\n rPlaneX rPlaneY iPlaneX iPlaneY \\\n haPlaneX haPlaneY rAxis primaryID \\\n vignetted truncated badPix\" \"\"\"\n else:\n # select \"nBands == 3\"; \\\n extracmd = \"\"\"keepcols \"name ra dec \\\n r rErr \\\n i iErr \\\n ha haErr \\\n mergedClass errBits\";\"\"\"\n\n instring = ''\n for field in self.fieldlist:\n path = os.path.join(self.datapath,\n 'strip{0:.0f}'.format(self.strip),\n '{0}.fits'.format(field))\n instring += 'in={0} '.format(path)\n\n output_filename = self.get_output_filename()\n output_filename_gzip = self.get_output_filename(gzip=True)\n log.info('Writing data to {0}'.format(output_filename))\n\n version = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n\n # A bug in stilts causes long fieldIDs to be truncated if -utype S15 is not set\n # We also replace a bunch of column descriptions because they cannot be longer than 73 chars.\n param = {'stilts': constants.STILTS,\n 'in': instring,\n 'icmd': \"\"\"'clearparams *; \\\n setparam NAME \"IPHAS DR2 Source Catalogue (part \"\"\"+self.get_partname()+\"\"\")\"; \\\n setparam ORIGIN \"www.iphas.org\"; \\\n setparam AUTHOR \"Geert Barentsen, Hywel Farnhill, Janet Drew\"; \\\n setparam VERSION \\\"\"\"\"+version+\"\"\"\"; \\\n select \"(errBits < 64) \\\n & ! (r<12.5 & i<11.5 & ha<12) \\\n & (rErr < 0.198 || iErr < 0.198 || haErr < 0.198) \\\n & (pStar > 0.2 || pGalaxy > 0.2) \\\n & (NULL_rErrBits || NULL_iErrBits || NULL_haErrBits || ((rErrbits & iErrBits & haErrBits & 8) == 0))\n & l >= \"\"\"+str(self.lon1)+\"\"\" \\\n & l < \"\"\"+str(self.lon2)+\"\"\" \\\n & \"\"\"+str(cond_latitude)+\"\"\" \\\n & sourceID == primaryID\"; \\\n addcol -before ra \\\n -desc \"Source designation (JHHMMSS.ss+DDMMSS.s) without IPHAS2 prefix.\" \\\n name \\\n \"concat(\\\\\"J\\\\\", \n replaceAll(degreesToHms(ra, 2),\n \\\\\":\\\\\", \\\\\"\\\\\"), \n replaceAll(degreesToDms(dec, 1),\n \\\\\":\\\\\", \\\\\"\\\\\")\n )\"; \\\n addcol -before rMJD -desc \"True if source was blended with a nearby neighbour in the r-band.\" \\\n rDeblend \"NULL_rErrBits ? false : (rErrBits & 2) > 0\";\n addcol -before rMJD -desc \"True i the peak pixel count exceeded 55000 in r.\" \\\n rSaturated \"r<13 ? true : NULL_rErrBits ? 
false : (rErrBits & 8) > 0\";\n addcol -before iMJD -desc \"True if source was blended with a nearby neighbour in the i-band.\" \\\n iDeblend \"NULL_iErrBits ? false : (iErrBits & 2) > 0\";\n addcol -before iMJD -desc \"True if the peak pixel count exceeded 55000 in i.\" \\\n iSaturated \"i<12 ? true : NULL_iErrBits ? false : (iErrBits & 8) > 0\";\n addcol -before haMJD -desc \"True if source was blended with a nearby neighbour in H-alpha.\" \\\n haDeblend \"NULL_haErrBits ? false : (haErrBits & 2) > 0\";\n addcol -before haMJD -desc \"True if the peak pixel count exceeded 55000 in H-alpha.\" \\\n haSaturated \"ha<12.5 ? true : NULL_haErrBits ? false : (haErrBits & 8) > 0\";\n replacecol saturated \"rSaturated || iSaturated || haSaturated\";\n colmeta -name a10 reliable;\n replacecol a10 \"! saturated & nBands == 3 & rErr<0.1 & iErr<0.1 & haErr<0.1 & (abs(r-rAperMag1) < 3*hypot(rErr,rAperMag1Err)+0.03) & (abs(i-iAperMag1) < 3*hypot(iErr,iAperMag1Err)+0.03) & (abs(ha-haAperMag1) < 3*hypot(haErr,haAperMag1Err)+0.03)\";\n addcol -before fieldID -desc \"True if (a10 & pStar > 0.9 & ! deblend & ! brightNeighb)\" \\\n a10point \"a10 & pStar > 0.9 & ! deblend & ! brightNeighb\";\n replacecol -utype S15 fieldID \"fieldID\";\n replacecol -utype S1 fieldGrade \"toString(fieldGrade)\";\n colmeta -desc \"True if detected in all bands at 10-sigma plus other criteria.\" a10;\n colmeta -desc \"J2000 RA with respect to the 2MASS reference frame.\" ra;\n colmeta -desc \"Unique source identification string (run-ccd-detectionnumber).\" sourceID;\n colmeta -desc \"Astrometric fit error (RMS) across the CCD.\" posErr;\n colmeta -desc \"1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy.\" mergedClass;\n colmeta -desc \"N(0,1) stellarness-of-profile statistic.\" mergedClassStat;\n colmeta -desc \"1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy.\" rClass;\n colmeta -desc \"1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy.\" iClass;\n colmeta -desc \"1=galaxy, 0=noise, -1=star, -2=probableStar, -3=probableGalaxy.\" haClass;\n colmeta -desc \"Unique r-band detection identifier (run-ccd-detectionnumber).\" rDetectionID;\n colmeta -desc \"Unique i-band detection identifier (run-ccd-detectionnumber).\" iDetectionID;\n colmeta -desc \"Unique H-alpha detection identifier (run-ccd-detectionnumber).\" haDetectionID;\n colmeta -desc \"CCD pixel coordinate in the r-band exposure.\" rX;\n colmeta -desc \"CCD pixel coordinate in the r-band exposure.\" rY;\n colmeta -desc \"CCD pixel coordinate in the i-band exposure.\" iX;\n colmeta -desc \"CCD pixel coordinate in the i-band exposure.\" iY;\n colmeta -desc \"CCD pixel coordinate in the H-alpha exposure.\" haX;\n colmeta -desc \"CCD pixel coordinate in the H-alpha exposure.\" haY;\n colmeta -desc \"Survey field identifier.\" fieldID;\n colmeta -desc \"Probability the source is extended.\" pGalaxy;\n colmeta -desc \"Default r mag (Vega) using the 2.3 arcsec aperture.\" r;\n colmeta -desc \"Default i mag (Vega) using the 2.3 arcsec aperture.\" i;\n colmeta -desc \"Default H-alpha mag (Vega) using the 2.3 arcsec aperture.\" ha;\n colmeta -desc \"r mag (Vega) derived from peak pixel height.\" rPeakMag;\n colmeta -desc \"i mag (Vega) derived from peak pixel height.\" iPeakMag;\n colmeta -desc \"H-alpha mag (Vega) derived from peak pixel height.\" haPeakMag;\n colmeta -desc \"r mag (Vega) using the 1.2 arcsec aperture.\" rAperMag1;\n colmeta -desc \"i mag (Vega) using the 1.2 arcsec aperture.\" iAperMag1;\n colmeta -desc \"H-alpha mag (Vega) 
using the 1.2 arcsec aperture.\" haAperMag1;\n colmeta -desc \"r mag (Vega) using the 3.3 arcsec aperture.\" rAperMag3;\n colmeta -desc \"i mag (Vega) using the 3.3 arcsec aperture.\" iAperMag3;\n colmeta -desc \"H-alpha mag (Vega) using the 3.3 arcsec aperture.\" haAperMag3;\n colmeta -desc \"Internal quality control score of the field. One of A, B, C or D.\" fieldGrade;\n colmeta -desc \"Number of repeat observations of this source in the survey.\" nObs;\n colmeta -desc \"SourceID of the object in the partner exposure.\" sourceID2;\n colmeta -desc \"FieldID of the partner detection.\" fieldID2;\n colmeta -desc \"r mag (Vega) in the partner field, obtained within 10 minutes.\" r2;\n colmeta -desc \"Uncertainty for r2.\" rErr2;\n colmeta -desc \"i mag (Vega) in the partner field, obtained within 10 minutes.\" i2;\n colmeta -desc \"Uncertainty for i2.\" iErr2;\n colmeta -desc \"H-alpha mag (Vega) in the partner field, obtained within 10 minutes.\" ha2;\n colmeta -desc \"Uncertainty for ha2.\" haErr2;\n colmeta -desc \"flag brightNeighb (1), deblend (2), saturated (8), vignetting (64)\" errBits2;\n {0}\n '\"\"\".format(extracmd),\n 'out': output_filename}\n\n cmd = '{stilts} tcat {in} icmd={icmd} countrows=true lazy=true out={out}'\n mycmd = cmd.format(**param)\n log.info(mycmd)\n status = os.system(mycmd)\n log.info('concat: '+str(status))\n\n # zip\n mycmd = 'gzip --stdout {0} > {1}'.format(output_filename, output_filename_gzip)\n log.debug(mycmd)\n status = os.system(mycmd)\n log.info('gzip: '+str(status))\n\n return status", "def __init__(self, fvcom):\n\n # Prepare this object with all the objects we'll need later on (data, dims, time, grid, atts).\n self._prep()\n\n self.obj_iter = lambda x: [a for a in dir(x) if not a.startswith('__')]\n\n grid_names = ('lon', 'lat', 'lonc', 'latc', 'nv',\n 'h', 'h_center',\n 'nbe', 'ntsn', 'nbsn', 'ntve', 'nbve',\n 'art1', 'art2', 'a1u', 'a2u',\n 'siglay', 'siglev')\n time_names = ('time', 'Times', 'datetime', 'Itime', 'Itime2')\n\n for key in fvcom:\n if key in grid_names:\n setattr(self.grid, key, fvcom[key])\n elif key in time_names:\n setattr(self.time, key, fvcom[key])\n else: # assume data.\n setattr(self.data, key, fvcom[key])\n # Make some dimensions\n self.dims.three = 3\n self.dims.four = 4\n self.dims.maxnode = 11\n self.dims.maxelem = 9\n # This is a little repetitive (each dimension can be set multiple times), but it has simplicity to its\n # advantage.\n for obj in self.obj_iter(self.data):\n if obj in ('ua', 'va'):\n try:\n self.dims.time, self.dims.nele = getattr(self.data, obj).shape\n except ValueError:\n # Assume we've got a single position.\n self.dims.time = getattr(self.data, obj).shape[0]\n self.dims.nele = 1\n elif obj in ('temp', 'salinity'):\n try:\n self.dims.time, self.dims.siglay, self.dims.node = getattr(self.data, obj).shape\n except ValueError:\n # Assume we've got a single position\n self.dims.time, self.dims.siglay = getattr(self.data, obj).shape[:2]\n self.dims.node = 1\n self.dims.siglev = self.dims.siglay + 1\n elif obj in ['zeta']:\n try:\n self.dims.time, self.dims.node = getattr(self.data, obj).shape\n except ValueError:\n # Assume we've got a single position\n self.dims.time = getattr(self.data, obj).shape[0]\n self.dims.node = 1\n elif obj in ('Times'):\n self.dims.time, self.dims.DateStrLen = getattr(self.time, obj).shape\n elif obj in ('time', 'Itime', 'Itime2', 'datetime'):\n self.dims.time = getattr(self.time, obj).shape" ]
[ "0.5600547", "0.54413694", "0.54308397", "0.5418377", "0.54078954", "0.53668946", "0.5344015", "0.5336579", "0.5290628", "0.52342385", "0.52231324", "0.51942706", "0.5134216", "0.5109639", "0.50503546", "0.50287384", "0.5022853", "0.5013268", "0.49844584", "0.496534", "0.49313447", "0.491932", "0.49151608", "0.49007598", "0.48769245", "0.4872824", "0.48721966", "0.48573086", "0.48450908", "0.4840413", "0.4835683", "0.48309624", "0.4830201", "0.48218122", "0.4814762", "0.48112458", "0.48105586", "0.4804491", "0.4791109", "0.47895694", "0.47870284", "0.47868448", "0.47832462", "0.47721085", "0.47505307", "0.4728599", "0.47212243", "0.4720602", "0.47036415", "0.46994618", "0.46980575", "0.46892023", "0.46845537", "0.46827963", "0.46725762", "0.46669036", "0.4654379", "0.464887", "0.4645234", "0.4632388", "0.46265513", "0.4618351", "0.4614836", "0.46145865", "0.4610668", "0.4606712", "0.4603365", "0.46028727", "0.4598671", "0.45969704", "0.45965722", "0.45913535", "0.45890513", "0.45835465", "0.4578393", "0.45780894", "0.4573462", "0.45689753", "0.456696", "0.45661297", "0.45651022", "0.45644322", "0.45605367", "0.4559076", "0.45580596", "0.45569935", "0.45562437", "0.45548615", "0.45528284", "0.45501944", "0.45470065", "0.45360035", "0.45332894", "0.45245016", "0.45241183", "0.45222673", "0.4521074", "0.4519583", "0.4516176", "0.45100886" ]
0.5500222
1
Case 4: A node making multiple primary declarations for a particular node. Consider 4 nodes A, B, C and D. Let's say node B is malicious and is repeatedly declaring Node D as primary.
def testPrimaryElectionCase4(case4Setup, looper): allNodes = case4Setup A, B, C, D = allNodes looper.run(checkNodesConnected(allNodes)) # Node B sends multiple declarations of node D's 0th protocol instance as # primary to all nodes for i in range(5): # B.send(Primary(D.name, 0, B.viewNo)) B.send(primaryByNode(D.name, B, 0)) # No node from node A, node C, node D(node B is malicious anyway so not # considering it) should have more than one primary declaration for node # D since node D is slow. The one primary declaration for node D, # that nodes A, C and D might have would be because of node B def x(): primDecs = [p[0] for p in node.elector.primaryDeclarations[0].values()] assert primDecs.count(D.name) <= 1 # also have to take into account the catchup procedure timeout = waits.expectedPoolNominationTimeout(len(allNodes)) + \ waits.expectedPoolCatchupTime(len(allNodes)) for node in (A, C, D): looper.run(eventually(x, retryWait=.5, timeout=timeout)) timeout = waits.expectedPoolElectionTimeout( len(allNodes)) + delaySelfNomination ensureElectionsDone(looper=looper, nodes=allNodes, customTimeout=timeout) # Node D should not have any primary replica assert not D.hasPrimary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def primary(self):\n ...", "def assign_ids(ast):\n def f_either(obj, *child_results):\n id_ = slast.SlAst.id_\n obj.id_ = id_[0]\n id_[0] += 1\n\n # def f_either(obj, *child_results):\n # _id_dict = slast.SlAst._id_dict\n # id_ = slast.SlAst.id_\n # # FIXME: Assign same id to all data predicate calls with the same root/stop-nodes\n # key = str(obj.to_sl_expr())\n # if key in _id_dict:\n # obj.id_ = _id_dict[key]\n # else:\n # obj.id_ = id_[0]\n # _id_dict[key] = id_[0]\n # id_[0] += 1\n\n astutils.fold(f_either, f_either, ast)", "def __init__(self, node, declare):\n preproc.__init__(self, node, declare, \"hashElse\", \"#else\")", "def visit_tertiary_node(self, node, children):\n return {node.rule_name: children[0]}", "def visit_Declaration(self, node):\n name = self.name_gen.next()\n extend_ops = self.extend_ops\n self.push_name(name)\n base_code = compile(node.base.py_ast, self.filename, mode='eval')\n extend_ops([\n # f_globals = globals()\n (LOAD_GLOBAL, 'globals'),\n (CALL_FUNCTION, 0x0000),\n (STORE_FAST, 'f_globals'),\n\n # eval_ = eval\n (LOAD_GLOBAL, 'eval'),\n (STORE_FAST, 'eval_'),\n\n # foo_cls = eval('Window', toolkit, f_globals)\n # foo = foo_cls.__enaml_call__(identifiers, toolkit)\n (LOAD_FAST, 'eval_'),\n (LOAD_CONST, base_code),\n (LOAD_FAST, 'toolkit'),\n (LOAD_FAST, 'f_globals'),\n (CALL_FUNCTION, 0x0003),\n (LOAD_ATTR, '__enaml_call__'),\n (LOAD_FAST, 'identifiers'),\n (LOAD_FAST, 'toolkit'),\n (CALL_FUNCTION, 0x0002),\n (STORE_FAST, name),\n ])\n\n if node.identifier:\n extend_ops([\n # identifiers['foo'] = foo\n (LOAD_FAST, name),\n (LOAD_FAST, 'identifiers'),\n (LOAD_CONST, node.identifier),\n (STORE_SUBSCR, None),\n ])\n \n visit = self.visit\n for item in node.body:\n visit(item)\n \n extend_ops([\n # return foo\n (LOAD_FAST, name),\n (RETURN_VALUE, None),\n ])\n\n self.pop_name()", "def visit(self, node):", "def visit(self, node):", "def visit_Declaration(self, node):\n code_ops = self.code_ops\n name = node.name\n description = DeclarationCompiler.compile(node, self.filename)\n code_ops.extend([\n (SetLineno, node.lineno),\n (LOAD_NAME, '_make_enamldef_helper_'), # Foo = _make_enamldef_helper_(name, base, description, globals)\n (LOAD_CONST, name),\n (LOAD_NAME, node.base),\n (LOAD_CONST, description), # description is a marshalable dict\n (LOAD_NAME, 'globals'),\n (CALL_FUNCTION, 0x0000),\n (CALL_FUNCTION, 0x0004),\n (STORE_NAME, name),\n ])\n\n # We now have a new Declarative subclass stored at 'name' to\n # which we need to add any user defined attributes and events.\n code_ops.extend([\n (LOAD_NAME, name),\n (LOAD_ATTR, '_add_user_attribute'),\n ])\n\n # Dispatch to add any class-level info contained within the\n # declaration body. Visit nonstrict since not all child nodes\n # are valid at the class-level. 
The '_add_user_attribute'\n # class method is left on the top of the stack and popped\n # at the end of the visitors.\n for child_node in node.body:\n self.visit_nonstrict(child_node)\n\n code_ops.append((POP_TOP, None))", "def fix_ids(self, tree: nodes.document) -> None:\n def update_node_id(node: Element) -> None:\n \"\"\"Update IDs of given *node*.\"\"\"\n new_ids: list[str] = []\n for node_id in node['ids']:\n new_id = self.fix_fragment('', node_id)\n if new_id not in new_ids:\n new_ids.append(new_id)\n node['ids'] = new_ids\n\n for reference in tree.findall(nodes.reference):\n if 'refuri' in reference:\n m = self.refuri_re.match(reference['refuri'])\n if m:\n reference['refuri'] = self.fix_fragment(m.group(1), m.group(2))\n if 'refid' in reference:\n reference['refid'] = self.fix_fragment('', reference['refid'])\n\n for target in tree.findall(nodes.target):\n update_node_id(target)\n\n next_node: Node = target.next_node(ascend=True)\n if isinstance(next_node, nodes.Element):\n update_node_id(next_node)\n\n for desc_signature in tree.findall(addnodes.desc_signature):\n update_node_id(desc_signature)", "def test_normal_fields_can_be_defined_between_primary_keys(self):", "def node_mapping(self):\n ...", "def visit_Declaration(self, node):\n # This creates a function from the generated code ops then\n # wraps that function in an EnamlDeclaration.\n func_code = DeclarationCompiler.compile(node, self.filename)\n name = node.name\n self.code_ops.extend([\n (LOAD_CONST, func_code),\n (MAKE_FUNCTION, 0),\n (STORE_NAME, name),\n (LOAD_NAME, 'EnamlDeclaration'),\n (LOAD_NAME, name),\n (CALL_FUNCTION, 0x0001),\n (STORE_NAME, name),\n ])", "def visit_Declaration(self, node):\n self.block = node.name\n obj = {\n 'enamldef': True,\n 'type': node.name,\n 'base': node.base,\n 'doc': node.doc,\n 'lineno': node.lineno,\n 'identifier': node.identifier,\n 'filename': self.filename,\n 'block': self.block,\n 'children': [],\n 'bindings': [],\n }\n self.stack.append(obj)\n for item in node.body:\n self.visit(item)", "def declaration(self) -> global___Statement.Declaration:", "def repeated_elements(self) -> global___Statement.Declaration:", "def primary(self):\n return Seq(''.join([r.aa for r in self.residues]), protein_alphabet)", "def visit_nodes(self, node, initial):\n #L.debug(\"visit_nodes %s %s\", node, initial)\n mapping = dict(initial)\n for visit_node in reversed(node.precedes()):\n sources = tuple(mapping[s] for s in visit_node.sources)\n mapping[visit_node] = self.visit_node(visit_node, sources)\n return mapping[node]", "def compile_scad(object):\n\t\n\t# Regardless of the type of seleectors used, select all parts that are \"truthy\"\n\tselection = Object.compose(lambda x: bool(x), object).get_selection(True)\n\t\n\tif selection.inverted:\n\t\traise Exception('The top-level node is inverted.')\n\t\n\tif selection.void:\n\t\traise Exception('The top-level node is void.')\n\t\n\troot_node = selection.node\n\tnodes_set = set() # All nodes in the project\n\tnodes_list = [] # All nodes ordered by their appearence. 
Nodes that depend on other nodes come before their dependees in this list.\n\treused_nodes = set() # Nodes that are referenced by other nodes more than once.\n\t\n\tdef walk_nodes(node):\n\t\tif node in nodes_set:\n\t\t\treused_nodes.add(node)\n\t\telse:\n\t\t\tnodes_set.add(node)\n\t\t\t\n\t\t\tfor i in node.child_nodes:\n\t\t\t\twalk_nodes(i)\n\t\t\t\n\t\t\tnodes_list.append(node)\n\t\n\twalk_nodes(root_node)\n\t\n\tnode_replacements = { } # by actual node for all noces\n\tmodules = [] # Modules for all nodes that are referenced more than once.\n\t\n\t# Iterate in serversed order so that nodes will be written to the file before their dependees.\n\tfor i in reversed(nodes_list):\n\t\tif i in reused_nodes:\n\t\t\tmodule = _Module('node_{}'.format(len(modules) + 1), i)\n\t\t\t\n\t\t\tmodules.append(module)\n\t\t\treplacement_node = module.reference\n\t\telse:\n\t\t\treplacement_node = i\n\t\t\n\t\tnode_replacements[i] = replacement_node\n\t\n\tyield from root_node.iter_lines(node_replacements)\n\t\n\tfor i in modules:\n\t\tyield '' # An empty line before each module definition\n\t\tyield from i.iter_lines(node_replacements)", "def _get_primary(self, elements):\n if not elements:\n return None\n verified = [x for x in elements if x.is_verified is True]\n\n if len(verified) == 0:\n if len([e for e in elements if e.is_primary]) > 0:\n raise PrimaryElementViolation('There are unconfirmed primary elements')\n return None\n\n res = [x for x in verified if x.is_primary is True]\n if len(res) != 1:\n raise PrimaryElementViolation(\n \"{!s} contains {!s}/{!s} primary elements\".format(self.__class__.__name__, len(res), len(elements))\n )\n return res[0]", "def visit_Node(self, node):\n pass", "def map(self) -> global___Statement.Declaration:", "def _validate_enamldef(self, node, lexer):\n ident_names = set()\n\n def check_id(name, node):\n if name in ident_names:\n msg = \"redeclaration of identifier '%s'\"\n msg += \" (this will be an error in Enaml version 1.0)\"\n syntax_warning(msg % name, FakeToken(lexer, node.lineno))\n ident_names.add(name)\n\n # validate the identifiers\n ChildDef = enaml_ast.ChildDef\n TemplateInst = enaml_ast.TemplateInst\n stack = list(reversed(node.body))\n while stack:\n node = stack.pop()\n if isinstance(node, ChildDef):\n if node.identifier:\n check_id(node.identifier, node)\n stack.extend(reversed(node.body))\n elif isinstance(node, TemplateInst):\n idents = node.identifiers\n if idents is not None:\n for name in idents.names:\n check_id(name, idents)\n if idents.starname:\n check_id(idents.starname, idents)", "def transform(\n self, node: ast.AST, nextSyntaxId=0\n ) -> Tuple[ast.AST, List[ast.AST], int]:\n self.nextSyntaxId = nextSyntaxId\n wrapped = self.visit(ast.fix_missing_locations(node))\n if self.is_proposition_factory(wrapped):\n return wrapped, self.nextSyntaxId\n newNode = self._create_atomic_proposition_factory(node)\n return newNode, self.nextSyntaxId", "def _check_primary(self, old_list):\n try:\n self._get_primary(self._elements)\n except PrimaryElementViolation:\n self._elements = copy.copy(old_list)\n raise", "def __init__(self, node, declare):\n preproc.__init__(self, node, declare, \"hashElif\", \"#elif\")\n self.string = \" \"+self.test", "def preprocessNode(self):\n pass", "def pg_secondary_keys(self):", "def test_insert_node_multiple_content_1():\n first = 0\n second = 1\n third = 3\n chain = N.Node(second, N.Node(third))\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned 
incorrect data value first given a node and chain length 2 (insert at start)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at start)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at start)\"", "def test_insert_node_multiple_content_2():\n first = 0\n second = 1\n third = 3\n chain = N.Node(first, N.Node(third))\n node = N.Node(second)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at mid)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at middle)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at middle)\"", "def transform_all(self, node):\n # don't traverse, only handle field lists that are immediate children\n summary = []\n data = {}\n name, uid = _get_desc_data(node.parent)\n for child in node:\n if isinstance(child, remarks):\n remarks_string = transform_node(child)\n data['remarks'] = remarks_string\n elif isinstance(child, addnodes.desc):\n if child.get('desctype') == 'attribute':\n attribute_map = {} # Used for detecting duplicated attributes in intermediate data and merge them\n\n for item in child:\n if isinstance(item, desc_signature) and any(isinstance(n, addnodes.desc_annotation) for n in item):\n # capture attributes data and cache it\n data.setdefault('added_attribute', [])\n\n item_ids = item.get('ids', [''])\n\n if len(item_ids) == 0: # find a node with no 'ids' attribute\n curuid = item.get('module', '') + '.' 
+ item.get('fullname', '')\n # generate its uid by module and fullname\n else:\n curuid = item_ids[0]\n\n if len(curuid) > 0:\n parent = curuid[:curuid.rfind('.')]\n name = item.children[0].astext()\n\n if curuid in attribute_map:\n if len(item_ids) == 0: # ensure the order of docstring attributes and real attributes is fixed\n attribute_map[curuid]['syntax']['content'] += (' ' + item.astext())\n # concat the description of duplicated nodes\n else:\n attribute_map[curuid]['syntax']['content'] = item.astext() + ' ' + attribute_map[curuid]['syntax']['content']\n else:\n if _is_desc_of_enum_class(node):\n addedData = {\n 'uid': curuid,\n 'id': name,\n 'parent': parent,\n 'langs': ['python'],\n 'name': name,\n 'fullName': curuid,\n 'type': item.parent.get('desctype'),\n 'module': item.get('module'),\n 'syntax': {\n 'content': item.astext(),\n 'return': {\n 'type': [parent]\n }\n }\n }\n else:\n addedData = {\n 'uid': curuid,\n 'class': parent,\n 'langs': ['python'],\n 'name': name,\n 'fullName': curuid,\n 'type': 'attribute',\n 'module': item.get('module'),\n 'syntax': {\n 'content': item.astext()\n }\n }\n\n attribute_map[curuid] = addedData\n else:\n raise Exception('ids of node: ' + repr(item) + ' is missing.')\n # no ids and no duplicate or uid can not be generated.\n if 'added_attribute' in data:\n data['added_attribute'].extend(attribute_map.values()) # Add attributes data to a temp list\n\n # Don't recurse into child nodes\n continue\n elif isinstance(child, nodes.field_list):\n (entries, types) = _hacked_transform(self.typemap, child)\n _data = get_data_structure(entries, types, child)\n data.update(_data)\n elif isinstance(child, addnodes.seealso):\n data['seealso'] = transform_node(child)\n elif isinstance(child, nodes.admonition) and 'Example' in child[0].astext():\n # Remove the admonition node\n child_copy = child.deepcopy()\n child_copy.pop(0)\n data['example'] = transform_node(child_copy)\n else:\n content = transform_node(child)\n\n # skip 'Bases' in summary\n if not content.startswith('Bases: '):\n summary.append(content)\n\n if \"desctype\" in node.parent and node.parent[\"desctype\"] == 'class':\n data.pop('exceptions', '') # Make sure class doesn't have 'exceptions' field.\n\n if summary:\n data['summary'] = '\\n'.join(summary)\n # Don't include empty data\n for key, val in data.copy().items():\n if not val:\n del data[key]\n data['type'] = PatchedDocFieldTransformer.type_mapping(node.parent[\"desctype\"]) if \"desctype\" in node.parent else 'unknown'\n self.directive.env.docfx_info_field_data[uid] = data\n super(PatchedDocFieldTransformer, self).transform_all(node)", "def tc_advice_id(self, dpid, tc_type, tc_subtype, src_mac, detail1):\n switch = self.switches[dpid]\n #*** TBD, deal with context:\n context = self.context_default\n #*** Look up source mac to get a port number:\n port_number = switch.mactable.mac2port(src_mac, context)\n\n #*** TBD, handle return value for port not found...\n\n if tc_subtype == 'lldp':\n #*** Check to see if we already know this identity:\n db_data = {'id_type': tc_subtype,\n 'src_mac': src_mac, 'node_name': detail1}\n db_result = self.dbidnode.find_one(db_data)\n if not db_result:\n #*** LLDP identity not in database so add it:\n db_data = {'last_seen': time.time(), 'id_type': tc_subtype,\n 'src_mac': src_mac, 'node_name': detail1}\n db_result = self.dbidnode.insert_one(db_data)\n self.logger.info(\"Created new ID Node record id_type=%s \"\n \"node_name=%s\", tc_subtype, detail1)\n #*** Check to see if we need to add a flow to switch:\n 
switch.flowtables.add_fe_tc_id(tc_subtype, detail1, src_mac,\n self.main_policy.optimised_rules.get_rules())\n else:\n #*** Just update the last_seen field:\n db_result = self.dbdpae.update_one(\n {'id_type': tc_subtype,\n 'src_mac': src_mac, 'node_name': detail1},\n {\n '$set': {\n 'last_seen': time.time()\n },\n }\n )\n self.logger.debug(\"Last seen updated for %s of %s ID Node \"\n \"record(s) id_type=%s node_name=%s\",\n db_result.modified_count,\n db_result.matched_count,\n tc_subtype, detail1)\n else:\n self.logger.info(\"Didn't action tc_subtype=%s\", tc_subtype)", "def sym_nodes(self):\n def primary():\n n = self.copy()\n n.name = '{}_p'.format(self.name)\n return n\n\n def x_sym():\n n = self.copy()\n n.name = '{}_x'.format(self.name)\n n[1] *= -1\n return n\n\n def y_sym():\n n = self.copy()\n n.name = '{}_y'.format(self.name)\n n[0] *= -1\n return n\n\n def xy_sym():\n n = self.copy()\n n.name = '{}_xy'.format(self.name)\n n[:2] *= -1\n return n\n\n if self.symmetry is None:\n return primary(),\n\n elif self.symmetry == 'x':\n return primary(), x_sym()\n\n elif self.symmetry == 'y':\n return primary(), y_sym()\n\n elif self.symmetry == 'xy':\n return primary(), x_sym(), y_sym(), xy_sym()", "def test_insert_node_singleton_content_3():\n first = 0\n second = 1\n chain = N.Node(first)\n node = N.Node(second)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton chain\"", "def do_xref(self, node):\n id = node.attributes['id'].value\n self.parse(self.randomChildElement(self.refs[id]))", "def cross_ref_hint():\n relations_dict = parse_hint(hint_csv_path)\n uniprot_ref_dict = get_uniprots_for_hint()\n\n processed_nodes = set()\n actual_cross_links = 0\n breakpoints = 300\n size = len(relations_dict)\n\n log.info('Starting inserting HINT for %s primary nodes' % size)\n\n for i, (legacyId, linked_legacyIds) in enumerate(relations_dict.items()):\n\n if i % breakpoints:\n # TODO: [progress bar]\n log.info('\\t %.2f %%' % (float(i) / float(size) * 100))\n\n if legacyId in list(uniprot_ref_dict.keys()):\n for linked_legacyId in linked_legacyIds:\n if linked_legacyId in list(uniprot_ref_dict.keys()):\n actual_cross_links += 1\n\n DatabaseGraph.link(uniprot_ref_dict[legacyId], uniprot_ref_dict[linked_legacyId],\n 'is_interacting',\n {'source': 'HINT',\n 'parse_type': 'physical_entity_molecular_interaction'})\n\n\n log.info('HINT Cross-links: %s, HINT processed nodes: %s',\n actual_cross_links, len(processed_nodes))", "def test_strict_order_valid_code(f2003_create):\n subclasses = [F2003.Specification_Part, F2003.Execution_Part]\n reader = get_reader(\n \"\"\"\n program main\n integer :: i\n real :: rho\n i = 2\n rho = i * 3.14\n end program main\n \"\"\"\n )\n\n expected = remove_indentation(\n \"\"\"([\n Program_Stmt('PROGRAM', Name('main')),\n Specification_Part(\n Type_Declaration_Stmt(\n Intrinsic_Type_Spec('INTEGER', None), None,\n Entity_Decl_List(\n ',',\n (Entity_Decl(Name('i'), None, None, None),))),\n Type_Declaration_Stmt(\n Intrinsic_Type_Spec('REAL', None), None,\n Entity_Decl_List(\n ',',\n (Entity_Decl(Name('rho'), None, None, None),)))),\n Execution_Part(\n Assignment_Stmt(Name('i'), '=', Int_Literal_Constant('2', None)),\n Assignment_Stmt(\n Name('rho'), '=', Add_Operand(Name('i'), '*',\n Real_Literal_Constant('3.14', None)))),\n End_Program_Stmt('PROGRAM', 
Name('main'))\n ],)\n \"\"\"\n )\n result = BlockBase.match(\n F2003.Program_Stmt,\n subclasses,\n F2003.End_Program_Stmt,\n reader,\n strict_order=True,\n )\n\n assert str(result) == expected", "def primary(self, key):\n match = self.find(key)\n\n if not match:\n raise UserDBValueError(\"Element not found in list, can't set as primary\")\n\n if not match.is_verified:\n raise PrimaryElementViolation(\"Primary element must be verified\")\n\n # Go through the whole list. Mark element as primary and all other as *not* primary.\n for this in self._elements:\n this.is_primary = bool(this.key == key)", "def test_insert_node_singleton_content_2():\n first = 0\n second = first\n chain = N.Node(second)\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton chain\"", "def main(rules, antecedent_prefix, consequent_prefix, deltas_prefix):\n _main(rules, antecedent_prefix, consequent_prefix, deltas_prefix)", "def setReferencePrimary(self, reference: ghidra.program.model.symbol.Reference, primary: bool) -> None:\n ...", "def assignCladeToLin(assignment_old_d, heiarchy_old_d, clade_old_s):\n\n\n\treassignCladeNames ={}\n\tclade_s = set()\n\tassignment_d = {}\n\theiarchy_d = {}\n\t\n\tfor clade in clade_old_s:\n\t\ttipLinForClade = []\n\t\tfor node in assignment_old_d:\n\t\t\tif 'NODE' not in node:\n\t\t\t\tif assignment_old_d[node] == clade:\n\t\t\t\t\t#print(node)\n\t\t\t\t\ttipLinForClade = tipLinForClade + [node.split(\"_\")[-3]]\n\t\tif clade != 'anc':\n\t\t\ttry:\n\t\t\t\tlineage = mode(tipLinForClade)\n\t\t\texcept ValueError:\n\t\t\t\tlineage = \"2lin\"\n\t\t\tnewClade = lineage + \"_\" + clade\n\t\telse:\n\t\t\tnewClade = clade\n\n\t\t\n\t\treassignCladeNames[clade] = newClade\n\t\tclade_s.add(newClade)\n\n\tfor child in heiarchy_old_d:\n\t\tparent = heiarchy_old_d[child]\n\t\tif parent != 'NA':\n\n\t\t\theiarchy_d[reassignCladeNames[child]] = reassignCladeNames[parent]\n\t\telse:\n\t\t\theiarchy_d[reassignCladeNames[child]] = parent\n\tfor samp in assignment_old_d:\n\t\tassignment_d[samp] = reassignCladeNames[assignment_old_d[samp]]\n\n\treturn (assignment_d, heiarchy_d, clade_s)", "def test_sort_chain_multiple_reuse():\n data = [-10, 42, 8, 64, -6, 76, 48, 8, -30, 1, 11, 92, 37, 4]\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n id_record = {}\n walker = chain\n while walker is not None:\n id_record[id(walker)] = walker.data\n walker = walker.next\n\n result = A8.sort_chain(chain)\n\n walker = result\n while walker is not None:\n assert id(walker) in id_record, \"sort_chain created new node\"\n assert id_record[id(walker)] == walker.data, \"sort_chain moved data value {} to new node\".format(walker.data)\n walker = walker.next", "def _traverse_1_0_1(item, nodes):\n if 'content' in item.keys():\n ids = []\n for node in item['content']:\n nodes[node['id']] = node\n ids.append(node['id'])\n _traverse_1_0_1(node, nodes)\n item['content'] = ids", "def generate_and_add_key_from_rule(self, root):\n if root.children:\n childrens = self.get_children(root)\n all_child_label = \" \".join(childrens)\n #print s\n\n if root.label in self.unique_LHS_count:\n self.unique_LHS_count[root.label] += 1\n else:\n self.unique_LHS_count[root.label] = 1\n\n key = root.label+'-->'+all_child_label\n\n if key in self.unique_rules_and_their_count_dict:\n 
self.unique_rules_and_their_count_dict[key] += 1\n else:\n self.unique_rules_and_their_count_dict[key] = 1\n\n for each_child in root.children:\n self.generate_and_add_key_from_rule(each_child)\n else:\n return", "def m_create_test_identities():\n\n # Get the ROOT account (it was created in the deployment of the Smart Contracts)\n ROOT_address, ROOT_key = wallet.account_from_name(\"ROOT\", \"ThePassword\")\n\n # Create the Alastria account for node \"ala\"\n print(f\"\\n==> Creating the Alastria account\")\n Alastria_account = wallet.new_account(\n \"Alastria\", \"ThePassword\")\n alakey = Alastria_account.key\n print(f\"Alastria key: {alakey}\")\n\n print(f\"Done\")\n\n # Set the subnode \"ala\"\n print(f\"\\n==> Creating the ala subnode in the Trust Framework\")\n success, _, _ = ens.setSubnodeOwner(\n node_name=\"root\",\n label=\"ala\",\n new_owner_address=Alastria_account.address,\n current_owner_key=ROOT_key\n )\n print(f\"ala subnode created\")\n\n # Assign the name for reverse resolution\n resolver.setName(\"ala\", \"ala\", Alastria_account.key)\n\n # And assign approval to the PublicResolver contract so it can call ENS methods on behalf of Alastria\n print(f\"Resolver address for ROOT: {resolver.address()}\")\n ens.setApprovalForAll(resolver.address(), True, Alastria_account.key)\n\n ################################\n # Heathrow airport\n print(f\"\\n==> Creating the Heathrow identity\")\n\n DID = \"did:elsi:VATGB-927365404\"\n domain_name = \"heathrow.ala\"\n website = \"www.heathrow.com\"\n commercial_name = \"Heathrow Airport Limited\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # AENA\n print(f\"\\n==> Creating the AENA identity\")\n\n DID = \"did:elsi:VATES-A86212420\"\n domain_name = \"aena.ala\"\n website = \"www.aena.es\"\n commercial_name = \"Aena\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # Lanzarote airport\n # The airport belongs to AENA and does not have independent entity (shares the same VAT, for example)\n # In production, the node should be created by AENA, as a subnode controlled by them.\n # In this PoC, the node is created automatically to facilitate the tests\n print(f\"\\n==> Creating the César Manrique airport identity\")\n\n DID = \"did:elsi:VATES-A86212420-1\"\n domain_name = \"ace.ala\"\n website = \"www.aena.es/es/aeropuerto-lanzarote\"\n commercial_name = \"Aeropuerto de Lanzarote-Cesar Manrique\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # Metrovacesa\n print(f\"\\n==> Creating the Metrovacesa identity\")\n\n DID = \"did:elsi:VATES-A87471264\"\n domain_name = \"metrovacesa.ala\"\n website = \"metrovacesa.com\"\n commercial_name = \"Metrovacesa\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # IN2\n print(f\"\\n==> Creating the IN2 identity\")\n\n DID = \"did:elsi:VATES-B60645900\"\n domain_name = \"in2.ala\"\n website = \"www.in2.es\"\n commercial_name = \"IN2 Innovating 2gether\"\n\n error, didDoc = create_identity(DID, domain_name, 
website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # Perfect Health\n print(f\"\\n==> Creating the Perfect Health identity\")\n\n DID = \"did:elsi:VATES-X12345678X\"\n domain_name = \"perfecthealth.ala\"\n website = \"www.perfecthealth.org\"\n commercial_name = \"Perfect Health plc\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # BME\n print(f\"\\n==> Creating the BME identity\")\n\n DID = \"did:elsi:VATES-A83246314\"\n domain_name = \"bme.ala\"\n website = \"www.bolsasymercados.es\"\n commercial_name = \"Bolsas y Mercados Españoles\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)", "def _change_xpub_mappings_primary_key(write_cursor: 'DBCursor', conn: 'DBConnection') -> None:\n log.debug('Enter _change_xpub_mappings_primary_key')\n with conn.read_ctx() as read_cursor:\n xpub_mappings = read_cursor.execute('SELECT * from xpub_mappings').fetchall()\n write_cursor.execute(\"\"\"CREATE TABLE xpub_mappings_copy (\n address TEXT NOT NULL,\n xpub TEXT NOT NULL,\n derivation_path TEXT NOT NULL,\n account_index INTEGER,\n derived_index INTEGER,\n blockchain TEXT NOT NULL,\n FOREIGN KEY(blockchain, address)\n REFERENCES blockchain_accounts(blockchain, account) ON DELETE CASCADE\n FOREIGN KEY(xpub, derivation_path, blockchain) REFERENCES xpubs(\n xpub,\n derivation_path,\n blockchain\n ) ON DELETE CASCADE\n PRIMARY KEY (address, xpub, derivation_path, blockchain)\n );\n \"\"\")\n write_cursor.executemany('INSERT INTO xpub_mappings_copy VALUES (?, ?, ?, ?, ?, ?)', xpub_mappings) # noqa: E501\n write_cursor.execute('DROP TABLE xpub_mappings')\n write_cursor.execute('ALTER TABLE xpub_mappings_copy RENAME TO xpub_mappings')\n log.debug('Exit _change_xpub_mappings_primary_key')", "def simple_linkage(x):\n \n nodes = []\n edges = []\n for i in range(len(x)):\n node_attr ={\"lvl\":x[i]}\n nodes.append((i, node_attr))\n edges.append((i,i+1,{'weight':1}))\n edges.pop()\n \n g =nx.Graph()\n g.add_nodes_from(nodes) \n g.add_edges_from(edges) \n return g", "def sat_generate_candidate_assignments(self):\n # YOUR CODE HERE\n short = min(len(c) for c in self.clauses)\n for c in self.clauses:\n if len(c) == short:\n return set(c.literals)\n # return (set(x.literals) for x in self.clauses if len(x) == min(len(c) for c in self.clauses))", "def test_insert_node_singleton_content_1():\n first = 0\n second = 1\n chain = N.Node(second)\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton chain\"", "def associate_node_id(tr, node=\"\"):\n return {\"id\": tr.get_uml_id(name=node)}", "def test_insert_node_multiple_content_3():\n first = 0\n second = 1\n third = 3\n chain = N.Node(first, N.Node(second))\n node = N.Node(third)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at end)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at 
end)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at end)\"", "def check_standalone_declaration(self, return_type, scope):\n index = self.token_index\n token = self.get_specific_token(index)\n if token:\n if token.token_type == u'T_ID' or\\\n token.token_type == u'T_RESERVED_WORD':\n self.log_message(token)\n identifier_token = token\n index = self.token_index = index + 1\n token = self.get_specific_token(index)\n if token:\n if token.token_type == u'T_ASSIGN':\n self.add_to_symbols_table(identifier_token,\n return_type, scope)\n right_side_declaration =\\\n self.check_right_side_declaration(\n return_type, scope, identifier_token)\n if right_side_declaration.place:\n more_declarations =\\\n self.check_more_declarations(return_type,\n scope)\n token = self.get_specific_token(self.token_index)\n if token:\n if token.token_type == u'T_SEMICOLON':\n self.log_message(token)\n self.token_index += 1\n standalone_declaration = Production()\n standalone_declaration.append_code(\n right_side_declaration.code)\n return standalone_declaration\n self.set_syntactic_error(u'T_SEMICOLON', token)\n self.set_eof_error(u'T_SEMICOLON')\n standalone_declaration = Production()\n return standalone_declaration\n elif token.token_type == u'T_COMMA':\n self.add_to_symbols_table(identifier_token,\n return_type, scope)\n self.check_more_declarations(return_type, scope)\n token = self.get_specific_token(self.token_index)\n if token.token_type == u'T_SEMICOLON':\n self.log_message(token)\n self.token_index += 1\n standalone_declaration = Production()\n return standalone_declaration\n # Commented, check if there are any consequences\n # return True\n self.set_syntactic_error(u'T_SEMICOLON', token)\n elif token.token_type == u'T_SEMICOLON':\n self.add_to_symbols_table(identifier_token,\n return_type, scope)\n self.log_message(token)\n self.token_index += 1\n standalone_declaration = Production()\n return standalone_declaration\n self.set_syntactic_error(u'T_ASSIGN or T_COMMA', token)\n self.set_eof_error(u'T_ASSIGN or T_COMMA')\n self.set_syntactic_error(u'T_ID or T_RESERVED_WORD', token)\n # If no token is found, it's likely that there are no more declarations\n return False", "def add_sister_prefixes_helper(a, ephrases, enode, i):\n\n j = i+enode.length\n if logger.level >= 3:\n logger.write(\"(i,j) = %s\\n\" % ((i,j),))\n x = enode.label\n j1 = i\n for ci in range(len(enode.children)):\n child = enode.children[ci]\n j1 += child.length\n if logger.level >= 3:\n logger.write(\"(i,j1) = %s\\n\" % ((i,j1),))\n if j1 < j and (i,j1) in ephrases:\n\n # constprefix3:\n #x1 = sym.fromtag(\"%s*\" % x)\n\n # subcat-lr2:\n #subcat = [sister.label for sister in enode.children[ci+1:] if sister.required]\n #x1 = sym.fromtag(\"/\".join([\"%s*\"%x]+subcat))\n \n # markov1:\n x1 = sym.fromtag(\"%s/%s\" % (x, enode.children[ci+1].label))\n\n # markov2:\n #x1 = sym.fromtag(\"%s(%s)\" % (x, enode.children[ci].label))\n \n a.espans.setdefault((i,j1),[]).append(x1)\n prefix_labels.add(x1)\n \n for child in enode.children:\n add_sister_prefixes_helper(a, ephrases, child, i)\n i += child.length", "def add_more_relaxed_candidates(self, a_list):\n for a in a_list:\n try:\n a.info['key_value_pairs']['raw_score']\n except KeyError:\n print(\"raw_score not put in atoms.info['key_value_pairs']\")\n\n g = self.get_generation_number()\n\n # Insert gaid by getting the next available id and assuming that the\n # entire a_list will be written without interuption\n 
next_id = self.get_next_id()\n with self.c as con:\n for j, a in enumerate(a_list):\n if 'generation' not in a.info['key_value_pairs']:\n a.info['key_value_pairs']['generation'] = g\n\n gaid = next_id + j\n relax_id = con.write(a, relaxed=1, gaid=gaid,\n key_value_pairs=a.info['key_value_pairs'],\n data=a.info['data'])\n assert gaid == relax_id\n a.info['confid'] = relax_id\n a.info['relax_id'] = relax_id", "def _init_on_load(self):\n nfc = self.node_from_code\n ntc = self.node_to_code\n par_num = self.parallel_num\n idx = (nfc, ntc, par_num)\n if not idx in self.lst['key'].keys():\n self.lst['key'][idx] = self\n self.lst['nodes'].setdefault((nfc, ntc), []).append(self)\n # self.lst.setdefault((ntc, nfc), []).append(self)\n # self.lst.setdefault(nfc, []).append(self)\n # self.lst.setdefault(ntc, []).append(self)\n else:\n raise Exception('tried to add same line %i - %i n_par = %i twice!'\n % idx)\n\n # if nfc in self.lines_index.keys():\n # if ntc in self.lines_index[nfc].keys():\n # if par_num in self.lines_index[nfc][ntc].keys():\n # raise Exception('tried to add same line %i - %i n_par = %i twice!'\n # % (nfc, ntc, par_num))\n # else:\n # self.lines_index[nfc][ntc][par_num] = self._id\n # else:\n # self.lines_index[nfc][ntc] = {par_num: self._id}\n # else:\n # self.lines_index[nfc] = {ntc: {par_num: self._id}}\n # self.group_line_div = {}\n # self.group_line_flipped = {}", "def primary(self):\n if self.currtok[1].name == \"IDENT\":\n tmp = self.currtok\n if self.functions.get(tmp[0]) is not \"first_call\" and self.functions.get(tmp[0]) is not None:\n func = self.FunctionCall()\n return func\n\n elif self.ids.get(tmp[0]) is not None or self.functions.get(tmp[0]) is \"first_call\":\n self.currtok = next(self.tg)\n return IDExpr(tmp[0])\n else:\n raise SLUCSyntaxError(\n \"ERROR: Given ID {0} was not declared above on line {1}\".format(tmp[0], str(self.currtok[2] - 1)))\n\n if self.currtok[1].name == \"INTLIT\":\n tmp = self.currtok\n self.currtok = next(self.tg)\n return IntLitExpr(tmp[0])\n\n if self.currtok[1].name == \"BOOL\":\n tmp = self.currtok[0]\n self.currtok = next(self.tg)\n return BoolExpr(tmp[0])\n\n if self.currtok[1].name == \"REAL\":\n tmp = self.currtok\n self.currtok = next(self.tg)\n return Real_Expr(tmp[0])\n\n if self.currtok[1].name == \"STRING_LIT\":\n tmp = self.currtok\n self.currtok = next(self.tg)\n return String_LitExpr(tmp[0])\n\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n tree = self.Expression(True)\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n return tree\n else:\n raise SLUCSyntaxError(\"ERROR: Missing right paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Unexpected token {0} on line {1}\".\n format(self.currtok[1], str(self.currtok[2] - 1)))", "def makePrimaryType(tagname, keyword, G, extraAttr=None):\n method = G(\"%sMethod\" % keyword)\n comment = G(\"%sComment\" % keyword)\n refs = G(keyword + 'Ref') # Sources\n\n string = \"\\n<%s\" % tagname\n if method:\n string += ' methodRef=\"M%s-%s\"' % (NODEID, method)\n if extraAttr:\n for k, v in extraAttr.items():\n string += ' %s=\"%s\"'% (k, G(v))\n string += '>'\n if comment:\n string += '<Comments>%s</Comments>' % quoteattr('%s' % comment)[1:-1]\n string += makeSourceRefs(refs)\n\n return string", "def _add_acl_sequence_numbers(self):\n\n ipv4_acl_sw = 'ip access-list'\n # ipv6_acl_sw = ('ipv6 access-list')\n if self.host.os in ['ios']:\n acl_line_sw = ('permit', 'deny')\n else:\n acl_line_sw = ('permit', 
'deny', 'remark')\n for child in self.children:\n if child.text.startswith(ipv4_acl_sw):\n sn = 10\n for sub_child in child.children:\n if sub_child.text.startswith(acl_line_sw):\n sub_child.text = \"{} {}\".format(sn, sub_child.text)\n sn += 10\n\n return self", "def update_node_id(node: Element) -> None:\n new_ids: list[str] = []\n for node_id in node['ids']:\n new_id = self.fix_fragment('', node_id)\n if new_id not in new_ids:\n new_ids.append(new_id)\n node['ids'] = new_ids", "def _generate_type(self, n, modifiers=[], emit_declname = True):\n\t\ttyp = type(n)\n\n\t\t#~ print(n, modifiers)\n\n\t\tif typ == pycparser.c_ast.TypeDecl:\n\t\t\ts = ''\n\t\t\tif n.quals: s += ' '.join(n.quals) + ' '\n\t\t\ts += self.visit(n.type)\n\n\t\t\t# Local variables & parameter renaming.\n\t\t\t#\n\t\t\t# Variable name substitution only applies to local variables or parameters names within function prototypes\n\t\t\t# (thus, global variables and function names need to be excluded)\n\t\t\t#\n\t\t\t# case 1: level-0 function parameters (no remanimg for nested parameters)\n\t\t\t# case 2: local variable declaration (thus excluding functions, global vars, struct-enum-union fields, nested parameters)\n\t\t\t#\n\t\t\tif self.__visitingParam == 1: # case 1\n\t\t\t\tif self.__debug: print(\"SETTING NEWID for [%s,%s] (case I)\") % (self.__currentFunction,n.declname)\n\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.paramprefix + self.__currentFunction + '_'+self.inlineInfix #S:\n\t\t\t\tif (self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.paramprefix + self.__currentFunction + '_'+self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\telse: \n\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.paramprefix + self.__currentFunction + '_'+self.inlineInfix,self.__visitingCompound)]\n\t\t\t\tn.declname = (self.paramprefix + self.__currentFunction + '_' + self.inlineInfix + n.declname) if n.declname else '' #S:\n\t\t\t\n\t\t\telif (self.__visitingParam == 0 and # case 2\n\t\t\t\t\tself.__visitFuncDef == 0 and\n\t\t\t\t\tn.declname not in self.Parser.funcName and\n\t\t\t\t\t#n.declname not in self.Parser.varNames[''] and\n\t\t\t\t\tself.__currentFunction != '' and\n\t\t\t\t\tself.__visitStructUnionEnum == 0):\n\t\t\t\tif self.__debug: print(\"SETTING NEWID for [%s,%s] (case II)\") % (self.__currentFunction,n.declname)\n\t\t\t\t#S: env.local, the followin two lines are replaced with the following if\n\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.prefix + self.__currentFunction + '_'\n\t\t\t\t#n.declname = self.prefix + self.__currentFunction + '_' + n.declname if n.declname else ''\n\t\t\t\tif self.__init: \n\t\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.prefix + self.__currentFunction + '_' +self.inlineInfix #S:\n\t\t\t\t\tif (self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.prefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\t\telse: \n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.prefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)]\n\t\t\t\t\tn.declname = self.prefix + self.__currentFunction + '_' + self.inlineInfix + n.declname if n.declname else '' #S:\n\t\t\t\telse:\n\t\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix #S:\n\t\t\t\t\tif 
(self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)]\n\t\t\t\t\tn.declname = self.nondetprefix + self.__currentFunction + '_' + self.inlineInfix + n.declname if n.declname else '' #S:\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t#print n.declname\n\t\t\t\t#print self.newIDs\n\t\n\n\t\t\tnstr = n.declname if n.declname else ''\n\n\t\t\t# Resolve modifiers.\n\t\t\t# Wrap in parens to distinguish pointer to array and pointer to\n\t\t\t# function syntax.\n\t\t\t#\n\t\t\tfor i, modifier in enumerate(modifiers):\n\t\t\t\tif isinstance(modifier, pycparser.c_ast.ArrayDecl):\n\t\t\t\t\tif (i != 0 and isinstance(modifiers[i - 1], pycparser.c_ast.PtrDecl)):\n\t\t\t\t\t\tnstr = '(' + nstr + ')'\n\t\t\t\t\tnstr += '[' + self.visit(modifier.dim) + ']'\n\t\t\t\telif isinstance(modifier, pycparser.c_ast.FuncDecl):\n\t\t\t\t\tif (i != 0 and isinstance(modifiers[i - 1], pycparser.c_ast.PtrDecl)):\n\t\t\t\t\t\tnstr = '(' + nstr + ')'\n\t\t\t\t\tnstr += '(' + self.visit(modifier.args) + ')'\n\t\t\t\telif isinstance(modifier, pycparser.c_ast.PtrDecl):\n\t\t\t\t\tif modifier.quals:\n\t\t\t\t\t\tnstr = '* %s %s' % (' '.join(modifier.quals), nstr)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnstr = '*' + nstr\n\t\t\tif nstr: s += ' ' + nstr\n\t\t\treturn s\n\t\telif typ == pycparser.c_ast.Decl:\n\t\t\treturn self._generate_decl(n.type)\n\t\telif typ == pycparser.c_ast.Typename:\n\t\t\treturn self._generate_type(n.type)\n\t\telif typ == pycparser.c_ast.IdentifierType:\n\t\t\treturn ' '.join(n.names) + ' '\n\t\telif typ in (pycparser.c_ast.ArrayDecl, pycparser.c_ast.PtrDecl, pycparser.c_ast.FuncDecl):\n\t\t\treturn self._generate_type(n.type, modifiers + [n])\n\t\telse:\n\t\t\treturn self.visit(n)\n\n\n\n\t\tdef visit_Compound(self, n):\n\t\t\tself.__visitingCompound += 1\n\t\t\ts = super(self.__class__, self).visit_Compound(n)\n\t\t\tfor key in self.newIDs: #S: remove pairs that have been added in this compound\n\t\t\t\tstack = self.newIDs[key] \n\t\t\t\tif stack and stack[-1][1] == self.__visitingCompound: \n\t\t\t\t\tstack.pop()\n\t\t\tself.__visitingCompound -= 1\n\t\t\treturn s", "def visitNode(node,doublevars,doublevars_modified):\n children = []\n doublevars_predefined = set()\n if hasattr(node, \"content\"):\n children = node.content\n elif hasattr(node, \"items\"):\n children = node.items\n elif type(node) in (tuple, list):\n children = node\n for child in children:\n if(type(child)==fparser.one.statements.Assignment):\n lhs = cleanVariableName(child.variable)\n # Visit an assignment statement, e.g. 
\"a = b + c\"\n if(lhs in doublevars):\n doublevars_modified.add(lhs)\n rhs = child.expr\n readDoubleVars = set(filter(lambda x: x in rhs, doublevars))\n doublevars_predefined = doublevars_predefined.union(readDoubleVars.difference(doublevars_modified))\n else:\n newmodified, newpredefined = visitNode(child, doublevars, doublevars_modified)\n doublevars_modified = doublevars_modified.union(newmodified)\n doublevars_predefined = doublevars_predefined.union(newpredefined)\n return doublevars_modified, doublevars_predefined", "def match_any_node_id(self, match):\n pass", "def ID3(self,data,classData,featureNames, parentMajority):\n\t\t\n\t\tnData = len(data)\n\t\tnClasses = len(classData)\n\n\t\t# base case 1: if D is empty, return the parentMajority class\n\t\tif nData==0 and nClasses==0:\n\t\t\t return parentMajority\n\n\t\t# get the number of features\n\t\tnFeatures = 0\n\t\tif nData != 0:\n\t\t\tnFeatures = len(data[0])\n\t\t\t\n\t\t# find the majority of target value\n\t\tmajority = self.majority_class(classData)\n\n\t\t# base case 2: if d is empty (no features), return the majority class\n\t\tif nFeatures == 0 :\n\t\t\treturn majority\n\n\t\t# base case 3: if all instances have the same target value, return the first target value\n\t\telif classData.count(classData[0]) == nData:\n\t\t\treturn classData[0]\n\t\t\n\t\t# general case to recursively build the tree\n\t\telse:\n\n\t\t\t# Choose the best feature based on information gain\n\t\t\tgain = np.zeros(nFeatures)\n\t\t\tfor feature in range(nFeatures):\n\t\t\t\tgain[feature] = self.info_gain(data,classData,feature)\n\t\t\tbestFeature = np.argmax(gain)\n\t\t\tbestFeatureName = featureNames[bestFeature]\n\t\t\t\n\t\t\ttree = {bestFeatureName:{}}\n\t\t\t#print \"The tree %s afer the best feature %s\" % (tree, bestFeatureName)\n\n\t\t\t# Load the bestFeature's possible values into a list\n\t\t\tvalues = []\n\t\t\tfor i in range(len(self.featureValues[bestFeatureName])):\n\t\t\t\tvalues.append(self.featureValues[bestFeatureName][i])\n\t\t\t#print \"The best feature %s values %s\" % (bestFeatureName, str(values))\n\n\t\t\t# Partition the original datapoints based on the best feature possible values\n\t\t\t# and then recursively invoke ID algorithm to build subtrees\n\t\t\tfor value in values:\n\t\t\t\tnewData = []\n\t\t\t\tnewClassData = []\n\t\t\t\tindex = 0\n\n\t\t\t\t# partition the data\n\t\t\t\tfor datapoint in data:\n\t\t\t\t\tif datapoint[bestFeature]==value:\n\t\t\t\t\t\tif bestFeature==0:\n\t\t\t\t\t\t\tnewdatapoint = datapoint[1:]\n\t\t\t\t\t\t\tnewNames = featureNames[1:]\n\t\t\t\t\t\telif bestFeature==nFeatures:\n\t\t\t\t\t\t\tnewdatapoint = datapoint[:-1]\n\t\t\t\t\t\t\tnewNames = featureNames[:-1]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnewdatapoint = datapoint[:bestFeature]\n\t\t\t\t\t\t\tnewdatapoint.extend(datapoint[bestFeature+1:])\n\t\t\t\t\t\t\tnewNames = featureNames[:bestFeature]\n\t\t\t\t\t\t\tnewNames.extend(featureNames[bestFeature+1:])\n\n\t\t\t\t\t\tnewData.append(newdatapoint)\n\t\t\t\t\t\tnewClassData.append(classData[index])\n\t\t\t\t\tindex += 1\n\n\t\t\t\t# Now do recursive call to build the subtrees\n\t\t\t\tsubtree = self.ID3(newData,newClassData,newNames, majority)\n\n\t\t\t\t# Add the subtree on to the tree\n\t\t\t\t#print \"The subtree %s for the current tree %s\" % ( subtree, tree,)\n\t\t\t\ttree[bestFeatureName][value] = subtree\n\n\t\t\treturn tree", "def _mutate_expand_node_helper(node, pb_en_out_link=config.MUTPB_EN_OUT_LINK):\n var_edge = gen_random_var()\n var_node = gen_random_var()\n if random.random() 
< pb_en_out_link:\n new_triple = (node, var_edge, var_node)\n else:\n new_triple = (var_node, var_edge, node)\n return new_triple, var_node, var_edge", "def l1():\n head = l1 = ListNode(3)\n l1.next = ListNode(4)\n l1.next.next = ListNode(5)\n return head", "def create_identity_subnode(\n did: str,\n domain_name: str,\n website: str,\n commercial_name: str,\n new_privatekey: PrivatekeyJWK,\n parent_privatekey: PrivatekeyJWK, \n ) -> Tuple[str, DIDDocument]:\n\n # Check that node has at least two components: subnode.parent\n s = domain_name.partition(\".\")\n if len(s[1]) == 0:\n return \"Domain name has only one component\", None\n\n this_node = s[0]\n parent_node = s[2]\n\n # Obtain subnode's private and public key and Ethereum address\n subnode_account = Account.from_key(base64url_decode(new_privatekey.d))\n subnode_publicKey = base64url_decode(new_privatekey.x) + base64url_decode(new_privatekey.y)\n pb = PublicKey(subnode_publicKey)\n subnode_address = pb.to_checksum_address()\n\n # The caller account from its private key\n Manager_account = Account.from_key(base64url_decode(parent_privatekey.d))\n\n # Initialize the DIDDocument\n didDoc = DIDDocument(\n DID=did,\n node_name=parent_node,\n label=this_node,\n address=subnode_address,\n publicKey=subnode_publicKey,\n manager_account=Manager_account\n )\n\n # Add the entity info\n service = {\n \"id\": did + \"#info\",\n \"type\": \"EntityCommercialInfo\",\n \"serviceEndpoint\": website,\n \"name\": commercial_name\n }\n didDoc.addService(service)\n\n # Add the Secure Messaging Server info\n service = {\n \"id\": did + \"#sms\",\n \"type\": \"SecureMessagingService\",\n \"serviceEndpoint\": \"https://safeisland.hesusruiz.org/api\"\n }\n didDoc.addService(service)\n\n # Store the info in the blockchain trust framework\n success, tx_receipt, tx_hash = didDoc.createIdentity(ens, resolver)\n if not success:\n return \"Failed to create identity in blockchain\", None\n\n success, tx_receipt, tx_hash = ens.setApprovalForAll(resolver.address(), True, subnode_account.key)\n if not success:\n return \"Failed in setApprovalForAll\", None\n\n return None, didDoc", "def readjust_node_id(self, lowerbound = 1):\n for i in range(lowerbound, len(self.nodes)):\n if self.nodes[i]:\n self.nodes[i].node_id = i", "def add_uniprot(old_node_dict, old_to_new_node_ids_dict, new_db_api):\n\n # getting the old node id, and the old node's properties\n old_node_id = old_node_dict[\"id\"]\n old_node_alt_accession = old_node_dict[\"alt_accession\"]\n old_node_name = old_node_dict[\"name\"]\n old_aliases = old_node_dict['aliases']\n tax_id = old_node_dict[\"tax_id\"]\n pathways = old_node_dict[\"pathways\"]\n\n new_node_dict = {\n \"name\" : old_node_name,\n \"alt_accession\" : old_node_alt_accession,\n \"tax_id\" : tax_id,\n \"pathways\" : pathways,\n \"aliases\" : old_aliases,\n \"topology\" : \"\"\n }\n\n # inserting the node to the PSI-MI SQLite\n new_db_api.insert_unique_node(new_node_dict)\n new_node_dict['id'] = new_db_api.last_row_id\n\n # getting the new last row id of the inserted node\n new_node_id = new_node_dict['id']\n\n # if the node maps to more than one swissprot uniprot id it will be inserted for every swissprot id and\n # this function will be called for every insertion\n if not old_to_new_node_ids_dict.has_key(old_node_id):\n old_to_new_node_ids_dict[old_node_id] = [new_node_id]\n else:\n old_to_new_node_ids_dict[old_node_id].append(new_node_id)", "def associate_successors(graph, node=\"\"):\n return {\n \"successors\": [\n {\n \"source\": node,\n 
\"target\": succ,\n \"edge_attribute\": graph.succ[node][succ][\"edge_attribute\"],\n }\n for succ in graph.succ[node]\n ]\n }", "def make_set(node):\n node.parent = node\n node.rank = 0", "def test_consistent_ids(self) -> None:\n bnode = BNode()\n g0_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Golan Trevize\")),\n (bnode, RDF.type, FOAF.Person),\n }\n bnode = BNode()\n g1_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Janov Pelorat\")),\n (bnode, RDF.type, FOAF.Person),\n }\n\n g0 = Graph()\n g0 += g0_ts\n cg0 = to_canonical_graph(g0)\n cg0_ts = GraphHelper.triple_set(cg0)\n\n g1 = Graph()\n g1 += g1_ts\n cg1 = to_canonical_graph(g1)\n cg1_ts = GraphHelper.triple_set(cg1)\n\n assert cg0_ts.issubset(\n cg1_ts\n ), \"canonical triple set cg0_ts should be a subset of canonical triple set cg1_ts\"", "def eval_declaration(declaration, motif_node_dict):\n # We are only concerned with declaration type \"struct provenance\" or \"struct provenance *\"\n if (type(declaration.type).__name__ == 'PtrDecl' and type(declaration.type.type).__name__ == 'TypeDecl' and type(declaration.type.type.type).__name__ == 'Struct' and declaration.type.type.type.name == 'provenance') or (type(declaration.type).__name__ == 'TypeDecl' and type(declaration.type.type).__name__ == 'Struct' and declaration.type.type.name == 'provenance'):\n # if it is immediately assigned by a function call\n if type(declaration.init).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(declaration.init, motif_node_dict)\n if not motif_node:\n print('\\33[101m' + '[error][eval_declaration]: ' + declaration.name + ' must be associated with a MotifNode.\\033[0m')\n exit(1)\n else:\n # it should be the first time we see the name in the dictionary\n if declaration.name in motif_node_dict:\n print('\\33[101m' + '[error][eval_declaration]: ' + declaration.name + ' should not already be in the dictionary.\\033[0m') \n exit(1)\n else:\n motif_node_dict[declaration.name] = [motif_node]\n return tree_node\n # if it is set to NULL first\n elif type(declaration.init).__name__ == 'ID':\n if declaration.init.name == 'NULL':\n # it should be the first time we see the name in the dictionary\n if declaration.name in motif_node_dict:\n print('\\33[101m' + '[error][eval_declaration]: ' + declaration.name + ' is set to NULL and should not already be in the dictionary.\\033[0m') \n exit(1)\n else:\n motif_node_dict[declaration.name] = []\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n print('\\33[101m' + '[error][eval_declaration]: ' + declaration.name + ' is set to an unknown condition that is not considered yet.\\033[0m') \n exit(1)\n return None\n # if it is not set at all, then it must be set later\n elif type(declaration.init).__name__ == 'NoneType':\n if declaration.name in motif_node_dict:\n print('\\33[101m' + '[error][eval_declaration]: ' + declaration.name + ' is not set and should not already be in the dictionary.\\033[0m') \n exit(1)\n else:\n #######################################################\n # We encounter an exception here\n # TODO: WHAT CAN WE DO?\n #######################################################\n if declaration.name == 'pckprov':\n motif_node_dict[declaration.name] = [provenance.create_motif_node(declaration.name)]\n else:\n motif_node_dict[declaration.name] = []\n return None\n # it must be set through other methods, so we can only infer the 
type from its name\n else:\n if declaration.name in motif_node_dict:\n print('\\33[101m' + '[error][eval_declaration]: ' + declaration.name + ' is not set in an unknown way but should not already be in the dictionary.\\033[0m') \n exit(1)\n else:\n motif_node_dict[declaration.name] = [provenance.create_motif_node(declaration.name)]\n return None\n \n else:\n return None", "def create_basic_adjacency_map_1():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\", \"E\"],\n \"D\": [\"X\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def test_insert_node_multiple_structure_1():\n chain = N.Node(1, N.Node(3))\n node = N.Node(0)\n\n result = A8.insert_node(node, chain)\n\n assert result is not None, \"insert_node returned empty chain given a node and chain length 2 (insert at start)\"\n assert result.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert at start)\"\n assert result.next.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert at start)\"\n assert result.next.next.next is None, \"insert_node returned badly formed chain given a node and chain length 2 (insert at start)\"", "def get_primary_id(self):", "def identifier(self):", "def test_4():\n h = iotbx.pdb.input(source_info=None, lines=test_pdb_4).construct_hierarchy()\n asc = h.atom_selection_cache()\n ncs_inp = iotbx.ncs.input(\n hierarchy=h,\n params=ncs_pars.ncs_search)\n ncs_groups = ncs_inp.get_ncs_restraints_group_list()\n assert len(ncs_groups) == 1\n # group 1\n assert ncs_groups[0].master_iselection.all_eq(\n asc.selection(string = \"chain A\").iselection())\n g1_c = ncs_groups[0].copies\n assert len(g1_c)==1\n assert g1_c[0].iselection.all_eq(\n asc.selection(string = \"chain B\").iselection())", "def __init__(self, node, declare, type, prettyType=\"\"):\n self.name = getTag(node, \"name\")\n self.info = getTag(node, \"info\")\n self.comment = comment(node, declare)\n self.type = type\n self.prettyType = prettyType\n if prettyType == \"\":\n self.prettyType = type\n m = hash()\n m.update(self.name)\n m.update(self.info)\n m.update(self.type)\n self.link = \"a\"+m.hexdigest()", "def _first_IDAT(self, data):\n self._actl()\n self._fctl()\n self._copy()", "def get_primary_afferents_names(self):\n\t\treturn self._primaryAfferentsNames", "def primary_assignment(node_scores: nb.float32[:,:],\n group_ids: nb.int64[:] = None) -> nb.boolean[:]:\n if group_ids is None:\n return nbl.argmax(node_scores, axis=1).astype(np.bool_)\n\n primary_labels = np.zeros(len(node_scores), dtype=np.bool_)\n node_scores = nbl.softmax(node_scores, axis=1)\n for g in np.unique(group_ids):\n mask = np.where(group_ids == g)[0]\n idx = np.argmax(node_scores[mask][:,1])\n primary_labels[mask[idx]] = True\n\n return primary_labels", "def create_basic_adjacency_map_3():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\", \"E\"],\n \"D\": [\"X\", \"Y\"],\n \"E\": [\"X\"],\n \"X\": [\"Z\"],\n \"Y\": [\"Z\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def __init__(self, node, declare):\n preproc.__init__(self, node, declare, \"hashIf\", \"#if\")\n self.string = \" \" + self.test", "def __init__(self):\n A = { 'edges': [], 'pred': None }\n self.B = { 'edges': [], 'pred': A }\n self.next_insert = self.B\n self.nodect = 2", "def inline_single_starrable(self):\n\n # Map a rule name to the phrase it should be replaced with.\n replacement = dict()\n\n # Process descendants first\n for A in 
reversed(self.preorder()):\n A_rule = self.rules[A].as_container()\n if len(A_rule) == 1:\n option = A_rule[0].as_container()\n if len(option) == 1:\n first = option[0]\n if first.is_symbol_name():\n first_name = first.content\n if self.rules[first_name].as_starred(first_name) is not None:\n replacement[A] = [first]\n\n # Update this rule with any scheduled replacements.\n changed_rule = False\n new_options = []\n for option in A_rule:\n changed_parts = False\n parts = []\n for x in option.as_container():\n if x.is_symbol_name() and x.content in replacement:\n parts.extend(replacement[x.content])\n changed_parts = True\n changed_rule = True\n else:\n parts.append(x)\n new_options.append(self.MakeSeq(parts) if changed_parts else option)\n if changed_rule:\n self.rules[A] = self.MakeChoice(new_options)\n\n self.remove_unused_rules()", "def test_stable_ordering(self):\n with Graph('g') as graph:\n a = ParrotNode(['a'])\n p = a | pike.merge()\n b = ParrotNode(['b'])\n graph.source | b | p\n # Make sure that b runs before a\n if graph.nodes.index(b) > graph.nodes.index(a):\n graph.nodes.remove(b)\n graph.nodes.insert(graph.nodes.index(a), b)\n ret = graph.run()\n self.assertEqual(list(ret['default']), ['a', 'b'])", "def map_to_parent_eid(self, eid):\n ...", "def test_default_simple(self):\n n = self._process(\"\"\"\nclass c1 \"C1\"\nclass c2 \"C2\"\nclass c3 \"C3\"\n\"\"\")\n c1n = find_node(n, 'c1')\n c2n = find_node(n, 'c2')\n c3n = find_node(n, 'c3')\n\n c1 = c1n.style\n c2 = c2n.style\n c3 = c3n.style\n\n # no artificial groups\n self.assertEquals([c1n, c2n, c3n], n.children)\n\n self.assertTrue(n.data.get('align') is None)\n self._check_c(MiddleEq, c1, c2)\n self._check_c(MinHDist, c1, c2)\n self._check_c(MiddleEq, c2, c3)\n self._check_c(MinHDist, c2, c3)", "def assign_simple_node_features(ndata, g, ntype, assign_id=False):\n for col in g.nodes[ntype].data.keys():\n if not assign_id and col == dgl.NID:\n continue\n induced_nodes = ndata[dgl.NID]\n ndata[col] = g.nodes[ntype].data[col][induced_nodes]", "def _usage_id_from_node(self, node, parent_id, id_generator=None):\n if id_generator is not None:\n warnings.warn(\n \"Passing an id_generator directly is deprecated \"\n \"in favor of constructing the Runtime with the id_generator\",\n DeprecationWarning,\n stacklevel=3,\n )\n\n id_generator = id_generator or self.id_generator\n\n block_type = node.tag\n # remove xblock-family from elements\n node.attrib.pop('xblock-family', None)\n # TODO: a way for this node to be a usage to an existing definition?\n def_id = id_generator.create_definition(block_type)\n usage_id = id_generator.create_usage(def_id)\n keys = ScopeIds(None, block_type, def_id, usage_id)\n block_class = self.mixologist.mix(self.load_block_type(block_type))\n # pull the asides out of the xml payload\n aside_children = []\n for child in node.iterchildren():\n # get xblock-family from node\n xblock_family = child.attrib.pop('xblock-family', None)\n if xblock_family:\n xblock_family = self._family_id_to_superclass(xblock_family)\n if issubclass(xblock_family, XBlockAside):\n aside_children.append(child)\n # now process them & remove them from the xml payload\n for child in aside_children:\n self._aside_from_xml(child, def_id, usage_id, id_generator)\n node.remove(child)\n block = block_class.parse_xml(node, self, keys, id_generator)\n block.parent = parent_id\n block.save()\n return usage_id", "def first_part_pid(self,text,pid):\n\n len_max=4\n key_list=pid.keys()\n while 1:\n num=min(len_max,len(text))\n if len_max==0:\n 
sys.exit('error pid dico not complete or invalid input :'+str([text[:min(3,len(text))]])+'\\\n \\n Complete proc_info.py')\n \n if text[:num].lower() in key_list:\n tag=text[:num].lower()\n text=text[num:]\n return text, pid[tag]\n else:\n len_max+=-1", "def is_primary(self):\n\n return not self.parent.non_primary", "def test_insert_node_multiple_structure_2():\n chain = N.Node(1, N.Node(3))\n node = N.Node(2)\n\n result = A8.insert_node(node, chain)\n\n assert result is not None, \"insert_node returned empty chain given a node and chain length 2 (insert between)\"\n assert result.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert between)\"\n assert result.next.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert between)\"\n assert result.next.next.next is None, \"insert_node returned badly formed chain given a node and chain length 2 (insert between)\"", "def test_insert_node_multiple_structure_3():\n chain = N.Node(1, N.Node(3))\n node = N.Node(4)\n\n result = A8.insert_node(node, chain)\n\n assert result is not None, \"insert_node returned empty chain given a node and chain length 2 (insert at end)\"\n assert result.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert at end)\"\n assert result.next.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert at end)\"\n assert result.next.next.next is None, \"insert_node returned badly formed chain given a node and chain length 2 (insert at end)\"", "def test_deep_default_interleave(self):\n # diagram:\n # -- c --\n # |c1 c2|\n # -------\n # c3 c4\n #\n n = self._process(\"\"\"\nclass c \"C\"\n class c1 \"C1\"\n class c2 \"C2\"\nclass c3 \"C3\"\nclass c4 \"C4\"\n\n:layout:\n right g1: c1 c3\n left g2: c2 c4\n\"\"\")\n c = find_style(n, 'c')\n c1 = find_style(n, 'c1')\n c2 = find_style(n, 'c2')\n c3 = find_style(n, 'c3')\n c4 = find_style(n, 'c4')\n g1 = find_style(n, 'g1')\n\n self._check_c(RightEq, c1, c3)\n self._check_c(MinVDist, c, c3)\n self._check_c(LeftEq, c2, c4)\n self._check_c(MinVDist, g1, c4)\n\n self._check_c(MiddleEq, c1, c2)\n self._check_c(MinHDist, c1, c2)", "def label_paragraphs(root_el, fastcase_data):\n # case metadata\n citations = [alphanum_lower(\" \".join((c[\"Volume\"], c[\"Reporter\"], c[\"Page\"]) + ((c[\"Suffix\"],) if \"Suffix\" in c else ()))) for c in fastcase_data['Citations']]\n name_clean = alphanum_lower(fastcase_data['PartyHeader']) if fastcase_data['PartyHeader'] else None\n court_clean = alphanum_lower(fastcase_data['CourtName'] or fastcase_data['CourtAbbreviation'])\n docket_numbers_clean = [alphanum_lower(d) for d in fastcase_data['DocketNumbers']]\n\n # via https://github.com/harvard-lil/CaselawAccessProjectSchemas/blob/master/casebodyxml/v1/casebodyxml.xsd\n states = {k:i for i, k in enumerate([None, \"citation\", \"parties\", \"docketnumber\", \"court\", \"otherdate\", \"decisiondate\", \"history\", \"syllabus\", \"attorneys\", \"judges\", \"disposition\", \"_opinionstart\", \"_preauthor\", \"author\", \"opinion\"])}\n reverse_states = {v:k for k, v in states.items()}\n\n state = 0\n header_els = []\n opinions = [[]]\n header_complete = False\n extra_els = []\n blank_els = []\n authors = []\n opinion_starts = []\n paragraph_id = 1\n\n def shift_to_opinion(i):\n \"\"\"Move i elements from the end of header to the start of opinion.\"\"\"\n if not i:\n return\n nonlocal header_els\n opinions[0][0:0] = header_els[-i:]\n header_els = 
header_els[:-i]\n\n def add_el(el, state, target_list=header_els):\n nonlocal blank_els, paragraph_id\n if state:\n if not reverse_states[state].startswith('_'):\n el.attrib['class'] = reverse_states[state]\n if state == states['_opinionstart']:\n opinion_starts.append((len(target_list), el))\n elif state == states['author']:\n authors.append((len(target_list), el))\n blank_els = []\n else:\n blank_els.append(el)\n el.attrib['id'] = f'p-{paragraph_id}'\n paragraph_id += 1\n target_list.append(el)\n\n def append_to_previous(line):\n PyQuery(header_els[-1]).append(PyQuery(line))\n\n for el_pq in PyQuery(root_el)('root').children().items():\n\n if extra_els:\n extra_els.append(el_pq)\n el_pq = extra_els.pop(0)\n\n el = el_pq[0]\n\n # mark the end of the labeled front matter (which may or may not align with actual end)\n if el.tag == 'header-end':\n header_complete = True\n if state == states[\"author\"]:\n state = states[\"opinion\"]\n continue\n\n # skip\n if el.text == \"COPYRIGHT MATERIAL OMITTED\":\n continue\n\n # add linebreak after element for indentation\n if not (el.tail and el.tail.startswith('\\n')):\n el.tail = '\\n' + (el.tail or '')\n\n line = inner_html(el)\n line_text = strip_tags(line)\n line_text_lower = line_text.lower()\n line_alphanum_chars = alphanum_lower(line_text)\n\n # if we've had 5 regular paragraphs in a row, assume we missed the start of the opinion\n if state < states[\"opinion\"] and len(blank_els) >= 5:\n shift_to_opinion(len(blank_els))\n state = states[\"opinion\"]\n\n # we have now reached the opinion and no longer have to process header lines\n if state >= states[\"opinion\"]:\n # check short lines for the start of a concurrence or dissent\n m = new_opinion_re.match(line_text)\n if m:\n el.attrib['class'] = 'author'\n el.attrib['opinion-type'] = opinion_type_lookup[m[1].lower()]\n opinions.append([])\n\n add_el(el, 0, opinions[-1])\n continue\n\n # citation\n if state <= states[\"citation\"]:\n if any(c in line_alphanum_chars for c in citations) or all(citation_like_re.match(s) for s in line.split('<br>')):\n state = states[\"citation\"]\n continue # don't include citation lines in output\n\n # parties\n if state < states[\"parties\"]:\n # special case -- if the case doesn't have a name, like NE2d/939/939ne2d586.xml,\n # assume that whatever comes after the last citation is the name\n if name_clean is None or line_alphanum_chars == name_clean:\n state = states[\"parties\"]\n add_el(el, state)\n elif header_els and name_clean == alphanum_lower(inner_html(header_els[-1]) + line):\n # handle edge case where name is split across two paragraphs\n append_to_previous(line)\n elif line_alphanum_chars.startswith(name_clean) or similar_strings(line_text, fastcase_data['PartyHeader']):\n # special cases -- NW2d/881/881 N.W.2d 813-4_Replace.xml, NW2d/792/792NW2d203.xml\n state = states[\"parties\"]\n add_el(el, state)\n else:\n # if we haven't found a valid name yet, paragraphs are just regular paragraphs\n add_el(el, 0)\n continue\n\n # docket numbers or court\n if state < states[\"court\"]:\n # detect 'Supreme Judicial Court of Massachusetts.' and 'United States Bankruptcy Appellate Panel of the Ninth Circuit.' as a court, but not\n # 'Court of Appeals Case No. 04A03-1707-IF-1724' or 'Consol. Court No. 16-00054'\n # line may be 'Court of Appeals of Virginia, Chesapeake.' if court is 'Court of Appeals of Virginia'\n # line may be 'North Carolina Court of Appeals.' 
if court is 'Court of Appeals of North Carolina'\n # if 'court' in line.lower() or 'panel' in line.lower()) and ('No.' not in line or 'Division No.' in line):\n if any(line_alphanum_chars.startswith(s) for s in docket_numbers_clean):\n state = states[\"docketnumber\"]\n elif line_alphanum_chars.startswith(court_clean) or (\n (line_text.endswith('Court of Appeals.') or any(line_text_lower.startswith(s) for s in ('court of appeal', 'supreme court')))\n ):\n state = states[\"court\"]\n else:\n state = states[\"docketnumber\"]\n add_el(el, state)\n continue\n\n # accidental start of opinion included in head matter\n # NW2d/737/737NW2d768_3New.xml -- \"On order of the Court ...\"\n if state >= states[\"decisiondate\"]:\n if line_text.startswith(\"On order of the Court\"):\n state = states[\"opinion\"]\n add_el(el, 0, opinions[-1])\n continue\n\n # dates\n # 'DATED at Olympia, Washington, this 31st day of October, 2018.'\n # '01-04-2017'\n if state <= states[\"decisiondate\"]:\n # long line isn't decision date -- SCt/134/134sct985_2.xml\n if len(line_text) < 80 and (date_re.search(line_text) or line_text_lower.startswith('dated at') or re.match(r'\\d{1,2}-\\d{2}-\\d{4}$', line_text)):\n if any(line_text.startswith(s) for s in ('Released', 'Submitted', 'Dissenting')) and 'Decided' not in line_text:\n # handle case like\n # 'Submitted June 5, 2007, at Lansing.'\n # 'Decided June 12, 2007, at 9:05 a.m.'\n # 'Released for Publication October 11, 2007\n # 'Dissenting Opinion of Chief Justice Maynard June 27, 2008.'\n # avoid\n # 'Submitted March 2, 2010.<br>Decided April 2, 2010.'\n state = states[\"otherdate\"]\n else:\n state = states[\"decisiondate\"]\n add_el(el, state)\n continue\n\n if state < states[\"judges\"]:\n # strip off judges lines appended to current line, and add as an extra_el\n # \"for Respondent.<strong>Justice BEATTY.</strong></p>\" SE2d/708/708se2d750.xml\n # \"... West Virginia Insurance Federation.<strong>DAVIS, Justice:</strong></p>\" SE2d/719/719se2d830.xml\n # \"for appellees.<strong>Present: HUMPHREYS, McCLANAHAN and BEALES, JJ.</strong><strong>BEALES, Judge.</strong>\" SE2d/708/708se2d429.xml\n while True:\n m = re.search('(.+)(<strong>([^<]+)</strong>)$', line)\n if m and is_judges_or_author(m[3]):\n extra_els.insert(0, PyQuery('<p>'+m[2]+'</p>'))\n line = m[1]\n el_pq.html(line)\n line_text = strip_tags(line)\n line_alphanum_chars = alphanum_lower(line_text)\n continue\n break\n\n # history\n # 'Appeal by defendant from judgment entered 8 December 2004 by Judge Robert H. Hobgood in Alamance County Superior Court. Heard in the Court of Appeals 2 November 2005.'\n if line_text_lower.startswith('appeal') or any(s in line_text for s in ('Superior Court', 'District Court', 'Circuit Court')):\n state = states[\"history\"]\n add_el(el, state)\n continue\n\n # syllabus\n if 'Syllabus by the Court' in line_text or (state == states[\"syllabus\"] and re.match(r'\\d+\\.|[a-z\\[]', line_text)):\n if re.match(r'[a-z\\[]', line_text):\n # handle case where syllabus is split midsentence\n append_to_previous(line)\n else:\n state = states[\"syllabus\"]\n add_el(el, state)\n continue\n\n # attorneys\n # 'Garrett D. Blanchfield, Jr., Reinhardt Wendorf & Blanchfield, St. 
Paul, MN, for Appellants.'\n if any(line_text.startswith(s) for s in (\"An amicus\", \"For the\", \"On behalf of\")) or any(s in line_text for s in (' for ', 'amici curiae', 'pro se')):\n state = states[\"attorneys\"]\n add_el(el, state)\n continue\n\n # titles that mark the start of an opinion, like \"OPINION\"\n if line_alphanum_chars in opinion_start_lines or any(line_alphanum_chars.startswith(s) for s in opinion_start_line_prefixes):\n state = states[\"_opinionstart\"]\n if line_text != \"OPINION\":\n add_el(el, state)\n continue\n\n # Handle paragraph that is definitely followed by author, like \"The opinion of the court was delivered by\", A3d/148/148 A.3d 441_Replace.xml\n if line_text == \"The opinion of the court was delivered by\":\n state = states[\"_preauthor\"]\n add_el(el, 0)\n continue\n if state == states[\"_preauthor\"]:\n add_el(el, states[\"author\"])\n state = states[\"opinion\"]\n continue\n\n # author\n # note, in theory fastcase_data[\"Author\"] could be useful for identifying author paragraph, but it's often not set,\n # and when it is it can also appear in the judges line and other places ...\n judges_or_author = is_judges_or_author(line_text)\n if judges_or_author == \"judges\":\n state = states[\"judges\"]\n add_el(el, state)\n continue\n elif judges_or_author == \"author\":\n add_el(el, states[\"author\"])\n state = states[\"opinion\"] if header_complete else states[\"author\"]\n continue\n\n # weird special case where there's an order provided before the start of the opinion\n # E.g. NW2d/740/740NW2d659_1.xml, 'ORDER ENTERED JUNE 8, 2007' and subsequent unlabeled lines\n if line_text.startswith(\"ORDER ENTERED\") or state == states[\"disposition\"]:\n state = states[\"disposition\"]\n add_el(el, state)\n continue\n\n # regular paragraph\n add_el(el, 0)\n continue\n\n # fixups\n labels = [el.attrib.get('class') for el in header_els]\n # rewrite special case like NE2d/944/944ne2d1119.xml:\n # [['parties', '...'],\n # ['docketnumber', 'Feb. 15'],\n # ['docketnumber', '2011.'],\n # ['court', 'Court of Appeals of New York.']]\n # to\n # [['parties', '...'],\n # ['court', 'Court of Appeals of New York.'],\n # ['decisiondate', 'Feb. 15, 2011.']]\n if labels == [None, 'docketnumber', 'docketnumber', 'court']:\n docket_combined = header_els[1].text + \", \" + header_els[2].text\n if date_re.match(docket_combined):\n header_els[1].attrib['class'] = 'decisiondate'\n header_els[1].text = docket_combined\n header_els = [header_els[0], header_els[3], header_els[1]]\n\n # change all author labels but the last to judges; we likely misdetected one earlier\n for i, el in authors[:-1]:\n el.attrib['class'] = \"judges\"\n\n # if we didn't find an author and the last line is unlabeled, assume that's the author with a typo --\n # e.g. 
NW2d/753/753NW2d552_1.xml , missing comma\n if header_els and not authors and not opinion_starts and state >= states[\"judges\"] and header_els[-1].attrib.get('class') is None:\n header_els[-1].attrib['class'] = \"author\"\n authors = [(len(header_els)-1, header_els[-1])]\n\n # move author, and any paragraphs after it, to beginning of first opinion\n move_index = opinion_starts[0][0] + 1 if opinion_starts else authors[-1][0] if authors else None\n if move_index is not None:\n shift_to_opinion(len(header_els)-move_index)\n\n return header_els, opinions", "def test_global_relaxed_validation_policy_01(non_conformant_data: NonConformantData):\n login_dict = non_conformant_data.data_for_name(\"login-duplicate-section\")\n enable_relaxed_validation()\n assert get_relaxed_validation()\n assert OPLoginItem(login_dict)", "def setReferencePrimary(self, reference: ghidra.program.model.symbol.Reference) -> None:\n ...", "def primary_types(helper):\n\n # These constants are global in all SD municipalities (because they are created\n # by the SD->MO importer.\n PRIMARY = \"Ansat\"\n NO_SALARY = \"status0\"\n NON_PRIMARY = \"non-primary\"\n FIXED_PRIMARY = \"explicitly-primary\"\n\n logger.info(\"Read primary types\")\n primary = None\n no_salary = None\n non_primary = None\n fixed_primary = None\n\n primary_types = helper.read_classes_in_facet(\"primary_type\")\n for primary_type in primary_types[0]:\n if primary_type[\"user_key\"] == PRIMARY:\n primary = primary_type[\"uuid\"]\n if primary_type[\"user_key\"] == NON_PRIMARY:\n non_primary = primary_type[\"uuid\"]\n if primary_type[\"user_key\"] == NO_SALARY:\n no_salary = primary_type[\"uuid\"]\n if primary_type[\"user_key\"] == FIXED_PRIMARY:\n fixed_primary = primary_type[\"uuid\"]\n\n type_uuids = {\n \"primary\": primary,\n \"non_primary\": non_primary,\n \"no_salary\": no_salary,\n \"fixed_primary\": fixed_primary,\n }\n if None in type_uuids.values():\n raise Exception(\"Missing primary types: {}\".format(type_uuids))\n return type_uuids", "def primary_preprocessor(f):\n f = preprocessor(f)\n f._is_primary = True\n return f" ]
[ "0.5375646", "0.5154644", "0.51295924", "0.49382252", "0.4831968", "0.4788528", "0.4788528", "0.4756773", "0.4754792", "0.4708603", "0.4700092", "0.4677574", "0.46142638", "0.46059236", "0.45961127", "0.45947063", "0.45883384", "0.45769686", "0.45754498", "0.45723775", "0.45571318", "0.45219734", "0.45158678", "0.44806424", "0.44756037", "0.44733986", "0.44660056", "0.44604152", "0.4456504", "0.4454941", "0.44396225", "0.44190913", "0.4405426", "0.44019276", "0.43983993", "0.43977112", "0.43838066", "0.43768102", "0.43724793", "0.43656105", "0.43603146", "0.43560952", "0.43554622", "0.43546605", "0.43511513", "0.43498784", "0.43452463", "0.43420514", "0.43397033", "0.4333771", "0.43300155", "0.43275774", "0.43158808", "0.4302357", "0.4294137", "0.4293596", "0.42889202", "0.42857882", "0.4280621", "0.42799866", "0.42720902", "0.4271914", "0.42706797", "0.4264347", "0.42592928", "0.4249535", "0.424809", "0.4240653", "0.42364165", "0.42355648", "0.42336687", "0.4233365", "0.4232477", "0.4223169", "0.42194942", "0.4215777", "0.4212776", "0.42088512", "0.42074406", "0.4205142", "0.42018992", "0.4199373", "0.41953912", "0.41931123", "0.41888386", "0.4182439", "0.41779792", "0.41769674", "0.4173283", "0.41719395", "0.417157", "0.4171214", "0.41706267", "0.41705817", "0.4161259", "0.41600528", "0.41593337", "0.41570678", "0.41544312", "0.41516355" ]
0.55320704
0
To convert tf.Tensor > json serializable np.ndarray > json serializable
def tf_tensor_2_serializable(obj):
    import tensorflow as tf
    import numpy as np

    # Tensor -> ndarray or object
    if isinstance(obj, tf.Tensor):
        if tf.__version__.startswith("1."):
            with tf.compat.v1.Session():
                obj = obj.numpy()
        else:
            obj = obj.numpy()

    # ndarray -> serializable python object
    TYPES = (int, float, str)
    if isinstance(obj, np.ndarray):
        for _type in TYPES:
            # dtype of string/bytes ndarrays returned by tensor.numpy()
            # are both np.dtype(object), which are not json serializable
            try:
                obj = obj.astype(_type)
            except (UnicodeDecodeError, ValueError, OverflowError):
                continue
            break
        else:
            try:
                obj = np.vectorize(bytes_2_tf_b64)(obj)
            except ValueError:
                pass
        obj = obj.tolist()
    elif isinstance(obj, bytes):
        # tensor.numpy() will return single value directly
        try:
            obj = obj.decode("utf8")
        except UnicodeDecodeError:
            obj = bytes_2_tf_b64(obj)
    return obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_numpy(tensor):\n raise NotImplementedError", "def tensor_to_json(self, tensor_name):\n name = tensor_name\n tup = self.tensor_info[name]\n pert_list = tup[1]\n repr = tup[2]\n\n repr_data = repr.to_json() if repr is not None else None\n pert_data = [o.to_json()\n for o in pert_list] if pert_list is not None else None\n\n tensor_dict = {\n \"name\": name,\n \"repr\": repr_data,\n \"perturb\": pert_data\n }\n\n return tensor_dict", "def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n todecode = dct['__ndarray__'].encode(\"ascii\")\n data = base64.b64decode(todecode)\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct", "def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct", "def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct", "def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct", "def tensor2obj(tensor):\n return pickle.loads(tensor.cpu().numpy().tobytes())", "def _json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct", "def to_tensor(self): \n raise NotImplementedError", "def json_serialize_numpy_array(array):\n return json.dumps(json_ready_numpy_array(array))", "def json_serialize_numpy_array(array):\n return json.dumps(json_ready_numpy_array(array))", "def np2str(a: np.ndarray) -> str:\n return json.dumps(a.tolist())", "def _to_tensor(cls, tensor):\n if isinstance(tensor, Tensor):\n return tensor\n return Tensor(data=tensor)", "def TensorRepresentations(self) -> tensor_adapter.TensorRepresentations:", "def tensor2npy(x):\n out = x.detach().cpu().numpy().transpose(1, 2, 0)\n return out", "def test_numpy():\n\n with tf.Graph().as_default():\n A = tf.convert_to_tensor(np.arange(100))\n\n std_out = io.StringIO()\n with redirect_stdout(std_out):\n tf_dprint(A)\n\n expected_out = textwrap.dedent(\n \"\"\"\n Tensor(Const):0,\\tdtype=int64,\\tshape=[100],\\t\"Const:0\"\n | [ 0 1 2 ... 97 98 99]\n \"\"\"\n )\n\n assert std_out.getvalue() == expected_out.lstrip()\n\n N = 100\n np.random.seed(12345)\n X = np.vstack([np.random.randn(N), np.ones(N)]).T\n\n with tf.Graph().as_default():\n X_tf = tf.convert_to_tensor(X)\n\n std_out = io.StringIO()\n with redirect_stdout(std_out):\n tf_dprint(X_tf)\n\n expected_out = textwrap.dedent(\n \"\"\"\n Tensor(Const):0,\\tdtype=float64,\\tshape=[100, 2],\\t\"Const:0\"\n | [[-0.20470766 1. ]\n [ 0.47894334 1. ]\n [-0.51943872 1. ]\n ...\n [-0.74853155 1. ]\n [ 0.58496974 1. ]\n [ 0.15267657 1. 
]]\n \"\"\"\n )\n\n assert std_out.getvalue() == expected_out.lstrip()", "def serialize(self, data):\n if isinstance(data, dict):\n return json.dumps(\n {\n key: value.tolist() if isinstance(value, np.ndarray) else value\n for key, value in data.items()\n }\n )\n\n if hasattr(data, \"read\"):\n return data.read()\n\n if isinstance(data, np.ndarray):\n return json.dumps(data.tolist())\n\n return json.dumps(data)", "def serialize(obj):\n return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)", "def serialize(obj):\n return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)", "def list_to_backend_type(data: List) -> TTensor:", "def to_numpy(tensor):\n return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()", "def tensor2np(x):\n return x.cpu().numpy()", "def _torch_to_numpy(tensor):\n return tensor.detach().cpu().numpy()", "def assure_numpy(a: Union[tf.Tensor, np.ndarray]) -> np.ndarray:\n if isinstance(a, np.ndarray):\n return a\n return a.numpy()", "def serialize(loss):\n return serialize_keras_object(loss)", "def acti_to_json(self, tensor_name):\n name = tensor_name\n tup = self.acti_info[name]\n pert_list = tup[0]\n repr = tup[1]\n\n repr_data = repr.to_json() if repr is not None else None\n pert_data = [o.to_json()\n for o in pert_list] if pert_list is not None else None\n\n acti_dict = {\n \"name\": name,\n \"repr\": repr_data,\n \"perturb\": pert_data\n }\n\n return acti_dict", "def numpy(x):\n if isinstance(x, torch.Tensor):\n x = x.detach().cpu().numpy()\n elif isinstance(x, tf.Tensor):\n x = x.numpy()\n return x.astype(np.float64)", "def serialize_ndarrays(d):\n def dict_handler(d):\n return d.items()\n\n handlers = {list: enumerate, tuple: enumerate,\n set: enumerate, frozenset: enumerate,\n dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n\n return serialize(d)", "def to_numpy(x: torch.Tensor) -> np.ndarray:\n if isinstance(x, dict):\n r = {}\n for k, v in x.items():\n if isinstance(v, torch.Tensor):\n if v.device.type == 'cuda':\n r.update({k: v.detach().cpu().numpy()})\n else:\n r.update({k: v.detach().numpy()})\n else:\n r.update({k: v})\n return r\n else:\n if x.device.type == 'cuda':\n return x.detach().cpu().numpy()\n else:\n return x.detach().numpy()", "def _img_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value.ravel()))", "def _img_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value.ravel()))", "def _from_numpy(array):\n return tf.constant(array)", "def do_decode(self, value, decode_fn):\n del decode_fn\n tensor_proto = value.numpy_value\n numpy = tensor_util.MakeNdarray(tensor_proto)\n return numpy", "def _feature_to_numpy(\n feature: tf_feature_pb2.Feature,\n tensor_info: feature_lib.TensorInfo,\n feature_name: str,\n) -> type_utils.NpArrayOrScalar:\n dtype = tensor_info.np_dtype\n shape = tensor_info.shape\n if feature.HasField(\"int64_list\"):\n value_array = feature.int64_list.value\n elif feature.HasField(\"float_list\"):\n value_array = feature.float_list.value\n elif feature.HasField(\"bytes_list\"):\n value_array = feature.bytes_list.value\n else:\n raise AttributeError(f\"cannot convert '{feature_name}' from proto to NumPy\")\n value_array = np.array(value_array, dtype=dtype)\n if not shape:\n return 
value_array.item()\n return value_array", "def to_tfrecord(data_blob):\n\n id = np.array(data_blob['id'], dtype=np.int32).tobytes()\n dim = np.array(data_blob['images'].shape, dtype=np.int32).tobytes()\n\n images = np.array(data_blob['images'], dtype=np.uint8).tobytes()\n poses = np.array(data_blob['poses'], dtype=np.float32).tobytes()\n depth = np.array(data_blob['depth'], dtype=np.float32).tobytes()\n filled = np.array(data_blob['filled'], dtype=np.float32).tobytes()\n intrinsics = np.array(data_blob['intrinsics'], dtype=np.float32).tobytes()\n\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[id])),\n 'dim': tf.train.Feature(bytes_list=tf.train.BytesList(value=[dim])),\n 'images': tf.train.Feature(bytes_list=tf.train.BytesList(value=[images])),\n 'poses': tf.train.Feature(bytes_list=tf.train.BytesList(value=[poses])),\n 'depth': tf.train.Feature(bytes_list=tf.train.BytesList(value=[depth])),\n 'filled': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filled])),\n 'intrinsics': tf.train.Feature(bytes_list=tf.train.BytesList(value=[intrinsics])),\n }))\n\n return example", "def tensor(self, X):\n return tf.convert_to_tensor(X, dtype=self.dtype)", "def _to_tensor(x, dtype):\n x = tf.convert_to_tensor(x)\n if x.dtype != dtype:\n x = tf.cast(x, dtype)\n return x", "def tensoras(tensor):\r\n\r\n if pytorch.is_dense(tensor):\r\n m = tensor.detach().cpu().numpy()\r\n if m.ndim == 0:\r\n m = m.item()\r\n elif pytorch.is_sparse(tensor):\r\n m = pytorch.sparse_tensor_to_sparse_adj(tensor)\r\n elif gg.TF_ENABLED and tensorflow.is_dense(tensor):\r\n m = tensor.numpy()\r\n elif gg.TF_ENABLED and tensorflow.is_sparse(tensor):\r\n m = tensorflow.sparse_tensor_to_sparse_adj(tensor)\r\n elif isinstance(tensor, np.ndarray) or sp.isspmatrix(tensor):\r\n m = tensor.copy()\r\n else:\r\n m = np.asarray(tensor)\r\n return m", "def serialize_example(x, y):\n input_features = tf.train.FloatList(value=x)\n label = tf.train.FloatList(value=y)\n features = tf.train.Features(\n feature={\n \"input_features\": tf.train.Feature(float_list=input_features),\n \"label\": tf.train.Feature(float_list=label)\n }\n )\n example = tf.train.Example(features=features)\n return example.SerializeToString()", "def do_encode(self, numpy_value, encode_fn):\n del encode_fn\n encoded_numpy = struct_pb2.StructuredValue()\n encoded_numpy.numpy_value.CopyFrom(\n tensor_util.make_tensor_proto(numpy_value)\n )\n return encoded_numpy", "def decode_tensorflow(self, encoded_chunks: tf.Tensor) -> tf.Tensor:", "def numpy(self):\n value = self._value()\n if not tf.executing_eagerly():\n raise NotImplementedError(\n 'DeferredTensor.numpy() is only supported in eager execution mode.')\n return np.array(value)", "def output_fn(predictions, content_type):\n assert content_type == 'application/json'\n res = predictions.cpu().numpy().tolist()\n return json.dumps(res)", "def json_serving_input_fn():\n inputs = {}\n for feat in INPUT_COLUMNS:\n inputs[feat.name] = tf.placeholder(shape=[None], dtype=feat.dtype)\n\n features = {\n key: tf.expand_dims(tensor, -1)\n for key, tensor in inputs.iteritems()\n }\n return tf.contrib.learn.InputFnOps(features, None, inputs)", "def to_numpy(x):\n if isinstance(x, torch.Tensor):\n x = x.cpu().detach().numpy()\n return x", "def tensor_to_np(img_var):\n img_np = img_var[0].cpu().numpy()\n return img_np", "def _stringify_tensor(obj):\n if hasattr(obj, 'name'): return str(obj.name)\n else: return str(obj)", "def 
_stringify_tensor(obj):\n if hasattr(obj, 'name'): return str(obj.name)\n else: return str(obj)", "def _convert_data(self, data):\n if isinstance(data, Tensor):\n data = data.asnumpy()\n elif isinstance(data, list):\n data = np.array(data)\n elif isinstance(data, np.ndarray):\n pass\n else:\n raise TypeError('Input data type must be tensor, list or numpy.ndarray')\n return data", "def do_decode(self, value, decode_fn):\n del decode_fn\n tensor_proto = value.tensor_value\n tensor = constant(tensor_util.MakeNdarray(tensor_proto))\n return tensor", "def tensor2np(x):\n if x is None:\n return x\n return x.cpu().detach().numpy()", "def location_to_json(pose: tf.Transform) -> typing.List[float]:\n return [\n pose.location[0],\n pose.location[1],\n pose.location[2]\n ]", "def to_tensor(x, **kwargs):\n return x.transpose(2, 0, 1).astype('float32')", "def serialize_example_pyfunction(feature0, feature1, feature2, feature3):\n\n # Create a dictionary mapping the feature name to the tf.Example-compatible\n # data type.\n\n feature = {\n 'feature0': _int64_feature(feature0.numpy()),\n 'feature1': _int64_feature(feature1.numpy()),\n 'feature2': _bytes_feature(feature2.numpy()),\n 'feature3': _float_feature(feature3.numpy()),\n }\n\n # Create a Features message using tf.train.Example.\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()", "def tf_encode(pt, en):\n result_pt, result_en = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])\n result_pt.set_shape([None])\n result_en.set_shape([None])\n return result_pt, result_en", "def to_tensor_proto(arr):\n if isinstance(arr, float):\n arr = np.asarray(arr).astype('float32')\n elif isinstance(arr, int):\n arr = np.asarray(arr).astype('int32')\n assert isinstance(arr, np.ndarray), type(arr)\n try:\n dtype = _DTYPE_DICT[arr.dtype]\n except KeyError:\n raise KeyError(\"Dtype {} is unsupported by current ZMQ Op!\".format(arr.dtype))\n\n ret = TensorProto()\n shape = ret.tensor_shape\n for s in arr.shape:\n d = shape.dim.add()\n d.size = s\n\n ret.dtype = dtype\n\n buf = arr.tobytes()\n ret.tensor_content = buf\n return ret", "def _bytes_feature(value):\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value.tobytes()]))", "def pyplot_to_tensor(pyplot_figure):\n x = pyplot_to_numpy(pyplot_figure=pyplot_figure)\n return x", "def to_data(tensor_or_var):\n if type(tensor_or_var) is Variable:\n return tensor_or_var.data\n else:\n return tensor_or_var", "def _bytes_feature(value):\n if isinstance(value, type(tf.constant(0))):\n # BytesList won't unpack a string from an EagerTensor.\n value = value.numpy()\n if isinstance(value, list):\n value = [six.ensure_binary(token) for token in value]\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n # value = str([six.ensure_text(token, \"utf-8\") for \\\n # token in value]).encode()\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "def _bytes_feature(value):\n if isinstance(value, type(tf.constant(0))): # if value ist tensor\n value = value.numpy() # get value of tensor\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "def simple_unet_data():\n return tf.constant(value=1.0, shape=(1, 256, 256, 1))", "def _int64_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))", "def _convert_ndarray_to_tensor(self, 
state_dict: Dict[str, Any]) -> None:\n # model could be an OrderedDict with _metadata attribute\n # (as returned by Pytorch's state_dict()). We should preserve these\n # properties.\n for k in list(state_dict.keys()):\n v = state_dict[k]\n if not isinstance(v, np.ndarray) and not isinstance(v,\n torch.Tensor):\n raise ValueError(\n \"Unsupported type found in checkpoint! {}: {}\".format(k,\n type(\n v))\n )\n if not isinstance(v, torch.Tensor):\n state_dict[k] = torch.from_numpy(v)", "def deserialize(self, indv):\n for key in indv:\n if key in self.evolvables and not key in self.scalars:\n if not isinstance(indv[key], np.ndarray):\n indv[key] = np.array(indv[key])\n\n return indv", "def bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value.numpy()]))", "def to_numpy(tensor: torch.Tensor) -> np.ndarray:\n\n return (\n tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()\n )", "def _serialize(self, data):\n data = [np.array(j) for j in data]\n self._data_shape_list = [j.shape for j in data]\n serialized_data = [j.ravel() for j in data]\n serialized_data = np.hstack(serialized_data)\n return serialized_data", "def _build_tensor(self, ndarray):\n\n ndarray = np.asarray(ndarray).astype(self.dtype)\n return tf1.placeholder_with_default(\n ndarray, shape=ndarray.shape if self.use_static_shape else None)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_2B2If().pack(_x.role, _x.id, _x.local_time, _x.system_time, _x.voltage))\n buff.write(self.pos_3d.tostring())\n buff.write(self.eop_3d.tostring())\n buff.write(self.vel_3d.tostring())\n buff.write(self.angle_3d.tostring())\n buff.write(self.quaternion.tostring())\n buff.write(self.imu_gyro_3d.tostring())\n buff.write(self.imu_acc_3d.tostring())\n length = len(self.nodes)\n buff.write(_struct_I.pack(length))\n for val1 in self.nodes:\n _x = val1\n buff.write(_get_struct_2B3f().pack(_x.role, _x.id, _x.dis, _x.fp_rssi, _x.rx_rssi))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _create_serialized_example(speakers, utterances, emotions):\n example = tf.train.SequenceExample(feature_lists=tf.train.FeatureLists(feature_list={\n 'utterances': tf.train.FeatureList(\n feature=[_float_feature(u) for u in utterances]),\n 'speakers': tf.train.FeatureList(\n feature=[_int64_feature(speakers)]),\n 'emotions': tf.train.FeatureList(\n feature=[_int64_feature(emotions)])}))\n return example.SerializeToString()", "def t2np(v):\n\n if type(v) == torch.Tensor:\n return v.detach().cpu().numpy()\n else:\n try:\n return v.cpu().numpy()\n except:\n return v", "def _bytes_feature(value): \n if isinstance(value, type(tf.constant(0))): \n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "def _deserialize_single_field(\n example_data, tensor_info: feature_lib.TensorInfo\n):\n # Ragged tensor case:\n if tensor_info.sequence_rank > 1:\n example_data = _dict_to_ragged(example_data, tensor_info)\n\n # Restore shape if possible. 
TF Example flattened it.\n elif tensor_info.shape.count(None) < 2:\n shape = [-1 if i is None else i for i in tensor_info.shape]\n example_data = tf.reshape(example_data, shape)\n\n # Restore dtype\n if example_data.dtype != tensor_info.tf_dtype:\n example_data = tf.dtypes.cast(example_data, tensor_info.tf_dtype)\n return example_data", "def numpy(self) -> np.ndarray:\n return self.tensor.numpy()", "def _bytes_feature(value):\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n \n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "def batch_format_fn(element):\n return collections.OrderedDict(\n x=tf.reshape(element['pixels'], [-1, 784]),\n y=tf.reshape(element['label'], [-1, 1]))", "def to_json(self) -> str:\n return json.dumps([x.to_dict() for x in self.inputs])", "def _float64_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def _bytes_feature(value):\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "def test_to_plain_python_obj_numpy(test_input, expected):\n output = r.to_plain_python_obj(test_input)\n assert output == expected\n # We should not get a json conversion error\n json.dumps(output)", "def trajectory_to_json(trajectory: Trajectory) -> str:\n # numpy arrays need to be converted to normal tuples\n return json.dumps(trajectory, cls=NumpyEncoder)", "def serialize_example(feature0, feature1, feature2, feature3):\n \n # Create a dictionary that maps feature names to tf.Example compatible data types.\n \n feature = {\n 'feature0': _int64_feature(feature0),\n 'feature1': _int64_feature(feature1),\n 'feature2': _bytes_feature(feature2),\n 'feature3': _float_feature(feature3),\n }\n \n # Create a feature message using tf.train.Example.\n \n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()", "def tf_flatten(x):\n return tf.contrib.layers.flatten(x)", "def bytes_feature(values):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))", "def tensors2ndarray(tensors) -> list:\n\n new_tensors = []\n if isinstance(tensors, (list, tuple)):\n for i in tensors:\n new_tensors.extend(tensors2ndarray(i))\n elif isinstance(tensors, dict):\n for k, v in tensors.items():\n new_tensors.extend(tensors2ndarray(v))\n elif hasattr(tensors, 'detach') and hasattr(tensors, 'numpy'):\n new_tensors.append(tensors.detach().numpy())\n elif not isinstance(tensors, (bool, str, int, float, types.FunctionType)):\n for k in tensors.__dir__():\n if not k.startswith('__'):\n v = getattr(tensors, v)\n new_tensors.extend(tensors2ndarray(v))\n return new_tensors", "def to_numpy(x):\n if isinstance(x, np.ndarray): \n return x\n if isinstance(x, Variable):\n x = x.data\n return x.cpu().numpy()", "def bytes_feature(value):\n if isinstance(value, list):\n for i in range(len(value)):\n if not isinstance(value[i], bytes):\n value[i] = value[i].encode()\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n else:\n if not isinstance(value, bytes):\n value = value.encode()\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "def _bytes_feature(value):\n # return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))\n return 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_h16fh8f().pack(_x.id, _x.age, _x.velocidad_relativa_x, _x.velocidad_relativa_y, _x.velocidad_absoluta_x, _x.velocidad_absoluta_y, _x.velocidad_absoluta_sigma_x, _x.velocidad_absoluta_sigma_y, _x.bounding_box_centro_x, _x.bounding_box_centro_y, _x.bounding_box_largo, _x.bounding_box_ancho, _x.object_box_centro_x, _x.object_box_centro_y, _x.object_box_orientacion, _x.object_box_size_x, _x.object_box_size_y, _x.clasificacion, _x.clasificacion_age, _x.clasificacion_certeza, _x.punto_cercano_x, _x.punto_cercano_y, _x.punto_referencia_x, _x.punto_referencia_y, _x.punto_referencia_sigma_x, _x.punto_referencia_sigma_y))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _convert_to_numpy_safe(t: Optional[torch.Tensor]) -> torch.Tensor:\n\n if t is not None and t.device.type == \"cpu\":\n return t.numpy()\n return t", "def _bytes_feature(value):\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "def _bytes_feature(value):\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "def convert_raw_arrays(x, f):\n try:\n # Tensor, TensorNetwork...\n x = x.copy()\n x.apply_to_arrays(f)\n return x\n except AttributeError:\n pass\n\n try:\n # raw structured arrays that provide the {get|set}_params interface\n x = x.copy()\n x.set_params(tree_map(f, x.get_params()))\n return x\n except AttributeError:\n pass\n\n # other raw arrays\n return f(x)", "def convert_to_ndarray(entity):\n if isinstance(entity, np.ndarray) and entity.dtype.kind in set('biufc'):\n # entity is numerical ndarray already\n return entity\n if isinstance(entity, np.ndarray) and isinstance(entity.flat[0], qt.Qobj):\n # entity is output from qt.eigenstates\n return convert_esys_to_ndarray(entity)\n if isinstance(entity, list) and isinstance(entity[0], np.ndarray) and isinstance(entity[0].flat[0], qt.Qobj):\n # entity is a list of qt.eigenstates\n return np.asarray([convert_esys_to_ndarray(entry) for entry in entity])\n # possibly we have a list of numerical values or a list of ndarrays\n converted_entity = np.asarray(entity)\n if converted_entity.dtype.kind not in set('biufc'):\n raise TypeError('Unable to convert data to numerical numpy array: ', entity)\n return converted_entity", "def bytes_feature(values):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))", "def serialize_example(feature0, feature1, feature2, feature3):\n\n # Create a dictionary mapping the feature name to the tf.Example-compatible\n # data type.\n\n feature = {\n 'feature0': _int64_feature(feature0),\n 'feature1': _int64_feature(feature1),\n 'feature2': _bytes_feature(feature2),\n 'feature3': _float_feature(feature3),\n }\n\n # Create a Features message using tf.train.Example.\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()", "def _bytes_feature(value):\n if isinstance(value, 
type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "def _bytes_feature(value):\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "def _bytes_feature(value):\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))" ]
[ "0.68428576", "0.6705407", "0.6659738", "0.6618609", "0.6618609", "0.6618609", "0.6599845", "0.6567279", "0.64083153", "0.63227916", "0.63227916", "0.62989205", "0.628012", "0.6269711", "0.6266001", "0.6180537", "0.6166389", "0.61231035", "0.61231035", "0.61120456", "0.61048424", "0.61045915", "0.60885924", "0.6075527", "0.60355896", "0.5967765", "0.5952377", "0.5931499", "0.5910456", "0.59091413", "0.59091413", "0.58957845", "0.5889988", "0.5889747", "0.5889143", "0.5887479", "0.5886514", "0.5882544", "0.5876884", "0.5834575", "0.5781007", "0.5775696", "0.5752294", "0.57509893", "0.5726283", "0.57250804", "0.5712049", "0.5712049", "0.5710937", "0.57053685", "0.57007325", "0.56987244", "0.5695131", "0.5667202", "0.5665161", "0.5663246", "0.5661577", "0.56582505", "0.5652024", "0.56456125", "0.56401885", "0.5624943", "0.56190354", "0.56143206", "0.56030303", "0.56015354", "0.56002045", "0.5596427", "0.55836093", "0.5580518", "0.55709505", "0.55683094", "0.5568081", "0.5554401", "0.5549372", "0.55465394", "0.5545788", "0.55385745", "0.55215675", "0.55212635", "0.55179024", "0.5516635", "0.551181", "0.5506963", "0.55043024", "0.5503163", "0.54999506", "0.5496619", "0.5491556", "0.54883534", "0.548559", "0.5482574", "0.5482574", "0.54812324", "0.5478439", "0.54776317", "0.5476817", "0.54750156", "0.54750156", "0.54750156" ]
0.78063214
0
>>> lst = [
        [1],
        [1, 2],
        [1, 2, 3],
        None,
    ]
>>> concat_list(lst)
[1, 1, 2, 1, 2, 3], [slice(0, 1), slice(1, 3), slice(3, 6), None]
def concat_list(lst, batch_flags=None):
    slices = [slice(0)] * len(lst)
    datas = []
    row_flag = 0
    for i, r in enumerate(lst):
        if r is None:
            slices[i] = None
            continue
        j = -1
        if batch_flags is None or batch_flags[i]:
            for j, d in enumerate(r):
                datas.append(d)
            slices[i] = slice(row_flag, row_flag + j + 1)
        else:
            datas.append(r)
            slices[i] = row_flag
        row_flag += j + 1
    return datas, slices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concat_list(in_list):\n return list(itertools.chain(*in_list))", "def concat_succ(L):\n if len(L) < 2:\n return L\n res = []\n last = L.pop()\n othe = L.pop()\n for i in last:\n for j in othe:\n if type(i) is list:\n if type(j) is list:\n res.append(i+j)\n else:\n res.append(i+[j])\n elif type(j) is list:\n res.append([i] + j)\n else:\n res.append([i] + [j])\n L = [res] + L\n return concat_succ(L)", "def concat(list_of_lists):\n return reduce(lambda a,b : a+b,list_of_lists,[])", "def flatten_lists(lst):\n return list(chain(*lst))", "def listExtend(lst, items):\n if lst is None:\n return list(items)\n else:\n lst.extend(items)\n return lst", "def concat(seqs):\n return itertools.chain.from_iterable(seqs)", "def flatten(lst):\n if atomp(lst):\n return lst\n return _flatten(lst)", "def flatten(lst):\n if atomp(lst): # scalar\n return lst\n return _flatten(lst)", "def concat(*seqs):\n return itertools.chain.from_iterable(seqs)", "def concat_list(str_lst):\n concatenation = ''\n if len(str_lst) != 0:\n for string in str_lst:\n concatenation = concatenation + string\n return concatenation", "def concat_lists(list1, list2):\n\n # return list1 + list2\n for item in list2:\n list1.append(item)\n\n return list1", "def flatten_list(lst):\r\n if not lst:\r\n return []\r\n if type(lst[0]) == list:\r\n return flatten_list(lst[0]) + flatten_list(lst[1:])\r\n return [lst[0]] + flatten_list(lst[1:])", "def flatten(lst):\n out = []\n for v in lst:\n if v is None: continue\n if isinstance(v, list):\n out.extend(flatten(v))\n else:\n out.append(v)\n return out", "def flatten_list(_list):\n if not _list:\n return []\n return reduce(operator.add, _list)", "def concatenateList(list1, list2):\n outputList = []\n\n ## list1\n # if it's an empty list\n if len(list1) == 0:\n outputList.append(list1)\n # if it's already a list of list (from previous __add__)\n elif isinstance(list1[0], list):\n for i in range(len(list1)):\n outputList.append(list1[i])\n # first use of __add__, not a list of list\n else:\n outputList.append(list1)\n\n ## list2\n # if it's an empty list\n if len(list2) == 0:\n outputList.append(list2)\n # if it's already a list of list (from previous __add__)\n elif isinstance(list2[0], list):\n for i in range(len(list2)):\n outputList.append(list2[i])\n # first use of __add__, not a list of list\n else:\n outputList.append(list2)\n\n return outputList", "def flatten_list(lol):\n return list(itertools.chain.from_iterable(lol))", "def flatten_list(lst):\n assert isinstance(lst, list), \"you didn't pass a list!\"\n\n if isinstance(lst[0], list):\n if len(lst[0])>1:\n return ['-'.join(i) for i in lst] # then its a kpoints list\n return flatten_list([i[0] for i in lst])\n else:\n return [i for i in lst]", "def flat_list(list_: list) -> list:\n return [item for sublist in list_ for item in sublist]", "def flattenList(l=None):\r\n flat_list = [item for sublist in l for item in sublist]\r\n return flat_list", "def expand_list(\r\n l: List, length: int, with_value: Any = None, with_none: bool = False\r\n ) -> List:\r\n\r\n if with_none:\r\n l.extend([None] * (length - len(l)))\r\n elif with_value is not None:\r\n l.extend([with_value] * (length - len(l)))\r\n else:\r\n l.extend([l[-1]] * (length - len(l)))\r\n return l", "def concat(list_of_arrays):\n shape = np.shape(list_of_arrays)\n newShape = [ shape[0]*shape[1] ]\n if len(shape)>2:\n for i in range(2,len(shape)):\n newShape.append(shape[i])\n \n array_concat = np.zeros(newShape)\n s=0\n e=shape[1]\n \n for i in range(0,shape[0]):\n 
array_concat[s:e] = list_of_arrays[i]\n s=e\n e=e+shape[1] \n return array_concat", "def sublistsC (seq):\n if seq:\n sublists = [([seq[0]] + a, b) for a, b in sublistsC(seq[1:])]\n return sublists + [(b, a) for a, b in sublists]\n else:\n return [([], [])]", "def genSubset(L):\n if len(L) == 0:\n return [[]] # list of empty list\n smaller = genSubset(L[:-1]) # the list without last element\n extra = L[-1:] # a list of just the last element\n new = []\n for small in smaller:\n new.append(small + extra)\n return smaller + new", "def chop(some_list):\n # This function will take the list and remove the first and last element in list but return None.\n some_list.pop(0)\n some_list.pop(-1)\n return None", "def concat(iterables):\n return itertools.chain.from_iterable(iterables)", "def concatenate(tensor_list, axis=0):\n concat_size = sum(tt.shape[axis] for tt in tensor_list)\n\n output_shape = ()\n for k in range(axis):\n output_shape += (tensor_list[0].shape[k],)\n output_shape += (concat_size,)\n for k in range(axis + 1, tensor_list[0].ndim):\n output_shape += (tensor_list[0].shape[k],)\n\n out = tensor.zeros(output_shape)\n offset = 0\n for tt in tensor_list:\n indices = ()\n for k in range(axis):\n indices += (slice(None),)\n indices += (slice(offset, offset + tt.shape[axis]),)\n for k in range(axis + 1, tensor_list[0].ndim):\n indices += (slice(None),)\n\n out = tensor.set_subtensor(out[indices], tt)\n offset += tt.shape[axis]\n\n return out", "def concat(xs, axis=1):\n return Concat(axis=axis)(*xs)", "def merge_many(*lsts):\n if not lsts:\n return []\n elif len(lsts) == 1:\n return lsts[0][:]\n elif len(lsts) == 2:\n return merge(*lsts)\n else:\n left = lsts[len(lsts) // 2:]\n right = lsts[:len(lsts) // 2]\n\n return merge(merge_many(*left), merge_many(*right))", "def cons(l, x):\n return l + [x]", "def list_flatten(input_list):\n if len(input_list) > 0 and isinstance(input_list[0], (list, np.ndarray)):\n return functools.reduce(operator.iconcat, input_list, [])\n\n return input_list", "def concatenate_lists(*layers, **kwargs):\n ...", "def concatenate(tensor_list, axis=0):\n concat_size = sum(tt.shape[axis] for tt in tensor_list)\n\n output_shape = ()\n for k in range(axis):\n output_shape += (tensor_list[0].shape[k], )\n output_shape += (concat_size, )\n for k in range(axis + 1, tensor_list[0].ndim):\n output_shape += (tensor_list[0].shape[k], )\n\n out = tensor.zeros(output_shape)\n offset = 0\n for tt in tensor_list:\n indices = ()\n for k in range(axis):\n indices += (slice(None), )\n indices += (slice(offset, offset + tt.shape[axis]), )\n for k in range(axis + 1, tensor_list[0].ndim):\n indices += (slice(None), )\n\n out = tensor.set_subtensor(out[indices], tt)\n offset += tt.shape[axis]\n\n return out", "def flatten_list(l, log):\n warning_msg = 'Warning: returning None.'\n if l is None or l[0] is None:\n if log is not None:\n log.info(warning_msg)\n else:\n print warning_msg\n return [None]\n else:\n return [val for sublist in l for val in sublist]", "def cat_lists(*list_args):\n result = []\n for List in list_args:\n result.extend(List)\n\n return result", "def concatenate(tensor_list, axis=0):\n concat_size = sum(tt.shape[axis] for tt in tensor_list)\n\n output_shape = ()\n for k in range(axis):\n output_shape += (tensor_list[0].shape[k],)\n output_shape += (concat_size,)\n for k in range(axis + 1, tensor_list[0].ndim):\n output_shape += (tensor_list[0].shape[k],)\n\n out = T.zeros(output_shape)\n offset = 0\n for tt in tensor_list:\n indices = ()\n for k in range(axis):\n 
indices += (slice(None),)\n indices += (slice(offset, offset + tt.shape[axis]),)\n for k in range(axis + 1, tensor_list[0].ndim):\n indices += (slice(None),)\n\n out = T.set_subtensor(out[indices], tt)\n offset += tt.shape[axis]\n\n return out", "def _deduplicate(lst):\n out = []\n for i in lst:\n if i not in out:\n out.append(i)\n return out", "def flattened(list_of_lists):\n res = functools.reduce(operator.iconcat, list_of_lists, [])\n return res", "def merge_segments(lst):\n ii = 0\n while True:\n jj = ii + 1\n if len(lst) <= jj:\n return lst\n seg1 = lst[ii]\n seg2 = lst[jj]\n if seg1.merge(seg2):\n if seg2.empty():\n del lst[jj]\n else:\n ii += 1\n else:\n ii += 1\n return lst", "def concat(a: list[int], b: list[int]) -> list[int]:\n result: list[int] = list()\n\n for x in a:\n result.append(x)\n for y in b: \n result.append(y)\n return result", "def flatten(src):\n return [item for sublist in src for item in sublist]", "def flattenList(input_list):\r\n return [item for sublist in input_list for item in sublist]", "def head_of_all(x, l):\n return [[x] + p for p in l]", "def ijoin_lists(l):\n if l:\n try:\n if not all(ymap(isinstance, l, list)):\n from tek.errors import MooException\n raise MooException('Some elements aren\\'t lists!')\n for i in cumsum([0] + list(map(len, l[:-1]))):\n l[i:i+1] = l[i]\n except Exception as e:\n logger.debug('ijoin_lists failed with: ' + str(e))\n return l", "def flatten(llst):\n res = []\n for lst in llst:\n res += lst\n return res", "def concat_map(f, xs):\n return concat(imap(f, xs))", "def concat(x: list[int], y: list[int]) -> list[int]:\n con = list()\n i: int = 0\n count: int = 0\n while len(y) > i: \n con.append(y[i])\n i += 1\n while len(x) > count: \n con.append(x[count])\n count += 1 \n return con", "def join(self: \"_List[_List[T]]\") -> \"_List[T]\":\n return ListMonad(*[element for lists in self for element in lists])", "def concat(self, tensors):\n # check the arguments and try the fast path: only one tensor\n tensors = list(tensors)\n if not tensors:\n return []\n length = len(tensors[0])\n if length == 0:\n raise ValueError('`tensors` must be list of non-empty Tensor '\n 'lists.')\n for t in tensors[1:]:\n if len(t) != length:\n raise ValueError('`tensors` must be list of Tensor lists of '\n 'the same length.')\n if length == 1:\n return [t[0] for t in tensors]\n\n # do the slow path: concat all tensors\n with tf.device(self.main_device), tf.name_scope('average_tensors'):\n return [tf.concat(t, axis=0) for t in tensors]", "def concatv(*seqs):\n return concat(seqs)", "def flatten_list(l):\n return [item for sublist in l for item in sublist]", "def concat(xss):\n return list(anyconfig.compat.from_iterable(xs for xs in xss))", "def flatten(lst):\n \"*** YOUR CODE HERE ***\"", "def flatten(x: List) -> List:\n return functools.reduce(lambda cum, this: cum + this, x, [])", "def merge(list1, list2): \r\n if len(list1) == 0 or len(list2) == 0:\r\n new_list = [item for item in list1]\r\n new_list.extend(list2)\r\n return new_list\r\n else:\r\n if list1[0] <= list2[0]:\r\n new_list = list([list1[0]])\r\n new_list.extend(merge(list1[1:], list2))\r\n return new_list\r\n else:\r\n new_list = list([list2[0]])\r\n new_list.extend(merge(list1, list2[1:]))\r\n return new_list", "def flatten(l):\n return [item for sublist in l for item in sublist]", "def flatten(l):\n return [item for sublist in l for item in sublist]", "def flatten(listOfLists):\n return list(chain.from_iterable(listOfLists))", "def prefix_all(value, LL):\n return [[value] + L for L in 
LL]", "def flatten_list(alist):\n return list(flatten_list_gen(alist))", "def safe_concat(arrs, default=None, **kwargs):\n arrs = [arr for arr in arrs]\n if not arrs:\n return default\n if isinstance(arrs[0], pd.Series):\n arrs = [arr.values for arr in arrs]\n if isinstance(arrs[0], pd.DataFrame):\n if all([arr.empty for arr in arrs]):\n return default\n return pd.concat([arr for arr in arrs if not arr.empty], **kwargs)\n if isinstance(arrs[0], np.ndarray):\n if all([arr.shape[0] == 0 for arr in arrs]):\n return default\n return np.concatenate([arr for arr in arrs if not arr.shape[0] == 0], **kwargs)", "def mutate_list_2(lst):\r\n elem = lst[0]\r\n lst.remove(elem)\r\n lst.append(elem)\r\n return lst", "def compress_list(src_list):\n return [item for item in src_list if item]", "def remove_duplicates(list1):\r\n if len(list1) == 1 or len(list1) == 0:\r\n return [item for item in list1]\r\n else:\r\n if list1[-1] == list1[-2]:\r\n return remove_duplicates(list1[:-1])\r\n else:\r\n new_list = remove_duplicates(list1[:-1])\r\n new_list.append(list1[-1])\r\n return new_list", "def list_copy(l: List[Any]) -> List[Any]:\n return [item for item in l]", "def cartn_append(*list_of_lists: list) -> list:\n def aux(list_of_lists: list) -> list:\n if len(list_of_lists) == 0:\n return []\n elif len(list_of_lists) == 1:\n return list_of_lists[0]\n else:\n return aux([cart2_append(list_of_lists[0], list_of_lists[1])] + tail(list_of_lists[1:]))\n \n return aux(list(list_of_lists))", "def concat_all(self):\n return self.merge(1)", "def flatten(list_of_lists: List[List]) -> List:\n return list(itertools.chain(*list_of_lists))", "def flatten(l: iter):\n return functools.reduce(lambda x, y: x + y, l)", "def flatten( liste ) :\n return list(set([ e for sublist in liste for e in sublist ]))\n # TODO :\n # more efficient to use\n # import itertools\n # list(itertools.chain(*list2d))", "def merge_lists(inputs: List[List[int]]) -> List[int]:\n output = []\n while len(inputs):\n next_item = inputs[0][0]\n min_index = 0\n for i in range(1, len(inputs)):\n if inputs[i][0] < next_item:\n next_item = inputs[i][0]\n min_index = i\n output.append(next_item)\n inputs[min_index].pop(0)\n if not len(inputs[min_index]):\n inputs.pop(min_index)\n return output", "def lflatten(*lst):\n return flatten(list(lst))", "def merge_lists(l1, l2):\n return [ *l1, *l2 ]", "def _concatenated_list(s):\n m = LIST_CONCAT_P.match(s.strip())\n if not m:\n raise ValueError(s)\n maybe_list = _decode_flag_val(m.group(1))\n if isinstance(maybe_list, list):\n return maybe_list * int(m.group(2))\n return s", "def listAppend(lst, item):\n if lst is None:\n return [item]\n else:\n lst.append(item)\n return lst", "def listify(x, dedup=True):\n if not isinstance(x, list):\n x = [x]\n res = flatten(x)\n res = [x for x in res if x is not None]\n if dedup:\n return dedup_list(res)\n return res", "def __join_expanded(expanded: list[typing.Union[str, list[str]]]) -> list[str]:\n list_values = [(i, val) for i, val in enumerate(expanded) if isinstance(val, list)]\n\n if len(list_values) == 0:\n return [\"\".join(expanded)]\n\n initial_len = len(list_values[0][1]) if list_values else None\n\n if not all(len(i) == initial_len for _, i in list_values[1::]):\n raise ValueError(\"not all non-expanded list are of the same size\")\n\n pairs = zip(*[[(i, j) for j in val] for i, val in list_values])\n\n result = list()\n for pair in pairs:\n cc = expanded.copy()\n\n for i, v in pair:\n del(cc[i])\n cc.insert(i, v)\n\n result.append(\"\".join(cc))\n\n return result", 
"def flatten(l: List[List[Any]]) -> List[Any]:\n\n return [x for y in l for x in y]", "def _flatten_list(x):\n return list(chain.from_iterable(x))", "def partition(lst, pred):\n start = []\n append = start.append\n\n while lst:\n x, lst_ = lst.uncons\n if pred(x):\n break\n lst = lst_\n append(x)\n\n return List(start), lst", "def deflatten(flat_li, *original_li):\n if len(original_li) == 1:\n original_li = original_li[0]\n deflatten_li = []\n i = 0\n for el in original_li:\n if isinstance(el, Sequence):\n deflatten_li.append(flat_li[i:i+len(el)])\n i += len(el)\n else:\n deflatten_li.append(flat_li[i])\n i += 1\n return deflatten_li", "def first_last_item(input_list: list) -> list:\n\n if len(input_list) > 1:\n return [input_list[0], input_list[-1]]\n else:\n return []", "def flatten(items):\n if items == []:\n return items\n if isinstance(items, list):\n flattend = []\n for item in items:\n flattend.extend(flatten(item))\n return flattend\n return [items]", "def concatenate(tensor_list, axis=0):\r\n # Check someone did not make the common mistake to do something like:\r\n # c = concatenate(x, y)\r\n # instead of\r\n # c = concatenate((x, y))\r\n if not isinstance(tensor_list, (tuple, list)):\r\n raise TypeError(\"The 'tensors' argument must be either a tuple \"\r\n \"or a list, make sure you did not forget () or [] around \"\r\n \"arguments of concatenate.\", tensor_list)\r\n return join(axis, *tensor_list)", "def _flatten(x: Sequence) ->list:\n return [item for sublist in x for item in sublist]", "def splice(l, a, b, c):\n\n return l[:a] + [c] + l[a + b:], l[a:a + b]", "def mergesort(lst):\n result = []\n i = 0\n while i < len(lst):\n left = lst[i] if isinstance(lst[i], list) else [lst[i]]\n i += 1\n\n right = lst[i] if isinstance(lst[i], list) else [lst[i]]\n i += 1\n\n result.append(merge(left, right))\n return result", "def expand_list(a):\n rv, _ = _expand_sublist(a, 0)\n return rv", "def flatten(lis):\n new_lis = []\n for item in lis:\n if type(item) == type([]):\n new_lis.extend(flatten(item))\n else:\n new_lis.append(item)\n return new_lis", "def flatten(ls):\n return sum(ls, [])", "def merge(list_of_lists):\n merged = list()\n for l in list_of_lists:\n merged.extend(l)\n return merged", "def flatten(ls):\r\n return [item for sublist in ls for item in sublist]", "def humanise_list(lst):\n assert len(lst) > 0\n if len(lst) == 1:\n return lst[0]\n head = \", \".join(lst[:-1])\n tail = lst[-1]\n return f\"{head} and {tail}\"", "def flatten(list):\n\n if isinstance(list, collections.Iterable) and not isinstance(list, (str, bytes)):\n return [a for i in list for a in flatten(i)]\n else:\n return [list]", "def flatten_list(in_list):\n return [item for sublist in in_list for item in sublist]", "def flatten(lst):\n buildlist = []\n for i in lst:\n if type(i) is list:\n buildlist += flatten(i)\n else:\n buildlist.append(i)\n return buildlist", "def union(list_a: list, list_b: list) -> list:\n if list_a is None:\n list_a = [None]\n if list_b is None:\n list_b = [None]\n return list(set(list_a) | set(list_b))", "def flatten(l):\n result = []\n for x in l:\n if type(x) is list:\n result.extend(flatten(x))\n else:\n result.append(x)\n return result", "def flatten(list_of_lists: List[List]) -> List:\n return reduce(iconcat, list_of_lists, [])", "def wrapped_concatenate(splits, start, end, total_cycles):\n rng = range(start, min(end, total_cycles))\n if end > total_cycles:\n rng.extend(range(0, end%total_cycles))\n return concatenate(splits[j] for j in rng)", "def mutate_list_1(lst, size):\r\n 
count = 0\r\n while count < size:\r\n elem = lst[0]\r\n lst.remove(elem)\r\n lst.append(elem)\r\n count += 1\r\n return lst" ]
[ "0.71166927", "0.6419672", "0.6270731", "0.62562144", "0.6135522", "0.6087907", "0.6058204", "0.6038355", "0.6037199", "0.6023297", "0.59915894", "0.5978991", "0.5917092", "0.5858604", "0.5852147", "0.5850211", "0.584778", "0.58327496", "0.58193696", "0.58092976", "0.57957524", "0.579166", "0.578694", "0.5785782", "0.5759318", "0.5753363", "0.57468635", "0.5738038", "0.57265854", "0.5714795", "0.5705829", "0.56934166", "0.56866497", "0.5683915", "0.5681093", "0.5672021", "0.5670472", "0.5666241", "0.56639564", "0.56396794", "0.5624675", "0.55969757", "0.55719453", "0.55625445", "0.55533373", "0.55463076", "0.5541531", "0.5538311", "0.55265665", "0.5508393", "0.5505439", "0.54956055", "0.54920894", "0.5487051", "0.5471986", "0.5471986", "0.54590875", "0.5456571", "0.5448213", "0.54434574", "0.5439047", "0.5419131", "0.5418678", "0.5416637", "0.54146606", "0.541303", "0.5407571", "0.5398758", "0.53929925", "0.5387992", "0.53645974", "0.5344274", "0.53404194", "0.53325856", "0.5331816", "0.53253305", "0.53246456", "0.5321257", "0.53206", "0.53194493", "0.5310589", "0.5307977", "0.5307247", "0.5296272", "0.529614", "0.52692086", "0.5262822", "0.52495724", "0.5248179", "0.5238956", "0.52380687", "0.5237367", "0.52260053", "0.5224695", "0.52222055", "0.5213053", "0.5211276", "0.52059364", "0.52054286", "0.5197999" ]
0.74441135
0
listen for message event
async def on_message(self, msg: Message):
    try:
        cmsg = await WechatyMessage(msg)
    except NotImplementedError as e:
        logger.debug("[WX] {}".format(e))
        return
    except Exception as e:
        logger.exception("[WX] {}".format(e))
        return
    logger.debug("[WX] message:{}".format(cmsg))

    room = msg.room()  # the group chat this message came from; None if the message was not sent in a group
    isgroup = room is not None

    ctype = cmsg.ctype
    context = self._compose_context(ctype, cmsg.content, isgroup=isgroup, msg=cmsg)
    if context:
        logger.info("[WX] receiveMsg={}, context={}".format(cmsg, context))
        self.produce(context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_message(self, msg):\n self.event('message', msg)", "def on_message(data):\n pass", "def event_in_cb(self, msg):\n self.event = msg.data", "def on_message(self, message):\n print \"Client %s received a message : %s\" % (self.id, message)", "def onMessage(self, message):\n raise NotImplementedError", "def msg_event(self, event):\r\n pass", "def receive_message(self, message):", "def on_message(self, message):\n log.debug(\"Protocol got message {message}\", message=message)\n if message['type'] == \"change\":\n self.handler.process_packet(message['packet'])\n self.send_packet()\n elif message['type'] == \"chat\":\n self.on_chat_message(message)\n elif message['type'] == \"action\":\n self.on_action(message)\n else:\n log.warn(\"Unrecognized message type {type}\", type=message['type'])", "def on_message(self, message):\n print \"Client %s received a message : %s\" % (self.id, message)\n self.write_message(\"Conn!\")", "def on_message(self,ws,message):\n pass", "def listen(client, main):\n\n @client.event\n async def on_message_edit(old, message):\n main.message_handler(message, True)", "def on_message(self, event):\n self.response = event.message\n self.connection.container.yield_() # Wake up the wait() loop to handle the message.", "def receive_message(self, message):\r\n return", "def handleMessage(msg):", "def message_callback(self, message):\n pass", "def handle_message(self, message):", "def receive(self, message):", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def on_message(self, message):\n #print(f\"This message was sent: {message}\") # Writes to the console window (server side)\n self.write_message(f\"This message was sent: {message}\") # Writes message to sender", "def handle_message(self, msg):\n pass", "def _on_message(self, message):\n print(\"RECEIVED on \" + self.session_name + \":\")\n message_json = json.loads(message)\n print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))\n\n for singleMsg in message_json:\n self._process_message(singleMsg)", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "async def on_message(self, message: \"steam.Message\") -> None:", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def received_message(self, m):\n self.receiver.handle_message(m)", "def handle(self, message):", "def on_msg(self, callback):\n self._msg_callback = callback", "def listen_for_any_message(self, msg, match):\n question=\"{}\".format(msg)\n return self.cbmodel.get_response(question)", "def on_receive(self, msg):\n raise NotImplementedError", "def receive(self, msg):\n pass", "def event_receive(self,event):\n\n pass", "def on_message(self, message):\n self.write_message(u\"%s\" % message)", "def listen():\n msg = MSG()\n ctypes.windll.user32.GetMessageA(ctypes.byref(msg), 0, 0, 0)", "def listen():\n msg = MSG()\n ctypes.windll.user32.GetMessageA(ctypes.byref(msg), 0, 0, 0)", "def on_message(client1, userdata, message):\n print(\"message received \" ,str(message.payload.decode(\"utf-8\")))", "def 
listen_to_message(**payload):\n\n data = payload['data']\n\n try:\n message = data['text']\n user = data['user']\n message_id = data['client_msg_id']\n time = data['event_ts']\n channel = data['channel']\n process_data({'user': user, 'message': message, 'message_id': message_id, 'channel': channel, 'time': time})\n except KeyError:\n pass\n except Exception as e:\n logging.error(e)\n return None", "def on_message(self, message):\n obj = json_decode(message)\n self.writing_logs(obj)\n return", "def onMessage(self, msg, binary):\r\n self._assembler.processMessage(msg, binary)", "def onMessage(self, msg):\n log.msg(str(msg))", "def send(self, event, message):\n pass", "def receive_message(self, context, message):\r\n pass", "def SendMessage(self, event):\n pass", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "async def websocket_receive(self, event):\n data_received = event.get('text')\n if not data_received:\n return\n\n data = json.loads(data_received)\n message = data['message']\n\n await self.new_message(message)", "def onMessage(self, payload, isBinary):", "def handle_message(self, message):\n\n\t\tself.console.handle_message(message)", "def eventInCallback(self, msg):\n rospy.loginfo(\"event_in msg received\")\n self.event_in = msg.data", "def on_message(self, event):\n event_data = EventData(message=event)\n if self._callback:\n self._callback(event_data)\n self.offset = event_data.offset\n return event_data", "def on_message(self, msg):\n self.log.info(msg)", "def on_message(self, userdata, message):\n logging.debug(f\"Message arrived from {message.topic}\")\n self.process(userdata, message)", "def on_message(self, ws, message):\n message = json.loads(message)\n if message['type'] == 'error':\n self.on_error(None, message['message'])\n elif message['type'] == 'subscriptions':\n print(\"Subscribed to {}\".format(', '.join([ channel['name'] for channel in message['channels'] ])))\n else:\n if ((message['type']=='ticker' and message['product_id'] in self._ticker) or \n (message['type'] in [\"snapshot\", \"l2update\"] and message['product_id'] in self._level2) or \n (message['type'] in [\"received\",\"open\",\"done\",\"match\",\"change\",\"activate\"] )):\n self.messages.append(message)\n elif message['type']=='heartbeat':\n self.updated_time = time.time()", "def receive_event(self):\n msg = self.msg_queue.get()\n\n # get the logical clock time of the machine that sent the message\n other_system_clock = msg[msg.index(\":\") + 1:] \n \n # set the clock time to the maximum of self's clock time and other \n # system's clock time\n self.clock_time = max(self.clock_time, int(other_system_clock))\n\n # increment the logical clock time and log that a message was received\n self.clock_time += 1\n self.log(\" Received message from \" + str(msg[:msg.index(\":\")]) + \n \" with LC time \" + str(msg[msg.index(\":\") + 2:]) + \n \"; messages left to process: \" + str(self.msg_queue.qsize()))", "def onMessage(self, msg, binary):\r\n# print('WebSocket: Received new message from client. 
'\r\n# '(binary={0})'.format(binary))\r\n\r\n try:\r\n self._assembler.processMessage(msg, binary)\r\n except InvalidRequest as e:\r\n self.sendErrorMessage('Invalid Request: {0}'.format(e))\r\n except DeadConnection:\r\n self.sendErrorMessage('Dead Connection')\r\n self.dropConnection()\r\n except:\r\n import traceback\r\n traceback.print_exc()\r\n self.sendErrorMessage('Fatal Error')", "def on_message(self, _, message):\n with self.message_lock:\n self.messages.append(Message.deserialize(message))\n self.new_message_available.set()\n super().on_message(_, message)", "def received(self, message):\n raise NotImplementedError()", "def handle_message(self, data, channel):\n pass", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")", "def onMessageBegin(self, isBinary):", "def on_message(self, wsobj, message):\n\n message = json.loads(message)\n\n # If needed, complete the websocket handshake\n if message[\"op\"] == \"C\":\n self.on_open(wsobj, message=message)\n\n # The next few lines ensure only gameplay related event for the\n # specified game are provided. Otherwise, ESPN's websockets include\n # noisy league-wide information.\n elif \"pl\" in message:\n if message[\"pl\"] != \"0\" and message[\"tc\"] == self.channel:\n decoded = self.decode_message(message)\n self.write_message(wsobj, decoded)", "def on_message(self, room: Room, event: Dict) -> None:\n logger.debug(event)\n\n logger.info('stores msg in db')\n self.store_msg(event)\n\n if event['content'].get('msgtype') == 'm.text' and event['sender'] != \\\n self.uid:\n\n # add config to event\n event['config'] = self.config\n\n # gives event to mossbot and watching out for a return message\n msg = MOSS.serve(event)\n\n if msg and msg.data:\n\n if msg.type == 'text':\n logger.info('sending text msg...')\n room.send_text(msg.data)\n\n elif msg.type == 'notice':\n logger.info('sending notice msg...')\n room.send_notice(msg.data)\n\n elif msg.type == 'html':\n logger.info('sending html msg...')\n room.send_html(msg.data)\n\n elif msg.type == 'image':\n logger.info('sending image msg...')\n self.write_media('image', room, msg.data)\n\n else:\n logger.error(\n 'could not recognize msg type \"%s\"',\n msg[0]\n )\n\n elif msg and msg.type == 'skip':\n logger.info('skipping msg...')\n\n else:\n logger.debug('no matching in event')", "def on_message(self, msg) -> None:\n\n decoded_msg = json.loads(msg)\n message_type = decoded_msg[\"type\"]\n\n if message_type == MSG_SUBCRIPTIONS:\n\n product_ids = decoded_msg[\"channels\"]\n logging.debug(\"Subscriptions: {}\".format(product_ids))\n\n elif message_type == MSG_SNAPSHOT:\n\n product_id = decoded_msg[\"product_id\"]\n self._snapshot(decoded_msg)\n\n # Old best bid and ask doesn't exist yet, this will always set a new bbo\n self.set_if_new_bbo(product_id)\n\n elif message_type == MSG_L2UPDATE:\n\n product_id = decoded_msg[\"product_id\"]\n self.update(decoded_msg)\n\n self.set_if_new_bbo(product_id)\n\n self.event_count += 1", "def on_message(ws, msg):\n data = json.loads(msg)\n if \"results\" in data:\n # This prints out the 
current fragment that we are working on\n text = data['results'][0]['alternatives'][0]['transcript'].lower()\n print(text)\n # Pass it to the callback\n if CALLBACK(text):\n # If it recognized something, stop listening\n global RUNNING\n RUNNING = False", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def on_message(\n self, client: mqtt.Client, userdata: typing.Any, msg: mqtt.MQTTMessage\n ) -> None:\n self.msgs.append(msg)", "def receiveMessage(self, user, message):\n pass", "def _r_on_incoming_message(self, string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(parse_message_string, string)\n d.addCallback(self._r_handle_message_contents, protocol)", "def receive_message(self, message):\r\n self.state.receive_message(message)\r\n return", "def onMessageReceived(self, inputString):\n return", "def _messageReceived(self, message):\n topic = message[0]\n message = message[1:]\n self.messageReceived(message, topic)", "def on_message(ws, message):\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n message_dict = message_to_dict(message)\n print('[' + st + '] Event in channel: ' + message_dict['channel'] +\n '. Created by user: ' + message_dict['user'] + '. Event Type: ' +\n str(message_dict['type']) + '.')\n handle_response(message_dict)", "def handle_message(self, data):\n message = Message.from_text(data)\n if message is not None:\n print(message.username, message.action, message.channel, message.content)\n self._callback(\"message\", message) # TODO: add additional callbacks", "def _on_mqtt_message(\n self, client: mqtt.Client, userdata: str, message: mqtt.MQTTMessage\n ) -> None:\n self.log.debug(f\"Received message on topic: {message.topic}\")\n self.inbound_message_listener(Message(message.topic, message.payload))", "def messageReceived(self, message):\n raise NotImplementedError(self)", "def on_msg(self, callback, remove=False):\n self._msg_callbacks.register_callback(callback, remove=remove)", "async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })", "def on_watch_message(self, bus, msg):\n msg_struct = msg.get_structure()\n if msg_struct:\n if msg_struct.get_name() == 'GstMessageTag':\n codec_name = ((msg_struct[\"taglist\"].nth_tag_name(0)))\n codec_value = msg_struct[\"taglist\"].get_string(codec_name)\n info_name = codec_name\n c_result, info_value = codec_value\n if c_result:\n self.info_handler(info_name, info_value)\n if codec_name == \"video-codec\":\n self.info_handler(codec_name, info_value)\n r_result, width, height = self.get_resolution()\n if r_result:\n info_name = \"resolution\"\n info_value = \"[{}x{}]\".format(width, height)\n self.info_handler(info_name, info_value)\n bus.remove_signal_watch()", "def on_message(self, message):\n\n logger.debug('Data from client (%s)' % message)\n\n # A message string from the client.\n # Encode it and send it to the endpoint server as bytes.\n self.endpoint_stream.write(message.encode('utf-8'))", "def on_message(self, handler: Callable[[Request], Coroutine[Any, Any, Any]]):\n self.on_message_handler = handler", "async def on_chat_message(self, chat_message):\n pass", "def on_bus_message(self, bus, message):\n pass", "def onMessageEnd(self):", "def on_recv(self, callback):\n return self.message_client.on_recv(callback)", "async def event_message(ctx):\n\n # the bot should not react to itself\n if 
ctx.author.name.lower() == BOT_NICK.lower():\n return\n\n # relay message to command callbacks\n await bot.handle_commands(ctx)", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n threads.deferToThread(callback, *msg.pargs, **msg.kwargs)", "def handle_msg(self, state_id, msg):\n pass", "def sub_callbackmsg(self, msg):\n\n print (msg.message)\n self.received_msg = self.received_msg + [msg.message]\n print (self.received_msg)", "async def chat_message(self, event):\n await self.send(\n {'type': \"websocket.send\",\n 'text': event['response_data']}\n )", "def processReceivedMessage(iTag, clsName, msgID, msg): #@NoSelf", "def handle_received(self) -> None:\n self.buffer: bytes\n while self.buffer:\n try:\n request, self.buffer = parse_request(self.buffer)\n if request is None:\n _LOGGER.debug(\"Not enough data to parse request on event channel\")\n break\n\n _LOGGER.debug(\"Got message on event channel: %s\", request)\n\n # Send a positive response to satisfy the other end of the channel\n # TODO: Add public method to pyatv.http to format a message\n headers = {\n \"Content-Length\": 0,\n \"Audio-Latency\": 0,\n \"Server\": request.headers.get(\"Server\"),\n \"CSeq\": request.headers.get(\"CSeq\"),\n }\n response = (\n f\"{request.protocol}/{request.version} 200 OK\\r\\n\"\n + \"\\r\\n\".join(f\"{key}: {value}\" for key, value in headers.items())\n + \"\\r\\n\\r\\n\"\n )\n self.send(response.encode(\"utf-8\"))\n except Exception:\n _LOGGER.exception(\"Failed to handle message on event channel\")", "def on_message(self, message):\n # Not expecting any message\n if message is None:\n yield self._close_netconf()", "def onMessage(self):\n \"\"\"\n Validates that the received message is from a student and then broadcasts the message to the rest of the class.\n\n @param self: self is the instance of this object.\n @param message: the message that is received\n @param student: the student that sent the message\n \"\"\"\n pass", "async def on_socket_receive(self, msg: \"Msg | MsgProto\") -> None:", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])" ]
[ "0.8410868", "0.8254583", "0.7882204", "0.78618526", "0.7829602", "0.7802724", "0.773378", "0.7572389", "0.7512617", "0.74960405", "0.74841064", "0.74824226", "0.7457746", "0.7441775", "0.74085176", "0.73933196", "0.7379106", "0.7322716", "0.7322716", "0.730575", "0.729197", "0.729197", "0.7284595", "0.72838116", "0.72606355", "0.72436965", "0.7213562", "0.71948355", "0.71827555", "0.71697366", "0.7160725", "0.71404606", "0.70883834", "0.70781153", "0.70545596", "0.7035821", "0.7030046", "0.7030046", "0.7021371", "0.697039", "0.6944535", "0.6940851", "0.6920974", "0.69166934", "0.69108754", "0.69102657", "0.69082093", "0.68999183", "0.6898014", "0.68911606", "0.6887734", "0.68729603", "0.6863757", "0.6859718", "0.6853219", "0.68373764", "0.6832846", "0.68239325", "0.6817295", "0.6802602", "0.67969847", "0.6794432", "0.67941475", "0.6787541", "0.67614484", "0.6761284", "0.6749756", "0.6749756", "0.6749756", "0.6733205", "0.67207634", "0.6694453", "0.6689608", "0.6682256", "0.6673972", "0.6661941", "0.6643456", "0.66421777", "0.66399395", "0.6635076", "0.6633131", "0.66217536", "0.6612692", "0.66111106", "0.6608964", "0.66087776", "0.65951335", "0.65847594", "0.6583949", "0.65721256", "0.6571512", "0.6568762", "0.6565291", "0.6559426", "0.65575755", "0.655179", "0.65497774", "0.6546588", "0.65455425", "0.6541651" ]
0.68061733
59
all common punctuation marks in both Chinese and English; if any marker is not included, you are welcome to open an issue in the GitHub repo.
def filter_punctuation(input_str, remove_duplicate_space=True):
    '''
    punctuation=string.punctuation + string.ascii_letters + \
        '!?。"#$%&'()*+,-/:;<=>@[\]^_`' + \
        '{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞' + \
        '〟〰〾〿–—‘’‛“”„‟…‧﹏.·。《》'
    regex = re.compile('[%s]' % re.escape(punctuation))
    '''
    regex = re.compile(u'[^\u4E00-\u9FA5]')  # match anything that is not a Chinese character
    if remove_duplicate_space:
        result = re.sub(' +', ' ', regex.sub(' ', input_str))
    else:
        result = regex.sub(' ', input_str)
    result = re.sub(r"\d+", " ", result)  # drop digit runs
    result = strQ2B(result)  # convert full-width characters to half-width
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_issue3625():\n nlp = Hindi()\n doc = nlp(u\"hi. how हुए. होटल, होटल\")\n assert [token.text for token in doc] == ['hi', '.', 'how', 'हुए', '.', 'होटल', ',', 'होटल']", "def clean_non_chinese_symbols(text):\n text = regex.sub('[!!]+', \"!\", text)\n text = regex.sub('[??]+', \"?\", text)\n text = regex.sub(\"[a-zA-Z#$%&\\'()*+,-./:;:<=>@,。★、…【】《》“”‘’[\\\\]^_`{|}~]+\", \" UNK \", text)\n return regex.sub(\"\\s+\", \" \", text)", "def is_punct(self, word, language):", "def process_special_sign(self):\r\n # 首先把全部是英文的句子找出来,没有特殊符号,没有其他东西,只有字母和数字。\r\n # 思路大概是用正则表达式确定结尾,用函数判断中间全部都是英文的句子,不允许特殊符号。\r\n # 用上面的check_sents函数,解决这个问题。\r\n all_sents = list()\r\n for i in self.set_of_result[0][\"question_text\"]:\r\n if DataCleanCheckTool.check_sents(i):\r\n all_sents.append(i)\r\n\r\n # 有些特殊情况的数据,直接抛弃掉,数量不大\r\n # 然后有一些描述词性特殊的单词的其实没有意义,直接抛掉\r\n # 还有一些带括号的,那些需要把括号中的内容抛掉\r\n # 但是因为用的是pop,每次pop之后index都变化,所以会跳着pop,因此在数据量大的情况下需要重复执行\r\n for k, v in enumerate(all_sents):\r\n if \". . .\" in v:\r\n all_sents.pop(k)\r\n elif \"...\" in v:\r\n all_sents.pop(k)\r\n elif \"adj.\" in v:\r\n all_sents.pop(k)\r\n elif \"adv.\" in v:\r\n all_sents.pop(k)\r\n elif \"n.\" in v:\r\n all_sents.pop(k)\r\n elif \"v.\" in v:\r\n all_sents.pop(k)\r\n elif \"prep.\" in v:\r\n all_sents.pop(k)\r\n elif \"sth.\" in v:\r\n all_sents.pop(k)\r\n elif \"sb.\" in v:\r\n all_sents.pop(k)\r\n\r\n # 小写开头的都可以全部抛弃掉了,不是完整的真正的句子,只是一段不完整的话。\r\n pattern = re.compile(\"^[a-z].+\")\r\n for k, v in enumerate(all_sents):\r\n try:\r\n pattern.search(v).group()\r\n all_sents.pop(k)\r\n except Exception as e:\r\n logging.exception(e)\r\n\r\n return all_sents", "def get_core_nouns(df):\n\tdf = df[df.pos.str.startswith(u'名詞')\n\t\t\t\t& (df.core_frequency>0)\n\t\t\t\t& (~df.lForm.str.contains(u'■')) # Words including personal info are masked by ■, and cannot be used.\n\t\t\t\t]\n\treturn df", "def _tokenize_chinese_chars(text):\n output = []\n is_blank = False\n for index, char in enumerate(text):\n cp = ord(char)\n if is_blank:\n output.append(char)\n if context[index - 12:index + 1].startswith(\"#idiom\"):\n is_blank = False\n output.append(SPIECE_UNDERLINE)\n else:\n if text[index:index + 6] == \"#idiom\":\n is_blank = True\n if len(output) > 0 and output[-1] != SPIECE_UNDERLINE:\n output.append(SPIECE_UNDERLINE)\n output.append(char)\n elif _is_chinese_char(cp) or is_fuhao(char):\n if len(output) > 0 and output[-1] != SPIECE_UNDERLINE:\n output.append(SPIECE_UNDERLINE)\n output.append(char)\n output.append(SPIECE_UNDERLINE)\n else:\n output.append(char)\n return \"\".join(output)", "def remove_sentences_50pc_punctuations(sentences):\r\n\tnew_sentences = []\r\n\tn = 0\r\n\tfor s in sentences:\r\n\t\tpunc = re.findall(r\"[%s]|[%s]\" % (string.punctuation, zh_punctuation), s)\r\n\t\tif len(s) > 0 and len(punc) / len(s) < 0.5:\r\n\t\t\tnew_sentences.append(s)\r\n\t\telse:\r\n\t\t\tn += 1\r\n\treturn new_sentences, n", "def punct_filter_(w):\n return w in {u'.', u',', u';', u'?', u'!', u'(', u')', u'[', u']'}", "def findPronunciation(data: str, word: str) -> str:\n # 2 cases\n # if kotoba, then pronunciation will start with a ?\n # if kanji, then multiple pronunciation starting from 〗\n # for kotoba\n # showMessage(len(word))\n\n if len(word) != 1:\n string = '?'\n\n else:\n # locate 〗\n start = find(data, '】')\n # get the first index\n start = min(start)\n\n # now check to see if string after 〗is (\n if data[start+1] == \"(\":\n string = \")\"\n else:\n string = \"】\"\n\n\n # find the indices for start and end\n indStart = find(data, string)\n 
indEnd = find(data, '\\n')\n # we can assume the first index is the correct index for indStart\n indStart = indStart[0]\n # now find the closest index that is larger than indStart\n possibleIndEnd = [ind for ind in indEnd if ind > indStart]\n absolute_difference_function = lambda list_value: abs(list_value - indStart)\n indEnd = min(possibleIndEnd, key=absolute_difference_function)\n\n # get pronunciation\n pronunciation = data[indStart+1:indEnd]\n\n # lastly, get rid of spaces\n pronunciation = pronunciation.replace(\" \", \"\")\n\n # if kanji, we need to separate into 2 sections\n if len(word) == 1:\n # for onyomi\n # find all strings that are katakana\n regex = {\"from\": ord(u\"\\u30a0\"), \"to\": ord(u\"\\u30ff\")}\n kata = [regex[\"from\"] <= ord(pronunciation[i]) <= regex[\"to\"] for i in range(len(pronunciation))]\n # find all the places that are listed as TRUE\n indOn = find(kata, True)\n # now find min and max of the indices\n indStart = min(indOn)\n indEnd = max(indOn)\n onyomi = pronunciation[indStart:indEnd+1]\n # lastly, replace any ・ with ,\n onyomi.replace('・', '、')\n\n # for kunyomi\n # find all strings that are hiragana\n regex = {'from': ord(u'\\u3040'), 'to': ord(u'\\u309f')}\n hira = [regex[\"from\"] <= ord(pronunciation[i]) <= regex[\"to\"] for i in range(len(pronunciation))]\n # find all the places that are listed as TRUE\n indHi = find(hira, True)\n # now find min and max of the indices\n indStart = min(indHi)\n indEnd = max(indHi)\n kunyomi = pronunciation[indStart:indEnd+1]\n # lastly, replace any ・ with ,\n kunyomi.replace('・', '、')\n\n # lastly combine the 2 strings\n pronunciation = f\"[音] {onyomi}\\n[訓] {kunyomi}\"\n\n\n return pronunciation", "def _remove_unknown_characters(self, text):\n exist = []\n missing_chars = set([])\n for each_char in text:\n if each_char not in self.char_2_imgs:\n if each_char == '・':\n exist.append(each_char)\n else:\n missing_chars.add(each_char)\n else:\n exist.append(each_char)\n\n return ''.join(exist), missing_chars", "def not_supported_languages() -> Optional[List[Text]]:\n return [\"zh\", \"ja\", \"th\"]", "def remove_punct(self,text):", "def generate_chinese_country_names():\n\n # create a dataframe containing mapping of ISO country codes and chinese names\n html = urllib.request.urlopen(\"https://zh.wikipedia.org/zh-tw/ISO_3166-1\").read()\n soup = BeautifulSoup(html, 'html.parser')\n table = soup.find('table', {'class': 'wikitable sortable'})\n\n columns = [th.text.replace('\\n', '') for th in table.find('tr').find_all('th')]\n\n trs = table.find_all('tr')[1:]\n rows = list()\n for tr in trs:\n rows.append([td.text.replace('\\n', '').replace('\\xa0', '') for td in tr.find_all('td')])\n df = pd.DataFrame(data=rows, columns=columns)\n\n # read existing country csv and find the corresponding chinese country names by ISO 3361 country codes\n df_countries = pd.read_csv(COUNTRY_CSV_PATH)\n chinese_names = list()\n for _, row in df_countries.iterrows():\n\n df_mapped = df[df['三位代碼'] == row['country'].upper()]\n if not df_mapped.empty:\n chinese_names.append(df_mapped['中文名稱'].iloc[0])\n else:\n chinese_names.append(\"\")\n df_countries.name = chinese_names\n\n # manually adjust Taiwan's name\n df_countries.loc[df_countries.country == 'twn', 'name'] = '台灣'\n\n # save result\n df_countries.to_csv(COUNTRY_CSV_PATH, index=False)", "def find_abecedarian_words():\n pass", "def _tokenize_chinese_chars(text):\n output = []\n for char in text:\n cp = ord(char)\n if _is_chinese_char(cp) or is_fuhao(char):\n if len(output) > 0 and 
output[-1] != SPIECE_UNDERLINE:\n output.append(SPIECE_UNDERLINE)\n output.append(char)\n output.append(SPIECE_UNDERLINE)\n else:\n output.append(char)\n return \"\".join(output)", "def _do_smart_punctuation(self, text):\r\n if \"'\" in text: # guard for perf\r\n text = self._do_smart_contractions(text)\r\n text = self._opening_single_quote_re.sub(\"&#8216;\", text)\r\n text = self._closing_single_quote_re.sub(\"&#8217;\", text)\r\n\r\n if '\"' in text: # guard for perf\r\n text = self._opening_double_quote_re.sub(\"&#8220;\", text)\r\n text = self._closing_double_quote_re.sub(\"&#8221;\", text)\r\n\r\n text = text.replace(\"---\", \"&#8212;\")\r\n text = text.replace(\"--\", \"&#8211;\")\r\n text = text.replace(\"...\", \"&#8230;\")\r\n text = text.replace(\" . . . \", \"&#8230;\")\r\n text = text.replace(\". . .\", \"&#8230;\")\r\n return text", "def text_process(mess):\n nopunc= [char for char in mess if char not in string.punctuation]\n nopunc=''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english') and len(word)>2]", "def is_punctuation(ch):\n if (ch == '.'): return False\n if (ch >= '!' and ch <= '/'): return True\n if (ch >= ':' and ch <= '@'): return True\n if (ch >= '\\u2010' and ch <= '\\u2014'): return True # various dashes\n if (is_quote_mark(ch)): return True\n return False", "def use_zh(self):\n pass", "def cleaning(self, document):\n remove_punct = ''.join(i for i in document.lower() if i not in self.punctuation)\n tokenized = [i for i in remove_punct.split() if i not in self.stopwords]\n if self.lang is not 'chinese':\n # Lemmatizes if not chinese\n tokenized = [self.lemmatize.lemmatize(i) for i in tokenized]\n return tokenized", "def _tokenize_chinese_chars(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \") # pragma: no cover\n output.append(char) # pragma: no cover\n output.append(\" \") # pragma: no cover\n else:\n output.append(char)\n return \"\".join(output)", "def replace_punctuation(raw):\r\n\tpunct = set(string.punctuation)\r\n\t\r\n\treturn ''.join([r for r in raw if r not in punct])", "def remove_diacritics(self, text, onehot_label):\n idx = np.random.randint(0, len(onehot_label))\n prevent_loop = 0\n while onehot_label[idx] == 1 or text[idx] == unidecode.unidecode(text[idx]) or text[idx] in string.punctuation:\n idx = np.random.randint(0, len(onehot_label))\n prevent_loop += 1\n if prevent_loop > 10:\n return False, text, onehot_label\n\n onehot_label[idx] = 1\n text[idx] = unidecode.unidecode(text[idx])\n return True, text, onehot_label", "def remove_punct(sample):\n sample[\"full_text\"] = \"\".join([char for char in sample[\"full_text\"] if char not in punct_dic])\n return sample", "def load_common_words(language: str, tot_num: int) -> ty.Set[str]:\n logg = logging.getLogger(f\"c.{__name__}.load_common_words\")\n logg.setLevel(\"DEBUG\")\n logg.debug(\"Start load_common_words\")\n\n lang = pycountry.languages.get(name=language)\n lang_alpha2_tag = lang.alpha_2\n\n common_words_folder = get_package_folders(\"common_words\")\n common_words_path = common_words_folder / f\"{lang_alpha2_tag}.txt\"\n\n common_words = set()\n with common_words_path.open() as common_words_file:\n for line in common_words_file:\n common_words.add(line.strip())\n if len(common_words) == tot_num:\n break\n\n logg.debug(f\"common_words: {common_words}\")\n\n return common_words", "def replace_all_punctuation(token, percent=0.5):\n puncts = 0\n for i in 
range(len(token)):\n if token[i] in TextPreprocessor.PUNCTUATION:\n puncts += 1\n # early stopping if no punctuation after x percent\n if int(len(token) * percent) == i and puncts == 0:\n return token\n if puncts == 0 or len(token) * percent > puncts:\n return token\n else:\n return \"\"", "def cleaning(string, EOS=False):\n\n # before cleaning up, first identify end of the sentences (EOS)\n if EOS:\n pLu = '[{}]'.format(\"\".join([chr(i) for i in range(sys.maxunicode) if chr(i).isupper()]))\n EOS = re.compile(r'([a-z]+|[ş|ı])(\\. )((' + pLu + '[a-z]?)|([0-9]+))')\n string = EOS.sub(r'\\1#\\3', string)\n\n # period at the end of the sentences are being replaced with hastag (#)\n string = string.lower()\n mapping = {}\n mapping['99_807'] = 231\n mapping['105_770'] = 105\n mapping['117_770'] = 117\n mapping['105_775'] = 105\n mapping['117_776'] = 252\n mapping['115_807'] = 351\n mapping['103_774'] = 287\n mapping['97_770'] = 97\n mapping['111_776'] = 246\n mapping['97_785'] = 97\n Alist = {97, 99, 103, 105, 111, 115, 117}\n solv_prob = []\n flag = False\n for i, c in enumerate(string):\n if flag:\n flag = False\n continue # pass this character\n if not ord(c) in Alist:\n solv_prob.append(c) # no need to check this character\n else:\n if i == len(string) - 1:\n continue\n cn = string[i + 1] # next character\n key = '{}_{}'.format(ord(c), ord(cn)) # creating string with their ordinal\n if key in mapping.keys(): # cheking if this is to be mapped\n solv_prob.append(chr(mapping[key])) # append the mapped character to the list\n flag = True # raising flag to pass next character\n continue\n else:\n solv_prob.append(c)\n\n data = ''.join(solv_prob)\n data = data.replace('iğdır', 'ığdır')\n data = data.replace('irak', 'ırak')\n # Data= [d if len(d) > 0 else '#' for d in data.splitlines()] # removing empty lines\n return data", "def test_get_texts_ignores():\n file_map = sd.get_file_map(\".\")\n texts = sd.get_texts(file_map)\n ingnores = \"[:.,;:!?\\\"-()]\\n\".split()\n for text in texts:\n for char in ingnores:\n assert text.find(char) == -1", "def sanitize(wl):\n s = []\n for word in wl:\n for symbol in ['.', '!', ',', '\\n', '\\r', '?']:\n if symbol in word:\n s.append(symbol)\n word = word.replace(symbol, '')\n \n s.append(word)\n return s", "def _tokenize_chinese_chars(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(' ')\n output.append(char)\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def pre_process_question(keyword):\n for char, repl in [(\"“\", \"\"), (\"”\", \"\"), (\"?\", \"\")]:\n keyword = keyword.replace(char, repl)\n\n keyword = keyword.split(r\".\")[-1]\n keywords = keyword.split(\" \")\n keyword = \"\".join([e.strip(\"\\r\\n\") for e in keywords if e])\n return keyword", "def mask_disc_markers(self, text: str) -> str:\n punctuations = \".?!;:-()'\\\"[]\"\n for elem in punctuations:\n text = text.replace(elem, \" \" + elem + \" \")\n text = \" \" + text + \" \"\n for dm in self.dms:\n text.replace(\" \" + dm + \" \", \" <mask> \" * len(dm.split()))\n return text", "def remove_punctuation(sample):\n punctuations = '''!\"&'()*+,-./:;<=>?[\\]^`{|}~'''\n no_punct = \"\"\n for char in sample:\n if char not in punctuations:\n no_punct = no_punct + char\n return no_punct", "def cleanTweet(text, appostrophes=True, emojis=True, html=True, url=True, misspellings=True, punctuation=True, lemming=True,\\\r\n stop=True):\r\n if appostrophes:\r\n #convert appostrophes\r\n filtered_string = 
decontracted(text)\r\n if emojis:\r\n #decoding, removing emojis\r\n filtered_string = filtered_string.encode(\"utf-8\").decode('ascii','ignore')\r\n if html:\r\n #cleaning of html tags\r\n htmltags = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\r\n filtered_string = re.sub(htmltags, '', filtered_string)\r\n if url:\r\n #cleaning of url\r\n url = re.compile(r'https?://\\S+|www\\.\\S+')\r\n filtered_string = re.sub(url, '', text)\r\n if misspellings:\r\n #cleaning of misspellings\r\n spell = SpellChecker()\r\n corrected_text = []\r\n misspelled_words = spell.unknown(filtered_string.split())\r\n for word in filtered_string.split():\r\n if word in misspelled_words:\r\n corrected_text.append(spell.correction(word))\r\n else:\r\n corrected_text.append(word)\r\n filtered_string = \" \".join(corrected_text)\r\n if punctuation:\r\n word_tokens = word_tokenize(filtered_string)\r\n #remove punctuations\r\n table=str.maketrans('','',string.punctuation)\r\n filtered_string.translate(table) \r\n filtered_string = [word.translate(table) for word in word_tokens]\r\n filtered_string = \" \".join(filtered_string)\r\n if lemming:\r\n #lemming of words\r\n word_tokens = word_tokenize(filtered_string)\r\n lemmatizer = WordNetLemmatizer() \r\n filtered_string = [lemmatizer.lemmatize(word) for word in word_tokens]\r\n if stop:\r\n # cleaning from stopwords\r\n stop_words=set(stopwords.words('english'))\r\n stop_word_drop = [] \r\n for word in filtered_string: \r\n if word not in stop_words: \r\n stop_word_drop.append(word) \r\n filtered_string = \" \".join(stop_word_drop)\r\n \r\n #toDos\r\n #cleaning of rare words\r\n # tokens is a list of all tokens in corpus\r\n # freq_dist = nltk.FreqDist(token)\r\n # rarewords = freq_dist.keys()[-50:]\r\n # after_rare_words = [ word for word in token not in rarewords]\r\n #cleaning of slang words\r\n #split attached words, not working and questionable because of all capital words\r\n # filtered_string = \" \".join(re.findall('[A-Z][^A-Z]*', filtered_string))\r\n return filtered_string", "def test_unicodeCombining(self):\n input = raw_unicode(\n r\"Ik ben gei\\u0308nteresseerd in de co\\u00F6rdinatie van mijn knie\\u00EBn, maar kan niet e\\u0301e\\u0301n \\u00E0 twee enqu\\u00EAtes vinden die recht doet aan mijn carri\\u00E8re op Cura\\u00E7ao\")\n output = input.split(\" \")\n output[8] = output[8][0:-1]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV[0])\n self.assertTrue(input[itmV[1]:].startswith(itmO))", "def test_bug2785373(self):\n input = \"So, one dey when I wes 17, I left.\"\n for _ in tokenize_en(input):\n pass\n input = raw_unicode(\"So, one dey when I wes 17, I left.\")\n for _ in tokenize_en(input):\n pass", "def _is_chinese_char(cp):\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if (\n (cp >= 0x4E00 and cp <= 0x9FFF)\n or (cp >= 0x3400 and cp <= 0x4DBF) #\n or (cp >= 0x20000 and cp <= 0x2A6DF) #\n or (cp >= 0x2A700 and cp <= 0x2B73F) #\n or (cp >= 0x2B740 and cp <= 0x2B81F) #\n or (cp >= 0x2B820 and cp <= 0x2CEAF) #\n or (cp >= 0xF900 and cp <= 0xFAFF)\n or (cp >= 0x2F800 and cp <= 0x2FA1F) #\n ): #\n return True\n\n return False", "def setWordKnown(self):\n self.wordKnown = ''.join(['_ ' if w not in self.guessedRight else w for w in self.getWord()])", "def _clean_up(hadith_text: str) -> str:\n punctuations = ''.join([\n # Collected from https://en.wikipedia.org/wiki/Arabic_script_in_Unicode#Punctuation_and_ornaments\n chr(int('060C', 16)), # ARABIC COMMA\n chr(int('060D', 16)), # ARABIC DATE SEPARATOR\n chr(int('060E', 16)), # ARABIC POETIC VERSE SIGN\n chr(int('060F', 16)), # ARABIC SIGN MISRA\n chr(int('061B', 16)), # ARABIC SEMICOLON\n chr(int('061E', 16)), # ARABIC TRIPLE DOT PUNCTUATION MARK\n chr(int('061F', 16)), # ARABIC QUESTION MARK\n chr(int('066D', 16)), # ARABIC FIVE POINTED STAR\n chr(int('06D4', 16)), # ARABIC FULL STOP\n chr(int('06DD', 16)), # ARABIC END OF AYAH\n chr(int('06DE', 16)), # ARABIC START OF RUB EL HIZB\n chr(int('06E9', 16)), # ARABIC PLACE OF SAJDAH\n chr(int('06FD', 16)), # ARABIC SIGN SINDHI AMPERSAND\n chr(int('FD3E', 16)), # Arabic ornate left parenthesis\n chr(int('FD3F', 16)), # Arabic ornate right parenthesis\n ])\n\n # Removing punctuations\n cleaned_text = re.sub('[' + punctuations + ']', ' ', hadith_text)\n\n # Removing any html markup\n cleaned_text = BeautifulSoup(cleaned_text, 'lxml').text\n\n # Removing multiple consecutive whitespaces, including newlines\n cleaned_text = ' '.join(cleaned_text.split())\n\n return cleaned_text", "def fix_natural_language(name):\n\tfor ch in r\"\\`*{}[]()>#+-.!$\":\n\t\tif ch in name:\n\t\t\tname = name.replace(ch,\"_\")\n\treturn name", "def test_text_cleaning(basic_multilingual, clean_multilingual):\n docs = [\"Bonjour le monde! #thisisfrench #ilovefrance\",\n \"Bonjour le monde! 
https://t.co/U0Zjp3tusD\"]\n docs = [Document([], text=text) for text in docs]\n \n basic_multilingual(docs)\n assert [doc.lang for doc in docs] == [\"it\", \"it\"]\n \n assert clean_multilingual.processors[\"langid\"]._clean_text\n clean_multilingual(docs)\n assert [doc.lang for doc in docs] == [\"fr\", \"fr\"]", "def test_codeanalysis_latin():\n code = io.open(TEST_FILE_LATIN, encoding=\"iso-8859-1\").read()\n check_results = (check_with_pyflakes(code, TEST_FILE_LATIN)\n + check_with_pep8(code, TEST_FILE_LATIN)\n + find_tasks(code))\n if PY2:\n num_results = 1\n else:\n num_results = 2\n assert len(check_results) == num_results", "def _tokenize_chinese_chars(self, text):\n output = []\n for char in text:\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n return \"\".join(output)", "def additional_text_preprocessing_with_pos(pos_dict):\n\n tags_to_lemmatize = ['a', 'n', 'v', 'r']\n\n pos_dict = TextPreprocessor.find_named_entities(pos_dict)\n if pos_dict is None:\n return None, None\n prepro = list()\n contains_spelling_mistake = False\n for t in pos_dict:\n token = t['token']\n tag = t['tag'].lower()\n if token not in TextPreprocessor.PUNCTUATION and tag != \",\":\n\n token = TextPreprocessor.replace_user_mentions(token)\n token = TextPreprocessor.replace_urls(token)\n replaced = [token]\n for i in replaced:\n\n i = TextPreprocessor.replace_all_punctuation(i)\n if i.lower() not in TextPreprocessor.STOPWORDS and i != 'URL' and i!= 'USERMENTION':\n if i != \"\" and not re.match('\\B#\\w*[a-zA-Z]+\\w*', i):\n before = i\n i = TextPreprocessor.SPELL_CHECKER.correct(i, tag)\n if i != before:\n contains_spelling_mistake = True\n if tag in tags_to_lemmatize:\n i = TextPreprocessor.lemmatize(i, tag)\n i = TextPreprocessor.stem(i, tag)\n # check again, since stemming, lemmatization or spelling correction can produce stopwords\n # if i.lower() not in TextPreprocessor.STOPWORDS:\n if i != 'URL' and i!= 'USERMENTION' and i!='':\n i = i.lower()\n if re.match(\".*[a-zA-Z]'\", i):\n i = i[:-1]\n prepro.append(i)\n return prepro, contains_spelling_mistake", "def remove_spurious_words(text):\n spurious_words = [\"Cached\", \"Similar\", '的']\n for word in spurious_words:\n text = text.replace(word, \"\")\n return re.sub('[.、”“::a-zA-Z%?=()()—「 /-]', ' ', text)", "def spacing_punctuation(text):\n for punc in all_punct:\n if punc in text:\n text = text.replace(punc, f' {punc} ')\n return text", "def spacing_punctuation(text):\n for punc in all_punct:\n if punc in text:\n text = text.replace(punc, f' {punc} ')\n return text", "def clean_tweet(tweet):\r\n reply_pattern = re.compile(\"^@([a-zA-Z0-9]*) (.*)\")\r\n regexhandler = regex.RegexHandler()\r\n # add mark if tweets starts with a mention (@user)\r\n if reply_pattern.match(tweet.tweet[\"text\"]) is not None:\r\n temp = \"MarkReply \" + tweet.tweet[\"text\"]\r\n else:\r\n temp = tweet.tweet[\"text\"]\r\n # language dependent\r\n\r\n if floodtags.core.statics.StaticData.locations:\r\n for location in floodtags.core.statics.StaticData.locations:\r\n if location in temp:\r\n temp += \" MarkLocation\"\r\n\r\n try:\r\n stemmer = SnowballStemmer(floodtags.core.statics.StaticData.language.lower())\r\n # stem words\r\n temp = \" \".join(\r\n [stemmer.stem(x) if x not in tweet.tweet[\r\n \"keywords\"] and \"MarkReply\" not in x and \"MarkLocation\" not in x else x for x in temp.split()])\r\n except ValueError:\r\n print(\"language not found:\", floodtags.core.statics.StaticData.language)\r\n # pass\r\n\r\n # store language\r\n temp = \"Mark\" + 
tweet.language + \" \" + temp\r\n\r\n # store keyword\r\n\r\n # replace each website with 'MarkWebsite' to create more similarity\r\n temp = regexhandler.replace(temp, 'MarkWebsite', regex.Expressions.website)\r\n # replace each photo url with 'MarkPhoto' to create more similarity\r\n for i in range(len(tweet.tweet[\"photos\"])):\r\n temp = Vectorizer.rreplace(temp, \"MarkWebsite\", \"MarkPhoto\", 1)\r\n # replace each height with 'MarkHeight' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkHeight\", regex.Expressions.waterheight)\r\n # replace each time with 'MarkTime' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkTime\", regex.Expressions.time)\r\n # replace each date with 'MarkDate' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkDate\", regex.Expressions.date)\r\n # replace each number with 'MarkNumber' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkNumber\", regex.Expressions.number)\r\n temp = re.sub('\\n', ' ', temp)\r\n results = re.findall(\"(^|[^@\\w])@(\\w{1,15})\", temp)\r\n # add mark for each user name\r\n if results is not None:\r\n for i in range(len(results)):\r\n temp += \" MarkUserName\"\r\n results = re.findall(\"#(\\S*)\", temp)\r\n # add mark for each hashtag\r\n if results is not None:\r\n for i in range(len(results)):\r\n temp += \" MarkHashTag\"\r\n # add sender as feature\r\n temp = \"Sender\" + tweet.tweet[\"source\"][\"username\"] + \" \" + temp\r\n # remove unnecessary characters and chance text to lower case\r\n return re.sub('[#\\.,:]', '', temp)", "def commonWords(self):\n #utilize similar code used in stats.py\n exclude = set(('!', '.', '?'))\n freq = Stats()\n fullText = []\n #Parse email\n for x in range(self.getSCount()):\n #Simplify email into string of words separated by single space\n sString = self[x].lower()\n sString = ''.join(char for char in sString if char not in exclude)\n sString = sString.split()\n fullText = fullText + sString\n\n #Call findFreqDic() to find frequencies of words\n freqDict = freq.findFreqDic(fullText)\n\n #Analyze 10 words\n numTopic = 10\n \n #Find most and least common calling topNSort and bottomNSort\n mostCommon = freq.topNSort(freqDict, numTopic)\n leastCommon = freq.bottomNSort(freqDict, numTopic)\n \n most = list(mostCommon.keys())\n least = list(leastCommon.keys())\n \n return most, least", "def _is_chinese_char(self, cp):\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if (\n (cp >= 0x4E00 and cp <= 0x9FFF)\n or (cp >= 0x3400 and cp <= 0x4DBF) #\n or (cp >= 0x20000 and cp <= 0x2A6DF) #\n or (cp >= 0x2A700 and cp <= 0x2B73F) #\n or (cp >= 0x2B740 and cp <= 0x2B81F) #\n or (cp >= 0x2B820 and cp <= 0x2CEAF) #\n or (cp >= 0xF900 and cp <= 0xFAFF)\n or (cp >= 0x2F800 and cp <= 0x2FA1F) #\n ): #\n return True # pragma: no cover\n\n return False", "def uCSIsCJKSymbolsandPunctuation(code):\n ret = libxml2mod.xmlUCSIsCJKSymbolsandPunctuation(code)\n return ret", "def split_japanese_text(self, text):\n\n for match in self.word_pattern.finditer(text):\n word = match.group(0)\n got_japanese = False\n for m in self.jword_pattern.finditer(word):\n w = m.group(0)\n got_japanese = True\n yield w.lower()\n if not got_japanese:\n yield word.lower()", "def spelling(p_str):\n spell = SpellChecker(language='en')\n misspelled = spell.unknown(re.sub(r'[^\\w\\s]', '', p_str).split())\n corrections = {}\n for word in misspelled:\n tmp = list(spell.candidates(word))\n tmp.insert(0, spell.correction(word))\n corrections[word] = tmp\n return(corrections)", "def _is_chinese_char(cp):\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((0x4E00 <= cp <= 0x9FFF) or #\n (0x3400 <= cp <= 0x4DBF) or #\n (0x20000 <= cp <= 0x2A6DF) or #\n (0x2A700 <= cp <= 0x2B73F) or #\n (0x2B740 <= cp <= 0x2B81F) or #\n (0x2B820 <= cp <= 0x2CEAF) or\n (0xF900 <= cp <= 0xFAFF) or #\n (0x2F800 <= cp <= 0x2FA1F)): #\n return True\n\n return False", "def detect_language(text, LANGUAGES):\n lang = None\n word_count = 0\n our_test = []\n \n for language in LANGUAGES:\n \n result = get_word_count(text, language['common_words'])\n print(result)\n #import pdb; pdb.set_trace()\n if result > word_count:\n lang = language['name']\n word_count = result\n \n return lang", "def processText(text):\n\n no_punc = [word for word in text.split() if word.isalpha()] # and word not in stopwords.words('english')]\n #removes non-letter characters and only includes words not included in stopwords\n no_punc = \" \".join(no_punc) \n clean_words = nltk.word_tokenize(no_punc) #splits the punctuation marks from the real words\n return clean_words", "def use_chinese_lang():\n with patch.object(seafes_config, 'lang', 'chinese'):\n yield", "def test_ukrainian_symbols(self):\n string = \"Минає ніч від’їзду\"\n expected = \"Minaye nich vid’yizdu\"\n self.assertEqual(transliterate(string), expected)", "def test_get_supported_locales_for_voice_datasets(self):\n pass", "def removeOwnPunctuation(self):\n\t\tself.textFile = self.removePunctuation(self.open(self.filePath)).split()", "def test_no_symbols(self):\n tweet = self.load_tweet('symbols')\n # Save a copy:\n symbols = tweet['entities']['symbols']\n del tweet['entities']['symbols']\n tweet_text = self.api.html_for_tweet(tweet)\n self.assertTrue('symbols: $AAPL and' in tweet_text)\n self.assertTrue('and $ANOTHER and $A.' 
in tweet_text)", "def t(eng, chinese):\n return chinese if 'zh' in get_info().user_language else eng", "def is_punctuation(text):\n return not (text.lower() in AVRO_VOWELS or\n text.lower() in AVRO_CONSONANTS)", "def geminates_checker(self, s):\n s = re.sub(r'([йцкгшщзхфвпрлджчсмтб])\\1+', r'\\1', s)\n s = re.sub(r'н{2}([йцкгшщзхфвпрлджчсмтб ])', r'н\\1', s) \n return s", "def hasPunct(str):\n for c in str:\n if c in string.punctuation:\n return True\n return False", "def test_unwanted_words(self) -> None:\n pad_open: bool = False\n for word in self.report.get_words():\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n for u_word in self.rules.unwanted_words:\n if word.text == u_word[\"word\"]:\n self.add_error(\n f\"Ordet {word.text} är inte tillåtet, \"\n f\"använd {u_word['alternative']} istället.\",\n word=word,\n )\n break", "def _hidden_in_unicode(self, txt):", "def prepare(self):\n\n for i in range(len(self.__corpora)):\n string = self.__corpora[i]\n string = sub(r'[\\n\\t]| {2,}', ' ', string.lower())\n string = sub(r'[^{0}]'.format(self.alphabet + ' '), '', string)\n\n if self.lang == 'uk':\n string = sub(r'[ґ]', 'г', string)\n\n elif self.lang == 'ru':\n string = sub(r'[ё]', 'е', string)\n\n self.__corpora[i] = string", "def _pinyin(self, rest):\n # Fix if sentence contains some english '.tr yacin太牛了'\n rest = filter(lambda x: not self.isascii(x), rest.decode('utf8'))\n def reduce_reading((char, readings)):\n \"\"\"If a character has multiple cjklib readings, use the fine-tuning\n dict from pinyin toolkit and CEDICT as a backup.\"\"\"\n if len(readings) == 1:\n return readings[0]\n else:\n try:\n return self.pinyin_toolkit_lookup[char]\n except KeyError:\n return self._dict_reading_lookup(char)\n\n readings = [self.char_lookup.getReadingForCharacter(x, 'Pinyin') for x in rest]\n res = u' '.join(map(reduce_reading, zip(rest, readings)))\n return res.encode('utf8')", "def collect_english_cats(self):\n tf.logging.info('collecting english categories')\n self.english_cats = list(\n self.frames(filter_english=True, filter_category=True))", "def verbalisePunctuation(self):\n for i, strText in enumerate(self.sentencesList):\n #For all punctuation marks\n for regex, value in list(TextRepresentation.PUNCTUATION.items()):\n strText = re.sub(regex, value, strText)\n self.sentencesList[i] = strText", "def _is_chinese_char(self, cp):\n if cp >= 19968 and cp <= 40959 or cp >= 13312 and cp <= 19903 or cp >= 131072 and cp <= 173791 or cp >= 173824 and cp <= 177983 or cp >= 177984 and cp <= 178207 or cp >= 178208 and cp <= 183983 or cp >= 63744 and cp <= 64255 or cp >= 194560 and cp <= 195103:\n return True\n return False", "def _process(self, word: str) -> List[str]:\n # if a blank arrives from splitting, just return an empty list\n if len(word.strip()) == 0:\n return []\n word = self.convert_consonantal_i(word)\n my_word = \" \" + word + \" \"\n letters = list(my_word)\n positions = []\n for dipth in self.diphthongs:\n if dipth in my_word:\n dipth_matcher = re.compile(\"{}\".format(dipth))\n matches = dipth_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n matches = self.kw_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n letters = string_utils.merge_next(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions.clear()\n if not self._contains_vowels(\"\".join(letters)):\n return [\n 
\"\".join(letters).strip()\n ] # occurs when only 'qu' appears by ellision\n positions = self._starting_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_right(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._starting_consonants_only(letters)\n positions = self._ending_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_left(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._ending_consonants_only(letters)\n positions = self._find_solo_consonant(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_solo_consonant(letters)\n positions = self._find_consonant_cluster(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_consonant_cluster(letters)\n return letters", "def extract_from_folders(folder):\n spanish_wordlist = set()\n to_remove = string.punctuation\n table = {ord(char): None for char in to_remove}\n\n for dir in glob.glob(os.path.join(folder, '*/')):\n for filename in glob.glob(os.path.join(dir, '*.txt')):\n with open(filename, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n words = line.split(' ')\n for word in words:\n word = word.translate(table)\n word = word.lower()\n if word.isalpha():\n spanish_wordlist.add(word)\n\n return spanish_wordlist", "def words(self):\n punctuation = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n lst = []\n for lines in self.lines:\n words = lines.split(' ')\n for word in words:\n no_punc = ''\n for c in word:\n if c not in punctuation:\n no_punc += c.lower()\n if no_punc != '' and no_punc != '\\n':\n lst.append(no_punc.strip('\\n'))\n return lst\n #no_punc += word.lower()\n #for word in no_punc.split(' ')[:-1]:\n #for word in no_punc:\n # lst.append(word)\n #line = lines.strip(os.linesep) # strips away spaces, \\t (tabs), and \\n (new-lines/enter)\n #print(no_punc)\n #print(lst)", "def is_punctuation_automata(final_word: str) -> list:\n\n punct_words = ({'+': 'symb_add', '-': 'symb_diff', '*': 'symb_mult', '/': 'symb_div', '>': 'symb_gt',\n '<': 'symb_lt', '(': 'symb_oparentesis', ')': 'symb_cparentesis', '.':'symb_dot',\n ':': 'symb_col',';':'symb_semicol',',':'symb_coma', '=': 'symb_eq',':=':'symb_assign',\n '>=':'symb_gte', '<=':'symb_lte', '<>':'symb_neq'})\n\n if final_word in punct_words:\n token = punct_words[final_word]\n return [True, token]\n else :\n return [False, \"\"]", "def _derive_country_JP(place):\n derived = []\n if _JP_FU_SUFFIX.search(place.asciiname):\n bare = _JP_FU_SUFFIX.sub(\"\", place.asciiname)\n derived += [bare, bare + \" prefecture\", bare + \" pref\"]\n elif _JP_KEN_SUFFIX.search(place.asciiname):\n bare = _JP_KEN_SUFFIX.sub(\"\", place.asciiname)\n derived += [bare, bare + \" prefecture\", bare + \" pref\",\n bare + \"-ken\", bare + \" ken\"]\n elif _JP_SHI_SUFFIX.search(place.name):\n bare = _JP_SHI_SUFFIX.sub(\"\", place.name)\n derived += [bare, bare + \"-city\", bare + \" city\"]\n elif _JP_KU_SUFFIX.search(place.name):\n bare = _JP_KU_SUFFIX.sub(\"\", place.name)\n derived += [bare, bare + \"-ku\", bare + \" ku\", bare + \" ward\"]\n\n en_names = [DerivedName(text.lower(), \"en\") for text in derived]\n _LOGGER.debug(\"derive_country_JP: en_names: %r\", en_names)\n\n if _JA_JP_SHI_SUFFIX.search(place.name):\n bare = 
_JA_JP_SHI_SUFFIX.sub(\"\", place.name)\n ja_names = [DerivedName(bare, \"ja\")]\n else:\n ja_names = []\n return en_names + ja_names", "def non_std_words(work):\n dictionary = enchant.Dict(\"en_US\")\n non_std_word = []\n\n for elem in work:\n lyrics = [item for sublist in elem[1] for item in sublist]\n lyrics = [i for i in lyrics if i[0] not in [',', '.', \"'\", '?', '!', '’', '&', '#', ':']]\n word_count = 1\n not_word_count = 1\n for tuples in lyrics:\n if dictionary.check(tuples[0]):\n word_count += 1\n else:\n not_word_count += 1\n\n non_std_word.append((not_word_count/(not_word_count+word_count), elem[0]))\n\n return non_std_word", "def remove_punc_sw(self, docs):\n \n new_docs = []\n \n for text in docs:\n \n for p in punc:\n text = text.replace(p,' ')\n text = text.replace('-', '')\n text = text.replace(\"’\", ' ')\n text = text.lower()\n tokens = word_tokenize(text)\n filtered_tokens = list(filter(lambda token: token not in stopwords, tokens))\n \n new_text = \" \".join(filtered_tokens)\n new_docs.append(new_text)\n \n return pd.Series(new_docs)", "def fetch_languages(self):\r\n \r\n # tokenize, clean and filter document tokens \r\n toks = [re.sub(r'[^a-zA-Z]','', tok.text.lower().strip()) for tok in self.doc]\r\n toks = [tok for tok in toks if len(tok)>1 and tok in LANGUAGES]\r\n toks = sorted(set(toks))\r\n \r\n return toks", "def get_indicators_and_clean_comments(df):\n # Count number of \\n\n df[\"ant_slash_n\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\n\", x))\n # Get length in words and characters\n df[\"raw_word_len\"] = df[\"comment_text\"].apply(lambda x: len(x.split()))\n df[\"raw_char_len\"] = df[\"comment_text\"].apply(lambda x: len(x))\n # Check number of upper case, if you're angry you may write in upper case\n df[\"nb_upper\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"[A-Z]\", x))\n # Number of F words - f..k contains folk, fork,\n df[\"nb_fk\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"[Ff]\\S{2}[Kk]\", x))\n # Number of S word\n df[\"nb_sk\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"[Ss]\\S{2}[Kk]\", x))\n # Number of D words\n df[\"nb_dk\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"[dD]ick\", x))\n # Number of occurence of You, insulting someone usually needs someone called : you\n df[\"nb_you\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\W[Yy]ou\\W\", x))\n # Just to check you really refered to my mother ;-)\n df[\"nb_mother\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\Wmother\\W\", x))\n # Just checking for toxic 19th century vocabulary\n df[\"nb_ng\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\Wnigger\\W\", x))\n # Some Sentences start with a <:> so it may help\n df[\"start_with_columns\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"^\\:+\", x))\n # Check for time stamp\n df[\"has_timestamp\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\d{2}|:\\d{2}\", x))\n # Check for dates 18:44, 8 December 2010\n df[\"has_date_long\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\D\\d{2}:\\d{2}, \\d{1,2} \\w+ \\d{4}\", x))\n # Check for date short 8 December 2010\n df[\"has_date_short\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\D\\d{1,2} \\w+ \\d{4}\", x))\n # Check for http links\n df[\"has_http\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"http[s]{0,1}://\\S+\", x))\n # check for mail\n df[\"has_mail\"] = df[\"comment_text\"].apply(\n lambda x: 
count_regexp_occ(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+', x)\n )\n # Looking for words surrounded by == word == or \"\"\"\" word \"\"\"\"\n df[\"has_emphasize_equal\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\={2}.+\\={2}\", x))\n df[\"has_emphasize_quotes\"] = df[\"comment_text\"].apply(lambda x: count_regexp_occ(r\"\\\"{4}\\S+\\\"{4}\", x))\n\n # Now clean comments\n df[\"clean_comment\"] = df[\"comment_text\"].apply(lambda x: prepare_for_char_n_gram(x))\n\n # Get the new length in words and characters\n df[\"clean_word_len\"] = df[\"clean_comment\"].apply(lambda x: len(x.split()))\n df[\"clean_char_len\"] = df[\"clean_comment\"].apply(lambda x: len(x))\n # Number of different characters used in a comment\n # Using the f word only will reduce the number of letters required in the comment\n df[\"clean_chars\"] = df[\"clean_comment\"].apply(lambda x: len(set(x)))\n df[\"clean_chars_ratio\"] = df[\"clean_comment\"].apply(lambda x: len(set(x))) / df[\"clean_comment\"].apply(\n lambda x: 1 + min(99, len(x)))", "def check_common_word(song: Song, result: Result) -> bool:\n\n sentence_words = slugify(song.name).split(\"-\")\n to_check = slugify(result.name).replace(\"-\", \"\")\n\n for word in sentence_words:\n if word != \"\" and word in to_check:\n return True\n\n return False", "def unknown(url):\n\t\n\t# gets the text of the page\n\thtml = request.urlopen(url).read().decode('utf8')\n\traw = BeautifulSoup(html).get_text()\n\tjunk = set(words.words())\n\t# finds the lower case words by searching for a word boundary plus one or more lower case letters\n\tlower_case_words = re.findall(r'\\b[a-z]+', raw)\n\n\t# searches through the list of lower case words and gets rid of those not in the words corpus.\n\tunknowns = [word for word in lower_case_words if word not in junk]\n\tprint(unknowns)", "def clean_text_from_general_punctuation_unicode(text):\n return re.sub(r\"([\\u2000-\\u206F])\", \" \", text)", "def clean_text_from_general_punctuation_unicode(text):\n return re.sub(r\"([\\u2000-\\u206F])\", \" \", text)", "def removePunc(text):\r\n\t\r\n\tlistFromText = []\r\n\r\n\tfor word in text.split():\r\n\t\tword = word.strip(string.punctuation) # menghapus tanda baca dalam teks\r\n\t\tlistFromText.append(word)\r\n\r\n\treturn listFromText", "def No_with_word(token_text):\n\n tmp=''\n for i,word in enumerate(token_text):\n if word==u'не':\n tmp+=(\"_\".join(token_text[i:i+2]))\n tmp+= ' '\n else:\n if token_text[i-1]!=u'не':\n tmp+=word\n tmp+=' '\n return tmp", "def get_cruftmoji(percentage):\n level = [\n # Master\n (\"\\xf0\\x9f\\x99\\x8f \\xf0\\x9f\\x8d\\xbb \\xf0\\x9f\\x8d\\x95 \\xf0\\x9f\\x91\\xbe \"\n \"\\xf0\\x9f\\x8d\\x95 \\xf0\\x9f\\x8d\\xbb \\xf0\\x9f\\x99\\x8f\"),\n # Snakes on a Plane\n \"\\xf0\\x9f\\x90\\x8d \\xf0\\x9f\\x90\\x8d \\xe2\\x9c\\x88\\xef\\xb8\\x8f\",\n # Furry Hat Pizza Party\n \"\\xf0\\x9f\\x8d\\x95 \\xf0\\x9f\\x92\\x82 \\xf0\\x9f\\x8d\\x95\",\n \"\\xf0\\x9f\\x91\\xbb\", # Ghost\n \"\\xf0\\x9f\\x92\\xa3\", # The Bomb\n \"\\xf0\\x9f\\x90\\xa9 \\xf0\\x9f\\x92\\xa8\", # Poodle Fart\n \"\\xf0\\x9f\\x92\\x80\", # Skull\n \"\\xf0\\x9f\\x93\\xbc\", # VHS Cassette\n \"\\xf0\\x9f\\x8c\\xb5\", # Cactus\n \"\\xf0\\x9f\\x92\\xa9\", # Smiling Poo\n \"\\xf0\\x9f\\x92\\xa9 \" * 3] # Smiling Poo (For 100%)\n return level[int(percentage * 10)].decode(\"utf-8\")", "def punctcheck(word):\r\n remove = string.punctuation\r\n pattern = r\"[{}]\".format(remove)\r\n\r\n while len(word) > 0 and word[0] in pattern:\r\n word = word[1:]\r\n\r\n while len(word) > 0 and 
word[-1] in pattern:\r\n word = word[:-1]\r\n\r\n return word", "def test_forbidden_words(self) -> None:\n pad_open: bool = False\n words: List[Word] = self.report.get_words()\n forbidden_words: List[Word] = []\n last_error: bool = False\n\n for word in words:\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n if (word.text in self.rules.forbidden_words) or any(\n [b in self.rules.forbidden_words for b in word.baseform]\n ):\n forbidden_words.append(word)\n last_error = True\n continue\n if last_error:\n last_error = False\n combo = \" \".join([w.text for w in forbidden_words])\n start, _ = self.report.get_word_postion(forbidden_words[0])\n _, end = self.report.get_word_postion(forbidden_words[-1])\n self.add_error(\n f\"Ordet {combo} får endast förekomma i citat.\", position=(start,end)\n )", "def clean_text(text):\n lowercase = tf.strings.lower(text)\n stripped_html = tf.strings.regex_replace(lowercase, \"<br />\", \" \")\n cleaned_punctuation = tf.strings.regex_replace(\n stripped_html, \"[%s]\" % re.escape(string.punctuation), \"\"\n )\n return cleaned_punctuation", "def fixaSintaxiGitHub(md):\n md = fixaBlocs(md)\n md = fixaLiniesComencenPerCometes(md)\n return md", "def remove_emoji_punc(text):\n \n allchars = [str for str in text]\n emoji_list = [c for c in allchars if c in emoji.UNICODE_EMOJI]\n clean_text = ' '.join([str for str in text.split() if not any(i in str for i in emoji_list)])\n\n \n s1 = clean_text.replace(u'’', u\"\").replace(\"'\",\"\")\n s1 = re.sub(r'[^a-z0-9 ]+', ' ', s1)\n \n return \" \".join(s1.split())", "def remove_punc(text):\n for punctuation in string.punctuation:\n text = text.replace(punctuation, ' ')\n text = re.sub(\" +\", \" \", text)\n return text", "def clean(tweet):\n #Separates the contractions and the punctuation\n\n\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<user>\", \"\")\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<url>\", \"\")\n tweet = correct_spell(tweet)\n return tweet.strip().lower()", "def test_unicodeBasic(self):\n input = raw_unicode(\n r\"Ik ben ge\\u00EFnteresseerd in de co\\u00F6rdinatie van mijn knie\\u00EBn, maar kan niet \\u00E9\\u00E9n \\u00E0 twee enqu\\u00EAtes vinden die recht doet aan mijn carri\\u00E8re op Cura\\u00E7ao\")\n output = input.split(\" \")\n output[8] = output[8][0:-1]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV[0])\n self.assertTrue(input[itmV[1]:].startswith(itmO))", "def test_drop_punctuation():\n assert TextCleaner().transform([[\"'test!?\"]])[\"corpus\"][0] == \"test\"", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def _add_dependency_if_punct(self):\n if self.pos == 'PUNCT':\n try:\n # Getting unicode category to determine the direction.\n # Concatenates to the following if it belongs to Ps or Pi category.\n # Ps: Punctuation, open (e.g. opening bracket characters)\n # Pi: Punctuation, initial quote (e.g. 
opening quotation mark)\n # Otherwise, concatenates to the previous word.\n # See also https://en.wikipedia.org/wiki/Unicode_character_property\n category = unicodedata.category(self.word)\n self.dependency = category in ('Ps', 'Pi')\n except:\n pass", "def remove_punct(data):\n punct = \"!\\\"#$%&()*+-./:;<=>?@[\\]^_`{|}~\\n\"\n for i in range(len(punct)):\n data = np.char.replace(data, punct[i], ' ')\n data = np.char.replace(data, \" \", \" \")\n data = np.char.replace(data, ',', '')\n return data" ]
[ "0.65874964", "0.5815723", "0.57310444", "0.5641857", "0.5631367", "0.5505692", "0.55017436", "0.54730123", "0.54569554", "0.54323596", "0.5372543", "0.5367185", "0.5364872", "0.5327742", "0.52777237", "0.5227759", "0.5223804", "0.5223304", "0.51749724", "0.51537734", "0.51266253", "0.51056606", "0.5100743", "0.5093053", "0.50847673", "0.508319", "0.50778997", "0.5073556", "0.5070026", "0.50698036", "0.50674", "0.5066761", "0.505706", "0.50531155", "0.5048138", "0.5047588", "0.50429744", "0.5036165", "0.5033678", "0.5019197", "0.50081664", "0.50059295", "0.49952418", "0.49859357", "0.49811545", "0.49645218", "0.49645218", "0.49630424", "0.4959226", "0.49575555", "0.49556583", "0.49523628", "0.4948684", "0.49459943", "0.49453694", "0.49428162", "0.49289098", "0.49225748", "0.49077553", "0.4906685", "0.4904676", "0.48997682", "0.48995194", "0.48903567", "0.48821613", "0.48790962", "0.48756883", "0.4875229", "0.4872983", "0.48714206", "0.486849", "0.48666906", "0.4862838", "0.48618343", "0.48610377", "0.48599723", "0.485745", "0.48472586", "0.48454708", "0.48448706", "0.4842798", "0.48411134", "0.48375204", "0.4837453", "0.4837453", "0.48374167", "0.4834072", "0.48311764", "0.48269904", "0.48245555", "0.481664", "0.4812194", "0.48093188", "0.48063856", "0.48049557", "0.48032978", "0.4803189", "0.47944084", "0.47942325", "0.4791082" ]
0.49559984
50
Converting fullwidth characters to halfwidth characters
def strQ2B(ustring):
    rstring = ""
    for uchar in ustring:
        inside_code = ord(uchar)
        if inside_code == 12288:  # U+3000 ideographic (full-width) space -> ASCII space
            inside_code = 32
        elif (inside_code >= 65281 and inside_code <= 65374):  # U+FF01..U+FF5E full-width forms
            inside_code -= 65248  # offset 0xFEE0 maps them onto ASCII 0x21..0x7E
        rstring += chr(inside_code)
    return rstring
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fullwidth(st):\n ret = \"\"\n if not st: return ret\n for c in st:\n i = ord(c)\n if c == \" \":\n ret += chr(0x3000)\n elif 0x21 <= i <= 0x7f:\n ret += chr(i - 0x21 + 0xff01)\n else:\n ret += c\n return ret", "def _full_to_half(s):\n n = []\n for char in s:\n num = ord(char)\n if num == 0x3000:\n num = 32\n elif 0xFF01 <= num <= 0xFF5E:\n num -= 0xfee0\n char = chr(num)\n n.append(char)\n return ''.join(n)", "def full2half(uc):\n return unicodedata.normalize('NFKC', uc)", "def full2half(uc):\n return unicodedata.normalize('NFKC', uc)", "def full_to_half(s):\n n = []\n for char in s:\n num = ord(char)\n if num == 0x3000:\n num = 32\n elif 0xFF01 <= num <= 0xFF5E:\n num -= 0xfee0\n char = chr(num)\n n.append(char)\n return ''.join(n)", "def normalize_alef_bw(s):\n\n return _ALEF_NORMALIZE_BW_RE.sub(u'A', s)", "def textwide(s, tf):\r\n width = 350 ## default ok for Arial or Helvetica\r\n if gv[\"font\"] == \"Times-roman\":\r\n width = 330\r\n if gv[\"font\"] == \"Courier\":\r\n width = 390\r\n if gv[\"fontfixed\"] is False:\r\n localfontsize = int(gv[\"fontsize\"]*gv[\"globalscale\"])\r\n else:\r\n localfontsize = int(gv[\"fontsize\"])\r\n return tf*localfontsize * len(s)*width/(1000*(gv[\"fixedUR\"][0] - gv[\"fixedLL\"][0]))", "async def aesthetic(self, ctx, *, text):\n out = \"\"\n for char in text:\n out += utils.fullwidth_transform.get(char, char)\n await ctx.send(out)", "def normalize_teh_marbuta_bw(s):\n\n return s.replace(u'p', u'h')", "def swap_halves(s):\r\n\r\n half_s = len(s) // 2\r\n \r\n first_half = s[0: half_s]\r\n new_word = s[half_s:] + first_half \r\n\r\n return new_word", "def _winfix(s):\n return s.replace('2L', '2').replace('3L', '3').replace('4L', '4').replace('5L', '5')", "def _padboth(width, s):\n fmt = \"{0:^%ds}\" % width\n return fmt.format(s)", "def _fixed_width_str(self, x, fill=' '):\n x_str = str(x)\n l = len(x_str)\n pad = self.width - l\n if pad < 0:\n raise Exception(\"Your string is too long!\")\n return fill * pad + x_str", "def _wadifyString(s):\n\n if len(s) < 8:\n s += \"\\x00\" * (8 - len(s))\n return s", "def get_char_width(self, char: str) -> float:\n pass", "def truncate(string):", "def s_convert(w):\n try:\n if w[-1] in ['s', 'S']:\n return convert(w.upper())[:-1] + '\\u03c2'\n return convert(w.upper())\n except:\n logging.error('could not convert %s' % w)", "def double_char(s):\n x = 0\n doubler = ''\n while(x < len(s)):\n doubler = doubler + (s[x] * 2)\n x = x + 1\n \n \n return doubler", "def normalize_alef_safebw(s):\n\n return _ALEF_NORMALIZE_SAFEBW_RE.sub(u'A', s)", "def fix_ending(x):\n x = strip_stoich_wrapper(x)\n x = re.sub(r'(?<=[a-zA-Z])\\-(?=[a-zA-Z]$)', ' ', x)\n return x", "def just(s: str) -> str:\n return s.ljust(50, \"_\")", "def double_char(s):\n return \"\".join(letter * 2 for letter in s)", "def bwt(s):\n s = \"\\002\" + s + \"\\003\"\n table = sorted(s[i:] + s[:i] for i in range(len(s)))\n last_column = [row[-1:] for row in table]\n return \"\".join(last_column)", "def strmode(mode):\n return mode.rjust(6, '0')", "def urlify2(w, length):\n chars = []\n while i < len(w):\n c = w[i]\n if c == ' ':\n chars.append('%20') \n else:\n chars.append(c)\n i += 1\n url_w = build_string(chars)\n return url_w", "def generate_bar_widths(s):\n # implement this function!\n bar_widths = \"111\"\n for i in range(int(len(s) / 2)):\n if s[i] == \"0\":\n bar_widths += \"3211\"\n elif s[i] == \"1\":\n bar_widths += \"2221\"\n elif s[i] == \"2\":\n bar_widths += \"2122\"\n elif s[i] == \"3\":\n bar_widths += \"1411\"\n elif s[i] == \"4\":\n 
bar_widths += \"11411\"\n elif s[i] == \"5\":\n bar_widths += \"1231\"\n elif s[i] == \"6\":\n bar_widths += \"1114\"\n elif s[i] == \"7\":\n bar_widths += \"1312\"\n elif s[i] == \"8\":\n bar_widths += \"1213\"\n elif s[i] == \"9\":\n bar_widths += \"3112\"\n bar_widths += \"11111\"\n for i in range(int(len(s)/2) + 1, len(s)):\n if s[i] == \"0\":\n bar_widths += \"3211\"\n elif s[i] == \"1\":\n bar_widths += \"2221\"\n elif s[i] == \"2\":\n bar_widths += \"2122\"\n elif s[i] == \"3\":\n bar_widths += \"1411\"\n elif s[i] == \"4\":\n bar_widths += \"11411\"\n elif s[i] == \"5\":\n bar_widths += \"1231\"\n elif s[i] == \"6\":\n bar_widths += \"1114\"\n elif s[i] == \"7\":\n bar_widths += \"1312\"\n elif s[i] == \"8\":\n bar_widths += \"1213\"\n elif s[i] == \"9\":\n bar_widths += \"3112\"\n bar_widths += \"111\"\n return bar_widths", "def _adjust_char_values(self, print_mode):\n\n if print_mode & self.FONT_MASK != 0:\n # FontB\n self._char_height = 17\n char_width = 9\n else:\n # FontA\n self._char_height = 24\n char_width = 12\n\n # Double Width Mode\n if print_mode & self.DOUBLE_WIDTH_MASK != 0:\n self._max_column /= 2\n char_width *= 2\n\n # Double Height Mode\n if print_mode & self.DOUBLE_HEIGHT_MASK != 0:\n self._char_height *= 2\n\n self._max_column = (384 / char_width)", "def wrap(text, width):\n retstr = \"\"\n for word in text.split(' '):\n if len(retstr)-retstr.rfind('\\n')-1 + len(word.split('\\n',1)[0]) >= width:\n retstr += ' \\n' + word\n else:\n retstr += ' ' + word\n return retstr", "def normalize_teh_marbuta_hsb(s):\n\n return s.replace(u'\\u0127', u'h')", "def normalize(self, what):\n txt = strippedtxt(what, [\"\\002\", \"\\003\"])\n txt = re.sub(\"\\s+\", \" \", what)\n txt = stripcolor(txt)\n txt = txt.replace(\"\\002\", \"*\")\n txt = txt.replace(\"<b>\", \"*\")\n txt = txt.replace(\"</b>\", \"*\")\n txt = txt.replace(\"<i>\", \"\")\n txt = txt.replace(\"</i>\", \"\")\n txt = txt.replace(\"&lt;b&gt;\", \"*\")\n txt = txt.replace(\"&lt;/b&gt;\", \"*\")\n txt = txt.replace(\"&lt;i&gt;\", \"\")\n txt = txt.replace(\"&lt;/i&gt;\", \"\")\n return txt", "def underline(string, character=\"=\"):\n return character * len(string)", "def tabify(st: str, width: int) -> str:\n assert width % 8 == 0\n if len(st) >= width:\n # Ensure some empty space at the end\n return f'{st}\\t'\n padding = width - len(st)\n tabcount = padding // 8\n if padding % 8 > 0:\n tabcount += 1\n return st + ('\\t' * tabcount)", "def unH(s):\n return ''.join([chr(int(s[i:i+2],16)) for i in range(2, len(s),2)])", "def wrap(s, width, hyphen_break=False, break_chrs=''):\n assert type(s) == str\n return [pad_str(substr, width) for substr in\n break_to_width(s, width, hyphen_break=hyphen_break,\n break_chrs=break_chrs)]", "def format_steering(left, right):\n left = int(left * 64 + 64)\n right = int(right * 64 + 64)\n left = min(max(1, left), 127)\n right = min(max(1, right), 127)\n return left, right", "def pretty_hebrew(val):\n return 'font-size:20px; font-family: Times New Roman; text-align: right; max-width: 500px'", "def get_pad2(s1, s2):\n return \" \" * (40 - len(s1) - len(s2))", "def horizontal_char(self):\n ...", "def _preprocess(self, sent: str) -> str:\n sent = sent.replace(\" \", \"▁\")\n return \" \".join([c for c in sent])", "def get_chanstr(self, full=False):\n\n # if whole number, strip off decimal part\n # see also http://stackoverflow.com/q/11227620/974555\n chanstr = str(self.centre_frequency/decimal.Decimal(\"1e9\"))\n if '.' 
in chanstr:\n chanstr = chanstr.rstrip('0').rstrip('.')\n\n if full:\n widthstr = \"-\" + str(self.width/decimal.Decimal(\"1e9\"))\n if self.sideband == 0:\n sbstr = \"\"\n else:\n sbstr = \"±\" + str(self.sideband/decimal.Decimal(\"1e9\"))\n return chanstr + sbstr + widthstr\n else:\n return chanstr", "def wrappedchars(string,chars):\n index = string.index(chars)\n if index != 0:\n chars = string[index-1] + chars\n else:\n chars = \" \" + chars\n if index + len(chars) + 1 <= len(chars):\n chars += string[index + len(chars) + 1]\n else:\n chars += \" \"\n return chars", "def center(text, width=72, pad_character=\" \"):\n\n length = len(text)\n prefix = (width - length) // 2\n suffix = width - prefix - length\n return (prefix * \" \") + text + (suffix * \" \")", "def short(text):\n rep = {\n ' *health *center': '',\n ' *health *ceanter': '',\n ' +H[./]*C': '',\n ' *health *post': '',\n ' *heslth *post': '',\n ' *Haelth *Post': '',\n ' *Health *Poat': '',\n ' *hospital': '',\n ' +h[./]*p': '',\n ' {2,}': ''}\n\n return reduce(lambda a, kv: re.sub(*kv, a, flags=re.I), rep.items(), text)", "def pad_normalize(s):\n\n if len(s) < length:\n s += '0' * (length-len(s))\n else:\n s = s[:length]\n return [float(int(s[i], 16)/255) for i in range(0, length)]", "def uCSIsHalfwidthandFullwidthForms(code):\n ret = libxml2mod.xmlUCSIsHalfwidthandFullwidthForms(code)\n return ret", "def centered_text(text: str, length: int = -1) -> str:\n\n t = text.strip()\n\n # If length is longer than the console width, then squeeze it to fit\n num_col = shutil.get_terminal_size((80, 20)).columns\n if length > num_col:\n length = num_col\n\n if len(t) >= length:\n return t\n\n space_tot = length - len(t)\n space_num = space_tot // 2\n\n space = \" \"*space_num\n\n if space_tot % 2 == 0:\n return space + t + space\n else:\n # Places the extra space at the end of the string\n return space + t + space + \" \"", "def _get_string_cfdi(text, size=100):\n if not text:\n return None\n text = text.replace('|', ' ').replace('/', '').replace('-', '').replace('_', '')\n return text.strip()[:size]", "def _pad_shorter(sequence: str) -> str:\n return sequence.ljust(3, \"X\")", "def get_suffix(word, length):\n if length <= 0:\n return \"\"\n if length <= len(word):\n start = len(word) - length\n return word[start:]\n else:\n return word.rjust(length, \"*\")", "def PROPER(text):\n return text.title()", "def pad_ansi(text, width, char, left=False):\n current_width = len(ANSI_PATTERN.sub('', text))\n parts = [text, (width - current_width) * char]\n if left:\n parts = reversed(parts)\n return ''.join(parts)", "def normalize(self, text: str) -> str:", "def center_string_box(string, x_start, x_end):\n return (x_end - x_start) // 2 - len(string) // 2", "def _squish_name(self, name, space):\n if len(name) <= space:\n return name\n if space < 3:\n raise ValueError(\"too much squishing!\")\n return name[:space - 2] + \"~\" + name[-1]", "def get_prefix(word, length):\n if length <= 0:\n return \"\"\n if length <= len(word):\n return word[:length]\n else:\n return word.ljust(length, \"*\")", "def print_space(self,text,width,w=1,h=1):\n texlen = len(text)\n if texlen > width:\n text = text[:width]\n self.lesprint(text,width)", "def divide_str(string):\n new_str = ''\n for i in range(0, int(len(string)), 2):\n new_str += string[i:i + 2] + ' '\n return new_str", "def segment(text: str) -> str:\n import regex\n\n # Chinese\n text = regex.sub(r\"(\\p{Han})\", r\" \\1 \", text)\n # Korean\n text = regex.sub(r\"(\\p{Hangul})\", r\" \\1 \", text)\n # 
Japenese\n text = regex.sub(r\"(\\p{Hiragana})\", r\" \\1 \", text)\n text = regex.sub(r\"(\\p{Katakana})\", r\" \\1 \", text)\n\n text = text.replace(\" \", \" \").strip()\n return text;", "def bin_to_char(exp):\n new_exp = \"\"\n for i in range(0,len(exp)):\n if exp[i] == \"1\":\n new_exp += \"#\"\n else:\n new_exp += \" \"\n return new_exp", "def simplifying(text):\n str(text)\n max_chars = 20\n size = len(text) // max_chars\n i = 0\n fragments = []\n while i <= size:\n fragments.append(text[max_chars * i:max_chars * (i + 1)])\n i += 1\n return '\\n'.join(fragments)", "def text_width(text):\n # Really crude guess would be: return len(text)/2\n return sum(GLYPH_WIDTHS.get(c, .5) for c in text)", "def urlify(w, length):\n return w.strip().replace(' ', '%20')", "def normalize_alef_xmlbw(s):\n\n return _ALEF_NORMALIZE_XMLBW_RE.sub(u'A', s)", "def break_to_width(s, width, hyphen_break=False, break_chrs=''):\n if width == 1:\n return list(s)\n output = []\n while len(s) > 0:\n\n # Remainder is short enough\n if len(s) <= width:\n output.append(s)\n return output\n\n # Try to line break in an acceptable place\n space_ind = right_index_of(s, ' ', width)\n break_ind = find_break_acceptable_index(s, break_chrs, width)\n\n # If not possible, force break line\n if space_ind is None and break_ind is None:\n if hyphen_break:\n ind = width - 1\n output.append(s[:ind] + ('-' if width > 1 else ''))\n s = s[ind:]\n else:\n output.append(s[:width])\n s = s[width:]\n\n else:\n # If a nonspace char was found, we want to break *after* it\n if break_ind is not None:\n break_ind += 1\n\n # Break on the later point\n if break_ind is None or space_ind > break_ind:\n output.append(s[:space_ind])\n s = s[space_ind+1:] # Skip over break spaces\n else:\n output.append(s[:break_ind])\n s = s[break_ind:]\n\n s = s.strip()\n return output", "def char2bf(char):\n\n result_code = \"\"\n ascii_value = ord(char)\n #print(ascii_value)\n factor = int(ascii_value / 10)\n #print(factor)\n remaining = int(ascii_value % 10)\n #print(remaining)\n\n result_code += \"%s\\n\" % (\"+\" * 10)\n result_code += \"[\\n\"\n result_code += \" >\\n\"\n result_code += \" %s\\n\" % (\"+\" * factor)\n result_code += \" <\\n\"\n result_code += \" -\\n\"\n result_code += \"]\\n\"\n result_code += \">\\n\"\n result_code += \"%s\\n\" % (\"+\" * remaining)\n result_code += \".\\n\"\n result_code += \"[-]\\n\"\n #print(result_code)\n return result_code", "def dump_format(width):\n characters = width // 4\n remainder = width % 4\n characters += min(1, remainder)\n format_string = \"{:0\" + str(characters) + \"x}\"\n return format_string", "def get_width(self):\n return \"%s\" % self.width", "def fill_with_spaces(line: string, width: int) -> string:\n size = len(line)\n spaces_left = width - size\n return line + (' ' * spaces_left)", "def inverse_captcha_halfway(in_str):\n def get_reference_char(idx):\n return get_value_at_index(idx + int(len(in_str) / 2), in_str)\n\n return _inverse_captcha(in_str, get_reference_char)", "def make_spaces_ascii(artist_str):\n while artist_str.find(' ') != -1:\n artist_str = artist_str.replace(' ', '%20')\n return artist_str", "def normalize_teh_marbuta_safebw(s):\n\n return s.replace(u'p', u'h')", "def normalize_teh_marbuta_xmlbw(s):\n\n return s.replace(u'p', u'h')", "def ascii2title(title):\n L = title\n L= np.asarray(L)\n L[L>256] = 32\n y = ''.join(chr(int(i)) for i in L)\n return y", "def hflip(self):\n for x in range(0, self.width // 2):\n for y in range(0, self.height):\n self._chars[x][y], 
self._chars[self.width - 1 - x][y] = self._chars[self.width - 1 - x][y], self._chars[x][y]\n self._fginfo[x][y], self._fginfo[self.width - 1 - x][y] = self._fginfo[self.width - 1 - x][y], self._fginfo[x][y]\n self._bginfo[x][y], self._bginfo[self.width - 1 - x][y] = self._bginfo[self.width - 1 - x][y], self._bginfo[x][y]\n self._strDirty = True", "def render_word(self, min_length=3, max_length=12):\n while True:\n word = \"\".join(self.render(lambda o: len(o) > 1 and o[-1] == \" \", lambda n: n[0] == \" \"))\n if min_length <= len(word.strip()) <= max_length:\n return word.strip()", "def w2c(word):\r\n return chr(word & 0xff) + chr((word >> 8) & 0xff)", "def normalizeSpaces(strText, bDouble=False):\n if bDouble:\n strText = re.sub(r\"[ ]+\", r\" \", strText, flags=re.UNICODE)\n # Remove double spaces from groups\n return re.sub(r\"([(|]) ([|)])\", r\"\\g<1> \\g<2>\", strText, flags=re.UNICODE)\n\n return re.sub(r\"[ ]+\", r\" \", strText, flags=re.UNICODE)", "def encode(s):\n return ' '.join(encode_to_words(s))", "def normalize_alef_maksura_hsb(s):\n\n return s.replace(u'\\u00fd', u'y')", "def get_char_width(self, char: str) -> float:\n return self.state.size * self.state.font.get_char_width(char)", "def normalize_alef_hsb(s):\n\n return _ALEF_NORMALIZE_HSB_RE.sub(u'A', s)", "def normalize(w):\n\n nfkd = unicodedata.normalize('NFKD', w)\n return ''.join(x for x in nfkd if unicodedata.category(x)[0] == 'L').lower()", "def shorten_rtept(rtept):\n return rtept.upper()[:6].strip()", "def longswapchar(word: str) -> Iterator[str]:\n\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(word))):\n yield word[:first] + word[second] + word[first+1:second] + word[first] + word[second+1:]", "def normalize_space (text):\n return RE_WS.sub (' ', text.strip ())", "def normalize(t):\n text = list(t) \n lt = len(text)\n for i in range(lt):\n c = text[i]\n \n # correct \\n, \\r chars\n if i+1 < lt-1:\n c2 = text[i+1]\n else:\n c2 = None\n if c == u'\\r' and c2 == u'\\n':\n continue\n elif c == u'\\r' and c2 != u'\\n':\n c = u'\\n'\n elif c == u'\\n' and c2 == u'\\r': # FFFFFUUUUUUUUUUUUUUuuuuu....\n continue\n \n text[i] = _farsi_unicode_norm.get(c, c)\n return u''.join(text)", "def normalize_text(w):\n return str(w, \"utf-8\").lower().replace(\"-\", \"\")", "def truncate_description(description):\n if len(description) <= 160 :\n return description\n\n cut_desc = \"\"\n character_counter = 0\n for i, letter in enumerate(description) :\n character_counter += 1\n if character_counter > 160 :\n if letter == ' ' :\n return cut_desc+\"...\"\n else :\n return cut_desc.rsplit(' ',1)[0]+\"...\"\n cut_desc += description[i]\n return cut_desc", "def one_pass(self, s: str) -> str:\n alpha_map = {\n '1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g',\n '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13': 'm', '14': 'n',\n '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's', '20': 't',\n '21': 'u',\n '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'\n }\n\n i, res = 0, ''\n while i < len(s):\n if i + 2 < len(s) and s[i + 2] == '#':\n res += alpha_map[s[i:i + 2]]\n i += 3\n else:\n res += alpha_map[s[i]]\n i += 1\n return res", "def singleencode(self, word):\n replace = {u'\\u0d15\\u0d4d\\u200d': u'\\u0d7f',\n u'\\u0d23\\u0d4d\\u200d': u'\\u0d7a',\n u'\\u0d28\\u0d4d\\u200d': u'\\u0d7b',\n u'\\u0d30\\u0d4d\\u200d': u'\\u0d7c',\n u'\\u0d32\\u0d4d\\u200d': u'\\u0d7d',\n u'\\u0d33\\u0d4d\\u200d': u'\\u0d7e'}\n for character in replace:\n 
word = word.replace(character, replace[character])\n return word", "def fill(text, width=80):\n return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))", "def fill(text, width=80):\n return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))", "def fill(text, width=80):\n return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))", "def fill(text, width=80):\n return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))", "def fill(text, width=80):\n return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))", "def _create_swidths(font, format, base):\n swidths = (base.int32 * len(font.glyphs))(*(\n pixel_to_swidth(_g.scalable_width, font.point_size, font.dpi.x)\n for _g in font.glyphs\n ))\n table_bytes = (\n bytes(le.uint32(format))\n + bytes(base.uint32(len(swidths)))\n + bytes(swidths)\n )\n return table_bytes, format", "def split_half(str):\n split_pairs = str[:len(str)//2], str[len(str)//2:]\n return split_pairs", "def write_width(self, font, string):\n width = 0\n for character in string:\n try:\n char_index = font.MAP.index(character)\n width += font.WIDTHS[char_index]\n\n except ValueError:\n pass\n\n return width", "def majuscule(string):\n\n res = \"\"\n toChange = True\n\n for letter in string:\n value_letter = ord(letter)\n isLetter = 65 <= value_letter and value_letter <= 92 or 96 <= value_letter and value_letter <= 122\n if isLetter:\n if toChange:\n res += chr(ord(letter) - 32)\n else:\n res += letter\n toChange = not toChange\n else:\n res += letter\n\n print(res)", "def visible_width(string):\n if '\\033' in string:\n string = RE_COLOR_ANSI.sub('', string)\n\n # Convert to unicode.\n try:\n string = string.decode('u8')\n except (AttributeError, UnicodeEncodeError):\n pass\n\n width = 0\n for char in string:\n if unicodedata.east_asian_width(char) in ('F', 'W'):\n width += 2\n else:\n width += 1\n\n return width", "def _padright(width, s):\n fmt = \"{0:<%ds}\" % width\n return fmt.format(s)" ]
[ "0.72672564", "0.72366166", "0.72254", "0.72254", "0.7138966", "0.6003596", "0.58500266", "0.57598126", "0.56762266", "0.5665028", "0.5609316", "0.5608659", "0.5595366", "0.55903506", "0.55307037", "0.55272573", "0.55239594", "0.54834574", "0.5470254", "0.5453405", "0.5435975", "0.5399575", "0.5390778", "0.5374225", "0.536733", "0.5367232", "0.5364578", "0.5348534", "0.53395534", "0.5324985", "0.5319384", "0.5317284", "0.530286", "0.53017354", "0.5284131", "0.528153", "0.5280858", "0.5279542", "0.5278806", "0.52531046", "0.52480286", "0.5247008", "0.5244148", "0.52401096", "0.52316225", "0.52304006", "0.5221441", "0.52045095", "0.51897615", "0.51728797", "0.51681507", "0.5165308", "0.51626563", "0.5159784", "0.51371413", "0.5132997", "0.5132821", "0.5131265", "0.5125819", "0.51257217", "0.5125419", "0.51205564", "0.5108473", "0.5104568", "0.5099856", "0.50996155", "0.5099202", "0.5093601", "0.50875765", "0.50801194", "0.507931", "0.5076939", "0.5072946", "0.50592107", "0.50561047", "0.5041908", "0.5035964", "0.50329196", "0.5020994", "0.50177044", "0.50029725", "0.4995292", "0.49896118", "0.4975638", "0.49683052", "0.49669468", "0.49649465", "0.49636596", "0.495542", "0.49521175", "0.49304652", "0.49304652", "0.49304652", "0.49304652", "0.49304652", "0.49300656", "0.49250388", "0.49209204", "0.4918753", "0.4918648", "0.49174646" ]
0.0
-1
utility to check if node names are unique
def check_onnx_node_name_uniqueness(onnx_model):
    onnx_node_names = [node.name for node in onnx_model.graph.node]
    assert len(onnx_node_names) == len(set(onnx_node_names)), f'list size mismatch, check if names are unique'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __has_conflicting_node_names(self):\n # check length of sets to determine if overlap exists\n return len({node.get_name() for node in self.get_nodeset()}) != len(self.get_nodeset())", "def checkNamesUniqueness(names):\n topNames, deeperNames = getLevelNames(names)\n## print topNames\n## print deeperNames\n for name in topNames[:-1]:\n if topNames.count(name) > 1:\n raise ValueError(\"\"\"\\Names at every level must be unique!\"\"\")\n if deeperNames:\n checkNamesUniqueness(deeperNames)", "def test_node_name(self):\n xmlns = {\n \"a\": \"_a\",\n \"g\": \"_g\"\n }\n self.assertEqual(\n utils._node_name(\"a\", \"g\", xmlns),\n (False, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"a@a\", \"g\", xmlns),\n (False, \"a\", \"{_a}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@@a\", \"g\", xmlns),\n (True, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@a@a\", \"g\", xmlns),\n (True, \"a\", \"{_a}a\")\n )\n # something not equal to _\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"1@a@a\", \"g\", xmlns),\n # too many @\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"@@a@a\", \"g\", xmlns),", "def test_unique_atom_names(self, molecule):\n # The dataset we load in has atom names, so let's strip them first\n # to ensure that we can fail the uniqueness check\n for atom in molecule.atoms:\n atom.name = \"\"\n assert not (molecule.has_unique_atom_names)\n # Then genreate unique atom names using the built in algorithm\n molecule.generate_unique_atom_names()\n # Check that the molecule has unique atom names\n assert molecule.has_unique_atom_names\n # Check molecule.has_unique_atom_names is working correctly\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n molecule.atoms[1].name = molecule.atoms[0].name # no longer unique\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names", "def test_unique_atom_names(self, molecule):\n # The dataset we load in has atom names, so let's strip them first\n # to ensure that we can fail the uniqueness check\n for atom in molecule.atoms:\n atom.name = \"\"\n assert not (molecule.has_unique_atom_names)\n # Then genreate unique atom names using the built in algorithm\n molecule.generate_unique_atom_names()\n # Check that the molecule has unique atom names\n assert molecule.has_unique_atom_names\n # Check molecule.has_unique_atom_names is working correctly\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n molecule.atoms[1].name = molecule.atoms[0].name # no longer unique\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n assert all(\"x\" in a.name for a in molecule.atoms)", "def make_unique_node(graph, name):\n if name not in graph:\n return name\n ctr = 1\n while True:\n name_ = name + '_' * ctr\n if name_ not in graph:\n return name_\n ctr += 1", "def check_name_duplication(self, other):\n self_names = set(\n [node.get(\"name\") for node in self.root.findall(\"./*[@name]\")])\n other_names = set(\n [node.get(\"name\") for node in other.root.findall(\"./*[@name]\")])\n if len(set.intersection(self_names, other_names)):\n raise NameDuplicationError()", "def is_node_name_ok(node_name):\n # 节点名不可包含`/`特殊字符\n node_name = node_name.strip('/')\n return node_name.find('/') == -1", "def 
_ValidateUniqueNames(pools):\n used_names = set()\n for pool in pools:\n name = pool.nodePool\n if name in used_names:\n raise exceptions.InvalidArgumentException(\n '--pools', 'Pool name \"%s\" used more than once.' % name)\n used_names.add(name)", "def verify_unique_names(items):\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException(\"Error: Duplicate sequence names found.\", ErrorType.INVALID_SEQUENCE_DATA)", "def _repair_names_check_unique(names: Iterable[str]) -> Iterable[str]:\n for name in names:\n if names.count(name) > 1:\n raise NameNonUniqueError(f\"Names must be unique: {name}\")\n if name == \"\" or name is numpy.nan:\n raise NameNonUniqueError(f\"Names can't be empty: {name}\")\n if re.search(r\"(?:(?<!_)_{2}\\d+|(?<!_)__)+$\", str(name)):\n raise NameNonUniqueError(\n f\"Names can't be of the form `__` or `_j`: {name}\"\n )\n return names", "def _validate_duplicate_names(res_data, name, _id=None):\n if _id:\n for data in res_data:\n if data.get(\"name\") == name and data.get(\"id\") != _id:\n return False\n return True\n else:\n for data in res_data:\n if data.get(\"name\") == name:\n return False\n return True", "def check_unique(self):\n pass", "def is_unique(w):\n chars = {}\n for c in w:\n if c in chars:\n return False\n chars[c] = True\n return True", "def test_duplicate_name_refs(renderer):\n assert renderer.name_ref(User.age) == renderer.name_ref(User.age) == \"#n0\"", "def has_duplicates(tree):\n taxa = [tip.name for tip in tree.tips()]\n if '' in taxa or None in taxa:\n raise ValueError('Empty taxon name(s) found.')\n return len(set(taxa)) < len(taxa)", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def has_node(self, u: Hashable) -> bool:\n return u in self._names", "def is_unique2(w):\n if len(w) <= 1:\n return True\n\n w_sorted = sort_string(w)\n i = 0\n while i < len(w_sorted) - 1:\n if w_sorted[i] == w_sorted[j]:\n return False\n i += 1\n return True", "def check_unique_names_for_geometry(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n non_unique_names = []\n\n geo_shapes = pm.ls(type=\"mesh\")\n progress_controller.maximum = len(geo_shapes)\n\n for shape in geo_shapes:\n if (\n shape.getParent().isUniquelyNamed() is False\n and shape.getParent().isReferenced() is False\n ):\n non_unique_names.append(shape.getParent())\n progress_controller.increment()\n\n if non_unique_names:\n mc.select(non_unique_names)\n raise PublishError(\n \"Some geometry objects are not <b>Uniquely Named</b><br><br>\"\n \"%s<br><br>Please rename them.\"\n )\n progress_controller.complete()", "def is_real_name(name):\n name = \"<\" + name.strip() + \">\"\n for real_name in names:\n if name == real_name:\n return True\n return False", "def _uniqueness_check(self, cls, unique_in = None, **attr):\n # under the same datasource, only 1 subsystem, 1 neuropil, 1 tract of the name can exist\n # under the same neuropil, only 1 neuron of the name can exist\n # multiple (collections of) synapses can exist between two neurons\n if cls == 'Species':\n tmp = self.sql_query(\n \"\"\"select from Species where (name = \"{name}\" or \"{name}\" in synonyms) and stage = \"{stage}\" and sex = \"{sex}\" \"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise 
NodeAlreadyExistError(\"\"\"Species {name} at {stage} stage ({sex}) already exists with rid = {rid}\"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex'], rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Species {name} (as its synonym) at {stage} stage ({sex}) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex'], rid = obj._id, formalname = obj.name))\n elif cls == 'DataSource':\n objs = self.find_objs('DataSource', name=attr['name'], version=attr['version'])\n #if self.exists(cls, name = attr['name'], version = attr['version']):\n if len(objs):\n raise NodeAlreadyExistError(\"\"\"{} Node with attributes {} already exists with rid = {}\"\"\".format(\n cls, ', '.join([\"\"\"{} = {}\"\"\".format(key, value) \\\n for key, value in attr.items()]), objs[0]._id))\n elif cls == 'Neurotransmitter':\n tmp = self.sql_query(\n \"\"\"select from Neurotransmitter where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n name = attr['name']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"Neurotransmitter {name} already exists with rid = {rid}\"\"\".format(\n name = attr['name'], rid = objs[0]._id))\n return objs\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Neurotransmitter {name} (as its synonym) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], rid = obj._id, formalname = obj.name))\n elif cls in ['Subsystem', 'Neuropil', 'Subregion', 'Tract']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'], formalname = obj.name,\n ds = unique_in.name,\n version = unique_in.version, rid = obj._id))\n # Alternatively, try:\n # tmp = self.sql_query(\n # \"\"\"select from {cls} where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n # cls = cls, name = attr['name']))\n # ds = tmp.owned_by(cls = 'DataSource').has(rid = datasource)\n # if len(ds):\n # tmp1 = tmp.has(name = attr['name'])\n # if len(tmp1.owned_by(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = datasource.name,\n # 
version = datasource.version))\n # else:\n # all_synonym_objs = (tmp - tmp1).node_objs\n # for obj in objs:\n # if len(QueryWrapper.from_rids(obj._id).has(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = datasource.name,\n # version = datasource.version))\n\n # Alternatively 2, try: (will be slow when it has a lot of Owns edges)\n # tmp = sql_query(\n # \"\"\"\n # select from (select expand(out('Owns')[@class = \"{cls}\"]) from {rid}) where name = \"{name}\" or \"{name}\" in synonyms\n # \"\"\"\n # )\n # elif cls in ['Subregion']:\n # if not isinstance(unique_in, models.Neuropil):\n # raise TypeError('To check the uniqueness of a {} instance, unique_in must be a Neuropil object'.format(cls))\n # tmp = self.sql_query(\n # \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='ucls' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n # rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n # if len(tmp):\n # objs = tmp.node_objs\n # if attr['name'] in [obj.name for obj in objs]:\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = unique_in.name))\n # else:\n # for obj in objs:\n # if name in obj.synonyms:\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = unique_in.name))\n elif cls in ['Neuron', 'NeuronFragment']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where uname = \"{name}\") let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists with rid = {rid}, under DataSource {ds} version {version}\"\"\".format(\n cls = cls, name = attr['name'], rid = objs[0]._id,\n ds = unique_in.name,\n version = unique_in.version))\n elif cls == 'Circuit':\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\") let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n elif cls == 
'ArborizationData':\n if not isinstance(unique_in, (models.Neuron, models.Synapse)):\n raise TypeError('To check the uniqueness of a ArborizationData instance, unique_in must be a Neuron or a Synapse object')\n tmp = self.sql_query(\n \"\"\"select from (select expand(out(HasData)) from {rid}) where @class = 'ArborizationData' \"\"\".format(rid = unique_in._id))\n if len(tmp):\n raise NodeAlreadyExistError(\"\"\"ArborizationData already exists for {node} {uname} with rid = {rid}. Use NeuroArch.update_{node}_arborization to update the record\"\"\".format(\n node = unique_in.element_type.lower(), rid = tmp.node_objs[0]._id, uname = unique_in.uname))\n else:\n raise TypeError('Model type not understood.')\n return True", "def check_name_uniqueness(cls, user_id, name):\n data_with_same_name = Data.objects.only('id').filter(user_id=user_id, name = name)\n return len(data_with_same_name) == 0", "def make_unique(name, reserved_names):\n while name in reserved_names:\n name += '_'\n\n return name", "def check_root_node_name___fix():\n from stalker import Asset\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n t = v.task\n asset_name = None\n if isinstance(t.parent, Asset):\n asset_name = t.parent.name\n\n root_nodes = auxiliary.get_root_nodes()\n root_node_name = root_nodes[0].name()\n\n if asset_name is not None:\n correct_node_name = asset_name\n correct_node_name = correct_node_name.replace(\" \", \"_\")\n if correct_node_name[0].isdigit():\n correct_node_name = \"_%s\" % correct_node_name\n if correct_node_name[-1].isdigit():\n correct_node_name += \"_grp\"\n else:\n correct_node_name = root_node_name\n if correct_node_name[0].isdigit():\n correct_node_name = \"_%s\" % correct_node_name\n if correct_node_name[-1].isdigit():\n correct_node_name += \"_grp\"\n\n root_nodes[0].rename(correct_node_name)", "def unique_elem(o: list[str]) -> bool:\n return True if len(Counter(remove_spaces(o))) == len(o) else False", "def is_distinct(n):\n nstr = str(n)\n return len(nstr) == len(set(nstr))", "def invalid_nodes_name(input_str):\n splited = input_str.replace(' ', '').split(',')\n name_format_re = r\"(?P<name>[\\w\\/]+):(?P<postfix>\\d+)\"\n invalid_node_name_list = list()\n for name in splited:\n if re.match(name_format_re, name):\n continue\n invalid_node_name_list.append(name)\n return invalid_node_name_list", "def is_unique_string(s):\n return len(s) == len(set(s))", "def check_name(self, node):\n assert \"name\" in node, \"Package node does not contain attribute 'node'\"\n assert len(node[\"name\"]) >= 1, \"Expecting at least one 'name' value\"\n # TODO: add more thorough checks", "def is_unique(x):\n return len(set(x)) == len(x)", "def check_duplicate_class_names(class_names):\n duplicates = get_duplicates(class_names)\n if duplicates:\n logger.error(f'Only globally unique class names are allowed. 
Found duplicates {duplicates}')\n raise SystemExit(0)", "def _check_unique_insesitive(self, cr, uid, ids, context=None):\n for category in self.browse(cr, uid, ids, context=context):\n if len(self.search(cr, uid, [('name','=ilike',category.name)], context=context)) > 1:\n raise osv.except_osv(_('Constraint Error'), _(\"The Name Must Be Unique!\"))\n return True", "def name_is_unique(self, name):\n unique = True\n for client in self.clients:\n unique = unique and (False if name == client.get_name() else True)\n return unique", "def _all_names_unused(elts, unused_name_nodes):\n for elt in elts:\n if isinstance(elt, (ast.List, ast.Tuple)):\n if not _all_names_unused(elt.elts, unused_name_nodes):\n return False\n if elt not in unused_name_nodes:\n return False\n return True", "def isUnique(self, word):\n abbr = self.get_abbr(word)\n if abbr not in self.abbr:\n return True\n elif len(self.abbr[abbr]) == 1 and word == self.abbr[abbr][0]:\n return True\n else:\n return False", "def try_create_uniqe_name(self,name=None,plan_id=None):\n if self.valid_name(name):\n for i in range (1,20):\n new_name=name+\"_\"+str(i)\n if self.unique_name(name=new_name,plan_id=plan_id):\n return new_name\n return False\n else:\n return False", "def simple_unique_characters(word):\n return len(set(word)) == len(word)", "def _generate_node_name(self):\r\n while 1:\r\n name = \"node\" + str(self._name_sequence)\r\n if name not in self.nodes.keys():\r\n break\r\n self._name_sequence += 1\r\n\r\n return name", "def check_unique_shot_names(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n shot_nodes = pm.ls(type=\"shot\")\n progress_controller.maximum = len(shot_nodes)\n\n shot_names = []\n shots_with_non_unique_shot_names = []\n for shot in shot_nodes:\n shot_name = shot.shotName.get()\n if shot_name in shot_names:\n shots_with_non_unique_shot_names.append(shot)\n else:\n shot_names.append(shot_name)\n progress_controller.increment()\n\n progress_controller.complete()\n if len(shots_with_non_unique_shot_names) > 0:\n raise PublishError(\n \"The following shots have non-unique shot names:<br>\"\n \"<br>\"\n \"%s\"\n % (\n \", \".join(\n map(lambda x: x.shotName.get(), shots_with_non_unique_shot_names)\n )\n )\n )", "def prevent_duplicate_names(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"prevent_duplicate_names\")", "def isUnique(self, word):\n abbr = self.getAbbr(word)\n return abbr not in self.d or len(self.d[abbr]) == 1 and self.d[abbr][0] == word", "def validate_unique_mof_names():\n names = list(FRAMEWORKS_DF['name'].str.lower()) + list(FRAMEWORKS_DF['alternative names'].dropna().str.lower())\n names = [ n for l in names for n in l.split(',') if l ]\n names = [ n.lower().replace('-', ' ') for n in names ]\n\n duplicates = [item for item, count in collections.Counter(list(names)).items() if count > 1]\n\n if duplicates:\n print('Warning: Duplicate CURATED-MOF names detected: {}'.format(duplicates))\n sys.exit(1)\n\n print('No duplicate CURATED-MOF names found.')", "def _raise_if_duplicates(counts: Dict[str, int]) -> None:\n duplicates: List[str] = []\n for nickname, count in counts.items():\n if count > 1:\n duplicates.append(nickname)\n if len(duplicates) > 0:\n # TODO This is not always nickname\n raise ValueError(f'\\'nickname\\' not unique {duplicates}')", "def is_unique_n_set(string: str) -> bool:\n\n return len(set(string)) == len(string)", "def _make_names_unique(animations):\n counts = {}\n for a in animations:\n c = 
counts.get(a['name'], 0) + 1\n counts[a['name']] = c\n if c > 1:\n a['name'] += '_' + str(c - 1)\n\n dupes = set(k for k, v in counts.items() if v > 1)\n for a in animations:\n if a['name'] in dupes:\n a['name'] += '_0'", "def test_set_unique_node_names(self):\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def make_name_unique(xml_data):\r\n # VS[compat]. Take this out once course conversion is done (perhaps leave the uniqueness check)\r\n\r\n # tags that really need unique names--they store (or should store) state.\r\n need_uniq_names = ('problem', 'sequential', 'video', 'course', 'chapter',\r\n 'videosequence', 'poll_question', 'vertical')\r\n\r\n attr = xml_data.attrib\r\n tag = xml_data.tag\r\n id = lambda x: x\r\n # Things to try to get a name, in order (key, cleaning function, remove key after reading?)\r\n lookups = [('url_name', id, False),\r\n ('slug', id, True),\r\n ('name', Location.clean, False),\r\n ('display_name', Location.clean, False)]\r\n\r\n url_name = None\r\n for key, clean, remove in lookups:\r\n if key in attr:\r\n url_name = clean(attr[key])\r\n if remove:\r\n del attr[key]\r\n break\r\n\r\n def looks_like_fallback(url_name):\r\n \"\"\"Does this look like something that came from fallback_name()?\"\"\"\r\n return (url_name is not None\r\n and url_name.startswith(tag)\r\n and re.search('[0-9a-fA-F]{12}$', url_name))\r\n\r\n def fallback_name(orig_name=None):\r\n \"\"\"Return the fallback name for this module. This is a function instead of a variable\r\n because we want it to be lazy.\"\"\"\r\n if looks_like_fallback(orig_name):\r\n # We're about to re-hash, in case something changed, so get rid of the tag_ and hash\r\n orig_name = orig_name[len(tag) + 1:-12]\r\n # append the hash of the content--the first 12 bytes should be plenty.\r\n orig_name = \"_\" + orig_name if orig_name not in (None, \"\") else \"\"\r\n xml_bytes = xml.encode('utf8')\r\n return tag + orig_name + \"_\" + hashlib.sha1(xml_bytes).hexdigest()[:12]\r\n\r\n # Fallback if there was nothing we could use:\r\n if url_name is None or url_name == \"\":\r\n url_name = fallback_name()\r\n # Don't log a warning--we don't need this in the log. Do\r\n # put it in the error tracker--content folks need to see it.\r\n\r\n if tag in need_uniq_names:\r\n error_tracker(\"PROBLEM: no name of any kind specified for {tag}. Student \"\r\n \"state will not be properly tracked for this module. Problem xml:\"\r\n \" '{xml}...'\".format(tag=tag, xml=xml[:100]))\r\n else:\r\n # TODO (vshnayder): We may want to enable this once course repos are cleaned up.\r\n # (or we may want to give up on the requirement for non-state-relevant issues...)\r\n # error_tracker(\"WARNING: no name specified for module. xml='{0}...'\".format(xml[:100]))\r\n pass\r\n\r\n # Make sure everything is unique\r\n if url_name in self.used_names[tag]:\r\n # Always complain about modules that store state. 
If it\r\n # doesn't store state, don't complain about things that are\r\n # hashed.\r\n if tag in need_uniq_names:\r\n msg = (\"Non-unique url_name in xml. This may break state tracking for content.\"\r\n \" url_name={0}. Content={1}\".format(url_name, xml[:100]))\r\n error_tracker(\"PROBLEM: \" + msg)\r\n log.warning(msg)\r\n # Just set name to fallback_name--if there are multiple things with the same fallback name,\r\n # they are actually identical, so it's fragile, but not immediately broken.\r\n\r\n # TODO (vshnayder): if the tag is a pointer tag, this will\r\n # break the content because we won't have the right link.\r\n # That's also a legitimate attempt to reuse the same content\r\n # from multiple places. Once we actually allow that, we'll\r\n # need to update this to complain about non-unique names for\r\n # definitions, but allow multiple uses.\r\n url_name = fallback_name(url_name)\r\n\r\n self.used_names[tag].add(url_name)\r\n xml_data.set('url_name', url_name)", "def isUnique(self, word):\n if len(word) < 3:\n abbrev = word\n else:\n abbrev = word[0] + str(len(word) - 2) + word[-1]\n if not abbrev in self.abbrev_dict:\n return True\n elif word in self.abbrev_dict[abbrev] and len(self.abbrev_dict[abbrev]) == 1:\n return True\n else:\n return False", "def check_unique_name(first_letters, count, name, unique_list, suffix=False):\n if suffix:\n while name in unique_list:\n count += 1\n end_count = \"%03d\" % count\n name = name[:-3] + end_count\n else:\n while name in unique_list:\n count += 1\n end_count = \"%06d\" % count\n name = first_letters + \"_\" + end_count\n\n return name, count", "def validate_unique_cof_names():\n names = FRAMEWORKS_DF['Name'].str.lower()\n names = names.str.replace('-',' ')\n\n duplicates = [item for item, count in collections.Counter(list(names)).items() if count > 1]\n\n if duplicates:\n print('Warning: Duplicate CURATED-COF names detected: {}'.format(duplicates))\n sys.exit(1)\n\n print('No duplicate CURATED-COF names found.')", "def unique_id(graph: nx.MultiDiGraph, prefix: str = \"\"):\n # TODO thread safety?\n unique_id.count = max(unique_id.count, graph.number_of_nodes()) + 1\n if prefix and not graph.has_node(prefix):\n return str(prefix)\n while graph.has_node(prefix + str(unique_id.count)):\n unique_id.count += 1\n return prefix + str(unique_id.count)", "def is_unique(strr):\n def _contains(string, char):\n for c in string:\n if c == char: return True\n return False\n for index in range(len(strr)):\n if _contains(strr[:index], strr[index]): return False\n return True", "def try_create_uniqe_title(self,title,owner):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,owner):\n return new_title\n return False\n else:\n return False", "def validate_custom_name(self, name):\n if not re.match( r'(/?[a-zA-Z_][a-zA-Z0-9_]*)+$', name):\n raise ValueError('Invalid name for node (%s)' % name)\n return", "def node_name(self, node):\r\n # There should not be more\r\n if not node:\r\n raise ValueError(\"No node provided\")\r\n\r\n names = [key for key,value in self.nodes.items() if value==node]\r\n\r\n if len(names) == 1:\r\n return names[0]\r\n elif len(names) > 1:\r\n raise Exception(\"There are more references to the same node\")\r\n else: # if len(names) == 0\r\n raise Exception(\"Can not find node '%s'\" % node)", "def is_unique(s):\n\ta = s.to_numpy() # s.values (pandas<0.24)\n\treturn (a[0] == a).all()", "def get_node_names(self):\n return set({node.get_name() for node in 
self.get_nodeset()}) # return the set of names", "def isUnique(self, word):\n abbr = self.gen_abbr(word)\n\n if abbr not in self.dict:\n return True\n elif len(self.dict[abbr]) == 1 and word in self.dict[abbr]:\n return True\n else:\n return False", "def is_unique(string):\n for i in range(0, len(string)):\n for j in range(0, i):\n if string[j] == string[i]:\n return False\n return True", "def prevent_duplicate_names(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"prevent_duplicate_names\")", "def prevent_duplicate_names(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"prevent_duplicate_names\")", "def is_unique(str):\n\n # char_count = {}\n\n # for char in str:\n # count = char_count.get(char, 0)\n\n # if count == 1:\n # return False\n\n # else:\n # char_count[char] = 1\n\n # return True\n\n return len(str) == len(set(str))", "def nodeToLongName(node):\n\n pass", "def check_dataset_duplicate_ids(self, dataset):\n ids = [a['_id'] for a in dataset]\n # Construct list of duplicates\n dupe_ids = [a for n, a in enumerate(ids) \n if a in ids[:n]]\n if len(dupe_ids) > 0:\n # Get list of names for the duplicate pandas\n dupe_names = [a['en.name'] for a in dataset \n if a['_id'] in dupe_ids]\n raise IdError(\"ERROR: duplicate ids for en.names: %s\" \n % str(dupe_names))", "def test_is_an_element_name():\n for el in roentgen.elements['name']:\n assert(is_an_element(el))", "def _repair_names_unique(\n names: Sequence[str],\n quiet: bool = False,\n sanitizer: Callable = None,\n base0_: bool = None,\n) -> List[str]:\n base = int(not base0_)\n min_names = _repair_names_minimal(names)\n neat_names = [\n re.sub(r\"(?:(?<!_)_{1,2}\\d+|(?<!_)__)+$\", \"\", name)\n for name in min_names\n ]\n if callable(sanitizer):\n neat_names = [sanitizer(name) for name in neat_names]\n\n new_names = []\n changed_names = []\n for i, name in enumerate(neat_names):\n if neat_names.count(name) > 1 or name == \"\":\n name = f\"{name}__{i + base}\"\n if name != names[i]:\n changed_names.append((names[i], name))\n new_names.append(name)\n if not quiet:\n _log_changed_names(changed_names)\n return new_names", "def listNodesWithIncorrectNames(*args, **kwargs)->None:\n pass", "def check_duplicate_image_name(image_paths):\n image_names = [os.path.basename(os.path.splitext(p)[0]) for p in image_paths]\n\n num_images = len(image_names)\n\n num_unique = len(set(image_names))\n\n if num_images != num_unique:\n raise ValueError('Found %d duplicate images.' 
% (num_images - num_unique))\n\n logging.info('Found no duplicates in %d images.', num_images)", "def has_name(self):\n return self.unpack_word(0x2) != 0", "def GetUniqueName( name, elems ):\n digits = []\n for c in reversed( name ):\n if c.isdigit():\n digits.append( c )\n else:\n break\n \n stem = name[0:len( name ) - len( digits )]\n val = ''.join( digits )[::-1] or 0\n i = int( val )\n \n while True:\n i += 1\n newName = ''.join( [stem, str( i )] )\n if newName not in elems:\n break\n \n return newName", "def nodes_row_similar(all_rows: set[tuple[str, str, str, int, int, int]],\n identifier: tuple[str, str, str, int, int, int]) -> bool:\n for row in all_rows:\n if row[0] == identifier[0] and row[1] == identifier[1]:\n return True\n return False", "def checkunique(data):\n for i in range(len(data)-1):\n if data[i]==data[i+1]:\n return False\n return True", "def unique_name(name, nlist, max=1000):\n out = name\n if name in nlist:\n for i in range(1, max+1):\n out = \"%s_%i\" % (name, i)\n if out not in nlist:\n break\n return out", "def _make_unique(name, idx):\n p = re.compile(\".[aA-zZ]+_x[0-9]+\")\n if p.match(name):\n tags = name[1:].split(\"_x\")\n return \">%s_%s_x%s\" % (tags[0], idx, tags[1])\n return name.replace(\"@\", \">\")", "def locality_not_unique(cls, gdf):\n check_name = \"locality_not_unique\"\n gdf['Local_Qualifer_Name'] = gdf['LocalityName'] + \\\n ', ' + gdf['QualifierName']\n # remove all the notana values from the dataframe.\n nodes = gdf[gdf[\"Local_Qualifer_Name\"].notna()]\n # check for duplicates in the locality qualifier name column.\n boolean = nodes.duplicated(subset=['Local_Qualifer_Name'])\n nodes_dup = nodes[boolean]\n # TODO this might work, it returns 8000 localities out of 28000 that are\n # not unique... not sure that all is correct.\n a = nodes_dup.loc[~nodes_dup.duplicated(keep=False),\n 'Locality_Qualifier_Name'].unique()\n # TODO check that the returned are correct\n\n failed_nodes = ''\n rep.report_failing_nodes(gdf, check_name, failed_nodes)\n return failed_nodes\n raise NotImplementedError", "def is_unique(self, field):\n return field.scheme.is_unique", "def get_unique_vertex_name(self, node: Node) -> str:\n\n if node not in self.uniqueVertexMap_:\n self.uniqueVertexMap_[node] = self.uniqueVertexNo_\n self.uniqueVertexNo_ += 1\n\n return f\"v{self.uniqueVertexMap_[node]}\"", "def has_uniquely_named_variables(formula: Formula) -> bool:\r\n forbidden_variables = set(formula.free_variables())\r\n def has_uniquely_named_variables_helper(formula: Formula) -> bool:\r\n if is_unary(formula.root):\r\n return has_uniquely_named_variables_helper(formula.first)\r\n elif is_binary(formula.root):\r\n return has_uniquely_named_variables_helper(formula.first) and \\\r\n has_uniquely_named_variables_helper(formula.second)\r\n elif is_quantifier(formula.root):\r\n if formula.variable in forbidden_variables:\r\n return False\r\n forbidden_variables.add(formula.variable)\r\n return has_uniquely_named_variables_helper(formula.predicate)\r\n else:\r\n assert is_relation(formula.root) or is_equality(formula.root)\r\n return True\r\n\r\n return has_uniquely_named_variables_helper(formula)", "def unique_names(names):\n return sorted(set(names))", "def isNodeExists(self, longName):\n return self.getComponentByLongName(longName) != None", "def isUnique(self, word):\n if len(word) <= 1:\n n = word\n else:\n n = word[0] + str(len(word) - 2) + word[-1] #Get the abbrviation.\n if n not in self.abbrdict or (self.abbrdict[n] == 1 and word in self.origdict): #If it is not in 
abbrdict or the abbrevation count is 1 and the word has appeared in dictionary, return true.\n return True\n else: #Otherwise, return false.\n return False", "def check_names(sections):\n return _check_nentries(sections, \"NAMES\", \"NAMES\")", "def is_unique3(test_str):\n\n for i in range(len(test_str) - 1):\n for j in range(i+1, len(test_str)):\n if test_str[i] == test_str[j]:\n return False\n return True", "def is_unique2(test_str):\n\n chars = set(test_str)\n\n if len(test_str) == len(chars):\n return True\n\n return False", "def check_if_input_digits_are_unique(self):\n number_of_occurrence = []\n for element in self.input:\n count_of_element_in_input = self.input.count(element)\n number_of_occurrence.append(count_of_element_in_input)\n if sum(number_of_occurrence) == len(str(self.input)):\n return True\n else:\n return False", "def check_header_dups(header,\r\n errors):\r\n\r\n for curr_elem in range(len(header)):\r\n if header.count(header[curr_elem]) != 1:\r\n errors.append('%s found in header %d times. ' %\r\n (header[curr_elem], header.count(header[curr_elem])) +\r\n 'Header fields must be unique.\\t%d,%d' % (0, curr_elem))\r\n\r\n return errors", "def name_collision(x):\r\n return x", "def name_collision(x):\r\n return x", "def test_get_num_unique_name(self):\n\n list1 = self.test_num_unique_name\n list2 = get_num_unique_name(self.test_sorted_tuple, self.test_dict)\n self.assertEqual(list1, list2)", "def update_element_name(self, items, new_name):\n if new_name != '':\n for i in items:\n if i.text() == new_name:\n #print(\"Name already exists\")\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Information)\n msgBox.setText(\"Element with this name already exists.\")\n msgBox.setWindowTitle(\"QMessageBox Example\")\n msgBox.setStandardButtons(QMessageBox.Ok)\n msgBox.exec()\n return False\n return new_name\n else:\n if self.list_of_elements.count() == 0:\n new_name = self.element_name+\"_\"+str(0)\n return new_name\n\n for i in range(0, self.list_of_elements.count()+1):\n new_name = self.element_name+\"_\"+str(i)\n exists = self.list_of_elements.findItems(new_name,\n QtCore.Qt.MatchExactly)\n if len(exists) == 0:\n return new_name\n return False", "def is_unique(a_string):\n\n if len(a_string) is 0:\n print \"String is empty.\"\n return False\n chars = []\n for char in a_string:\n if char not in chars:\n chars.append(char)\n else:\n return False\n return True", "def too_short(self, node_list):\n for node in node_list:\n if node.path_length < 1:\n return True\n return False", "def _check_name(self):\n\t\tpass", "def check_node_names_with_bad_characters(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n nodes_to_check = mc.ls()\n progress_controller.maximum = len(nodes_to_check)\n\n nodes_with_bad_name = []\n for node_name in nodes_to_check:\n if \":\" not in node_name and any(\n map(lambda x: x == \"?\" or ord(x) > 127, node_name)\n ):\n nodes_with_bad_name.append(node_name)\n progress_controller.increment()\n\n progress_controller.complete()\n\n if nodes_with_bad_name:\n pm.select(nodes_with_bad_name)\n raise PublishError(\n \"There are nodes with <b>unknown characters</b> in their names:\"\n \"<br><br>\"\n \"%s\" % \"<br>\".join(nodes_with_bad_name[:MAX_NODE_DISPLAY])\n )", "def is_unique(self):\r\n return self._unique", "def is_named(ucs):\n try:\n return bool(unicodedata.name(ucs))\n except ValueError:\n return False", "def is_unique1(test_str):\n\n letters = set()\n for char in test_str:\n if char in 
letters:\n return False\n else:\n letters.add(char)\n return True", "def _match_identical_nodes(self):\n\n for job_name_b in self._topo_b_nodes:\n for job_name_a in self._unresolved_a_nodes:\n if self._is_node_identical(job_name_a, job_name_b):\n self._identical_nodes[job_name_b] = job_name_a\n self._unresolved_a_nodes.remove(job_name_a)\n self._unresolved_b_nodes.remove(job_name_b)\n break" ]
[ "0.73011124", "0.6985261", "0.6725731", "0.6644279", "0.6635118", "0.6553637", "0.6456373", "0.6453211", "0.6446892", "0.6387194", "0.6358027", "0.6313981", "0.62614363", "0.6214909", "0.6204638", "0.619209", "0.6174945", "0.6174945", "0.61397904", "0.6063441", "0.60601556", "0.60353905", "0.59972554", "0.5992505", "0.5979854", "0.597693", "0.59703624", "0.5966362", "0.59629637", "0.59268355", "0.5926286", "0.5926021", "0.59190947", "0.5900906", "0.58616066", "0.5849168", "0.58358306", "0.5831331", "0.5826059", "0.5792747", "0.5778997", "0.57630426", "0.5761694", "0.57558715", "0.5752776", "0.5730392", "0.57148653", "0.57047284", "0.57034427", "0.5703399", "0.56728727", "0.5667827", "0.5650886", "0.56487715", "0.5648058", "0.5631507", "0.5630748", "0.56193256", "0.5617304", "0.5615571", "0.5612964", "0.5604657", "0.5604657", "0.56031907", "0.56029844", "0.5601571", "0.55871415", "0.55773884", "0.5557286", "0.5549224", "0.5545014", "0.55447066", "0.5543965", "0.5543071", "0.55430186", "0.5542116", "0.5520306", "0.55196327", "0.55086356", "0.55024076", "0.5489702", "0.5477569", "0.5473676", "0.5473616", "0.5469343", "0.54666734", "0.54607666", "0.54560155", "0.5437097", "0.5437097", "0.54319865", "0.54290533", "0.5411918", "0.54107445", "0.54032826", "0.53960234", "0.53937584", "0.537639", "0.537085", "0.53707486" ]
0.8149716
0
test onnx based utility to find mapping between onnx node names and io tensors
def test_onnx_node_name_to_input_output_names_util(self): model = models.resnet18(pretrained=False) dummy_input = torch.randn(1, 3, 224, 224) torch.onnx.export(model, dummy_input, './data/resnet18.onnx') onnx_utils.OnnxSaver.set_node_names('./data/resnet18.onnx', model, dummy_input, is_conditional=False, module_marker_map={}) onnx_model = onnx.load('./data/resnet18.onnx') # Get Dict mapping node name to the input and output names node_to_io_dict,_ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model) node_0 = onnx_model.graph.node[0] assert node_0.input == node_to_io_dict[node_0.name].inputs assert node_0.output == node_to_io_dict[node_0.name].outputs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors", "def map_output_and_node(cls, onnx_model: onnx.ModelProto):\n output2node = dict()\n for node in onnx_model.graph.node:\n for output_name in node.output:\n output2node[output_name] = node\n return output2node", "def test_single_pytorch_module_mapping_to_many_onnx_nodes(self):\n\n AimetLogger.set_level_for_all_areas(logging.DEBUG)\n\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model_name = 'multilayer_lstm'\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n\n torch.onnx.export(model, dummy_input, './data/' + model_name + '.onnx')\n onnx_utils.OnnxSaver.set_node_names('./data/' + model_name + '.onnx', model, dummy_input, is_conditional=False,\n module_marker_map={})\n onnx_model = onnx.load('./data/' + model_name + '.onnx')\n\n lstm_nodes = [node for node in onnx_model.graph.node if node.op_type == 'LSTM']\n assert 3 == len(lstm_nodes)\n\n node_to_io_dict, _ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n assert isinstance(node_to_io_dict['lstm#root_node'], list)\n assert 3 == len(node_to_io_dict['lstm#root_node'])", "def test_set_unique_node_names(self):\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = 
'./data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def graph_node_names_details(model):\n\n node_details = namedtuple('node_details', ['node', 'outputs'])\n node_names_details = {}\n for initializer in model.initializer():\n initializer_name = initializer.name\n each_node = node_details(node=initializer, outputs=[])\n if initializer_name not in node_names_details:\n each_node.outputs.extend(get_initializer_children_names(model, initializer))\n node_names_details[initializer_name] = each_node\n for node in model.nodes():\n node_name = node.name\n output_names = node.output\n # onnx output has different name from node name\n for output_name in output_names:\n if output_name not in node_names_details:\n node_names_details[output_name] = node_name\n each_node = node_details(node=node, outputs=[])\n if node_name not in node_names_details:\n each_node.outputs.extend(get_node_children_names(model, node))\n node_names_details[node_name] = each_node\n for graph_input in model.graph().input:\n outputs = []\n node_name = graph_input.name\n for k, v in node_names_details.items():\n try:\n if node_name in v.node.input:\n outputs.append(k)\n except BaseException:\n continue\n each_node = node_details(node=graph_input, outputs=outputs)\n # if node_name not in node_names_details:\n node_names_details[node_name] = each_node\n\n return node_names_details", "def node_mapping(self):\n ...", "def test_non_leaf_module_names(self):\n class Net(torch.nn.Module):\n \"\"\"\n Model using multiply as functional and module at different depths\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer = HierarchicalMultiplyModule()\n\n def forward(self, x):\n return self.layer(x)\n\n model = Net()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n expected_names = [\n # names compatible with torch 1.9.1 version (should be removed in the future)\n 'layer.mul1.mul',\n 'layer.mul1.Mul_7',\n 'layer.mul2.mul',\n 'layer.mul2.Mul_15',\n 'layer.Mul_18',\n \n # names compatible with torch 1.13.1 version \n '/layer/mul1/Mul',\n '/layer/mul2/Mul',\n '/layer/Mul'\n ]\n for node in onnx_model.graph.node:\n assert 'Constant' in node.name or node.name in expected_names\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def test_naming_for_model_with_deep_graph(self):\n\n model = models.resnet152(pretrained=False)\n dummy_input = torch.randn(1, 3, 224, 224)\n\n onnx_path= './data/' + model.__class__.__name__ + '.onnx'\n with onnx_simply(True):\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input,\n is_conditional=False, module_marker_map={})\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_names(onnx_model)\n\n counts = defaultdict(int)\n top_level_nodes = tuple(['conv1', 'bn1', 'relu', 'maxpool', 'avgpool', 'Flatten_', '/Flatten', 'fc'])\n for node in onnx_model.graph.node:\n if node.name.startswith(top_level_nodes):\n continue\n elif '.' 
in node.name:\n layer_name = '.'.join(node.name.split('#')[0].split('.')[:-1])\n counts[layer_name] += 1\n elif node.name.startswith('/'):\n layer_name = '.'.join(node.name.split('/')[1:-1])\n counts[layer_name] += 1\n\n for name, counts in counts.items():\n if 'downsample' in name:\n assert counts == 2\n else:\n print(name, counts)\n assert counts == 10\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def _get_input_output_node_names(nodes):\n input_names, output_names = set(), set()\n extension_output_names = set()\n for node in nodes:\n tf_node = node if isinstance(node,\n TensorflowNode) else TensorflowNode(node)\n output_names.add(node.name)\n # Add outputs for Split, Switch TensorArrayV3\n if tf_node.op_type == \"Split\":\n for i in range(1, tf_node.attr[\"num_split\"]):\n output_names.add(tf_node.name + \":{}\".format(i))\n if tf_node.op_type == \"Switch\":\n output_names.add(tf_node.name + \":1\")\n extension_output_names.add((tf_node.name, tf_node.name + \":1\"))\n if tf_node.op_type == \"TensorArrayV3\":\n output_names.add(tf_node.name + \":1\")\n extension_output_names.add((tf_node.name, tf_node.name + \":1\"))\n input_names.update(\n set([inp if inp[0] != \"^\" else inp[1:] for inp in tf_node.inputs]))\n inputs = input_names - output_names\n outputs = output_names - input_names\n while extension_output_names:\n ext_names = extension_output_names.pop()\n for name in ext_names:\n if name in outputs:\n outputs -= set(ext_names)\n break\n inputs.discard(None)\n return list(inputs), list(outputs)", "def get_nodes(\n nodes: Dict[str, Node] = None,\n io_mapping: Dict[str, Dict] = None\n ) -> Tuple[Dict[str, Node], Dict[str, Dict]]:\n raise NotImplementedError", "def test_get_node_outputs(self):\n pass", "def test_get_node_sensors(self):\n pass", "def test_get_hyperflex_node_by_moid(self):\n pass", "def test_set_node_name_for_matmul_add_linear(self, export_args):\n class Linear(torch.nn.Module):\n def __init__(self):\n super(Linear, self).__init__()\n self.linear = torch.nn.Linear(3, 2)\n\n def forward(self, inp):\n x = self.linear(inp)\n return x\n\n model = Linear()\n # Using an input to linear op with dimension != 2 causes torch to use matmul->add instead of gemm op\n onnx_path = './data/MyModel.onnx'\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n expected_node_names = ['linear', 'linear#1.end']\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n for name in expected_node_names:\n assert name in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n # Check that gemm still works as expected\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n assert 'linear' in actual_node_names\n assert 'linear#1' not in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def 
test_export_dict_input_output(self):\n\n\n class Net(torch.nn.Module):\n \"\"\"\n Model using multiply as functional and module at different depths\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer = InputOutputDictModel()\n\n def forward(self, x):\n return self.layer(x)\n\n model = Net()\n\n # Add an empty dictionary as the last element to not treat as named arguments.\n # see torch.onnx.export() API for more details.\n dummy_input = (\n {'a': torch.randn(1, 10, 10, 10),\n 'b': torch.randn(1, 10, 10, 10),\n 'c': torch.randn(1, 10, 10, 10)\n }, {}\n )\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n for node in onnx_model.graph.node:\n print(node.name)\n assert node.name.startswith('layer')", "def get_node_outputs(node_path):\n \n item = ix.get_item(node_path)\n\n obj_array = ix.api.OfItemArray(1)\n obj_array[0] = item\n item_outputs = ix.api.OfItemVector()\n\n ix.application.get_factory().get_items_outputs(obj_array, item_outputs, False)\n\n node_outputs = []\n for item_ in range(item_outputs.get_count()):\n\n for i in range(item_outputs[item_].get_attribute_count()):\n\n attr= item_outputs[item_].get_attribute(i)\n\n if attr.get_texture():\n\n if str(attr.get_texture()) == item.get_full_name():\n\n #attrs[attr] = target_node.get_full_name()\n node_outputs.append(attr)\n return node_outputs", "def onnx_model_node_loader(model_path):\n # these imports are done in the function because they are slow\n import onnx\n from onnx_tf.backend import prepare\n onnx_model = onnx.load(model_path) # load onnx model\n tf_model_rep = prepare(onnx_model, gen_tensor_dict=True)\n label_input_node = tf_model_rep.inputs[0]\n label_output_node = tf_model_rep.outputs[0]\n dtype_input_node = tf_model_rep.tensor_dict[f'{label_input_node}'].dtype\n\n return onnx_model, dtype_input_node, label_output_node", "def check_onnx_node_name_uniqueness(onnx_model):\n onnx_node_names = [node.name for node in onnx_model.graph.node]\n assert len(onnx_node_names) == len(set(onnx_node_names)), f'list size mismatch, check if names are unique'", "def nodes_names_map(self):\n return {nd.name: nd for nd in self.nodes}", "def test_get_hyperflex_node_profile_by_moid(self):\n pass", "def nodes(evt, node=None):\n nodenames = []\n\n if node is None:\n root = evt.retrieveObject('')\n node = root.registry()\n\n if node.object():\n nodenames.append(node.identifier())\n for l in evt.leaves(node):\n # skip a location that takes forever to load\n # XXX How to detect these automatically??\n if 'Swum' in l.identifier():\n continue\n \n temp = evt[l.identifier()]\n nodenames += nodes(evt, l)\n else:\n nodenames.append(node.identifier())\n\n return nodenames", "def generate_mxp_graph(model_name, activations, stats, first_node_name, last_node_name, io_info,\n input_type, ignore_strides=False, inline_depthwise=False, verbose=False):\n network = {}\n network['layers'] = []\n network['test_input'] = None\n network['test_output'] = None\n network['scale'] = 1.0\n\n model = onnx.load(model_name)\n nodes = model.graph.node\n inits = model.graph.initializer\n\n idx = get_node_index(nodes, first_node_name)\n if idx == None:\n if verbose:\n print('{} does not exist\\nopen {} in Netron + check spelling'.format(first_node_name, mname))\n assert(idx != None)\n\n last_idx = get_node_index(nodes, 
last_node_name)\n if last_idx == None:\n if verbose:\n print('{} does not exist\\nopen {} in Netron + check spelling'.format(last_node_name, mname))\n assert(last_idx != None)\n\n while True:\n node = nodes[idx]\n if verbose:\n print(node.name, node.op_type)\n src_node = get_node_source(nodes, node.input[0])\n if src_node == None:\n input_id = node.input[0]\n else:\n input_id = src_node.output[0]\n output_id = node.output[0]\n\n\n if len(network['layers']) == 0:\n previous = None\n else:\n previous = network['layers'][-1]\n for layer in network['layers']:\n if layer['output_id'] == input_id:\n previous = layer\n\n input_shapes, output_shapes = get_shapes(activations, stats, node)\n assert len(output_shapes) == 1, \"Multi-output nodes not supported\"\n output_shape = output_shapes[0]\n if node.op_type == \"Conv\":\n c, m, n = input_shapes[0]\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n assert(get_attr(node, 'pads') == None or not any(get_attr(node, 'pads')))\n\n group = get_attr(node, 'group')\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n dilations = np.asarray(get_attr(node, 'dilations')).tolist()\n if not group:\n group = 1\n if not strides:\n strides = [1, 1]\n if not dilations:\n dilations = [1, 1]\n\n use_strided = 0\n assert(strides == [1, 1] or strides == [2, 2] or strides == [4, 4])\n\n if DO_STRIDES and not ignore_strides:\n if (strides[0] > 1 or strides[1] > 1) and group == 1: # TODO handle depthwise as well\n assert(previous['output_size'] == int(np.prod(input_shapes[0])))\n use_strided = 1\n previous['output_strides'] = strides\n if verbose:\n print('adding output strides to previous node')\n\n m = m + (m % strides[0])\n n = n + (n % strides[1])\n if int(np.prod(input_shapes[0])) != int(c*m*n):\n if verbose:\n print('adjusting size for strided maps')\n previous['output_size'] = int(c*4*m//strides[0]*n//strides[1])\n previous['output_shape'] = (c*4,m//strides[0],n//strides[1])\n\n w = get_tensor(inits, node.input[1])\n kernels, channels, _, _ = w.shape\n if len(node.input) == 3:\n b = get_tensor(inits, node.input[2])\n\n conv_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(c*m*n),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels * group,\n 'kernels': kernels,\n 'kernel_shape': kernel_shape,\n 'dilations': dilations,\n 'strides': strides,\n 'group': group,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'use_cvi': 0,\n 'use_depthwise': 0,\n 'use_strided': use_strided,\n \"biases\": [],\n \"weights\": [],\n \"sublayers\": [],\n }\n\n w = w.flatten().tolist()\n conv_layer['weights'] = base64.b64encode(struct.pack(\"f\"*len(w), *w)).decode()\n\n if len(node.input) == 3:\n b = b.flatten().tolist()\n else:\n b = [0 for _ in range(kernels)]\n conv_layer['biases'] = base64.b64encode(struct.pack(\"f\"*len(b), *b)).decode()\n\n network['layers'].append(conv_layer)\n\n elif node.op_type == \"Gemm\":\n w = get_tensor(inits, node.input[1])\n output_size, input_size = w.shape\n\n if len(node.input) == 3:\n b = get_tensor(inits, node.input[2])\n\n gemm_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(np.prod(input_shapes[0])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape': output_shape,\n 'gemm_input_size': input_size,\n 'gemm_output_size': output_size,\n 'input_id': input_id,\n 'output_id': output_id,\n 'dma_offset': 0,\n 
'buffer_offset': 0,\n \"biases\": [],\n \"weights\": [],\n \"sublayers\": [],\n }\n\n w = w.flatten().tolist()\n gemm_layer['weights'] = base64.b64encode(struct.pack(\"f\"*len(w), *w)).decode()\n\n if len(node.input) == 3:\n b = b.flatten().tolist()\n else:\n b = [0 for _ in range(output_size)]\n gemm_layer['biases'] = base64.b64encode(struct.pack(\"f\"*len(b), *b)).decode()\n network['layers'].append(gemm_layer)\n\n elif node.op_type in multipath_nodes:\n node_inputs = get_previous_nodes(nodes, node)\n shapes = input_shapes\n\n if node.op_type == \"Sum\":\n assert(all([x == shapes[0] for x in shapes[1:]]))\n elif node.op_type == \"Concat\":\n assert(all([x[1:] == shapes[0][1:] for x in shapes[1:]]))\n\n buf = node_inputs[0].name\n if node.op_type == \"Concat\":\n buf = output_id\n\n buffer_offset = 0\n for n, node_input in enumerate(node_inputs):\n noutput = node_input.output[0]\n for l, layer in enumerate(network['layers']):\n if layer['output_id'] == noutput: # if layer pointing to this node\n network['layers'][l]['output_id'] = buf # rename layer's output\n network['layers'][l]['buffer_offset'] = buffer_offset # and offset appropriately\n if layer['input_id'] == noutput:\n network['layers'][l]['input_id'] = buf #TODO\n\n buffer_offset += int(np.prod(input_shapes[n]))\n\n if node.op_type == \"Sum\":\n channels, m, n = shape3d(output_shape)\n sum_layer = {\n 'op_type': \"Sum\",\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': node_inputs[0].name,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'num_inputs': len(node.input),\n \"sublayers\": [],\n }\n network['layers'].append(sum_layer)\n\n elif node.op_type == \"Identity\":\n shapes = input_shapes\n\n channels, m, n = shape3d(output_shape)\n identity_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n }\n network['layers'].append(identity_layer)\n\n elif node.op_type == \"LRN\":\n shapes = input_shapes\n channels, m, n = shape3d(output_shape)\n lrn_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'alpha': get_attr(node, 'alpha'),\n 'beta': get_attr(node, 'beta'),\n 'bias': get_attr(node, 'bias'),\n 'size': get_attr(node, 'size'),\n 'scale': 1.0,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n }\n network['layers'].append(lrn_layer)\n\n elif node.op_type == \"Scale\":\n scale_sublayer = {\n 'op_type': 'Scale',\n 'name': node.name,\n \"use_replay\": 1,\n 'scale': get_attr(node, 'scale'),\n }\n previous['sublayers'].append(scale_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type in [\"GlobalAveragePool\", \"GlobalMaxPool\"]:\n assert(previous['n'] == previous['m'])\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n pads = pads6(node)\n pool_sublayer = {\n 
'op_type': node.op_type.replace('Global', ''),\n 'name': node.name,\n 'use_replay': 0,\n 'kernel_shape': [previous['m'], previous['n']],\n 'strides': [previous['m'], previous['n']],\n 'pads': pads,\n }\n previous['sublayers'].append(pool_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = (output_shape)\n\n elif node.op_type in [\"MaxPool\", \"AveragePool\"]:\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n\n if node.op_type == \"AveragePool\": #TODO quick fix for tf average pool quirk\n if kernel_shape[0] * kernel_shape[1] == previous['m'] * previous['n']:\n kernel_shape = [previous['m'], previous['n']]\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n if strides is None:\n strides = [ 1 for _ in kernel_shape]\n pads = pads6(node)\n pool_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'kernel_shape': kernel_shape,\n 'strides': strides,\n 'pads': pads,\n }\n previous['sublayers'].append(pool_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = (output_shape)\n elif node.op_type == \"PRelu\":\n slope = get_tensor(inits, node.input[1])\n slope = slope.flatten().tolist()\n prelu_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'slope': slope,\n }\n previous['sublayers'].append(prelu_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"LeakyRelu\":\n alpha = get_attr(node, 'alpha')\n if alpha is None:\n alpha = .01\n leaky_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'alpha': alpha\n }\n previous['sublayers'].append(leaky_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Relu\":\n relu_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n }\n previous['sublayers'].append(relu_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Clip\":\n clip_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'min': float(get_tensor(inits,node.input[1])),\n 'max': float(get_tensor(inits,node.input[2])),\n }\n previous['sublayers'].append(clip_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Pad\":\n pads = pads6(get_tensor(inits,node.input[1]).tolist())\n value = int(get_tensor(inits,node.input[2]))\n if value < -1:\n value = -1\n if value > 1:\n value = 1\n pad_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'value': value,\n 'pads': pads,\n }\n previous['sublayers'].append(pad_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = (output_shape)\n elif node.op_type in [\"Add\", \"Mul\", \"Sub\", \"Div\"]:\n\n skip = False\n if node.op_type == \"Mul\":\n next_nodes = get_node_inputs(nodes, node.output[0])\n if node.name == nodes[-1].name:\n if verbose:\n print('removing final scale node')\n skip = True\n\n elif previous['op_type'] in [\"LRN\"]:\n if verbose:\n print('skipping mul after lrn')\n array = get_tensor(inits, node.input[1])\n if array is None:\n array = get_tensor(inits, node.input[0])\n previous['scale'] = float(array[0])\n print('skipping mul after lrn', previous['scale'], previous['input_id'], previous['output_id'])\n\n skip = True\n\n elif next_nodes[0].op_type in [\"Softmax\"]:\n if verbose:\n print('skipping mul before softmax')\n skip = True\n\n array = 
get_tensor(inits, node.input[1])\n if array is None:\n array = get_tensor(inits, node.input[0])\n c = activations[node.input[1]].shape[1]\n else:\n c = input_shapes[0][0]\n\n if node.op_type == \"Add\": # TODO for scalar Add\n dims = len(np.squeeze(array).shape)\n if dims == 0:\n array = np.ones((c, 1)) * array\n\n dims = len(np.squeeze(array).shape)\n if c == 1 and dims == 0:\n dims = 1\n\n array = array.flatten().tolist()\n # force_broadcast_2 = False\n # if force_broadcast_2:\n # # if c != 1 and dims == 0:\n # if c != 1 and dims == 0 and node.op_type != \"Mul\": # TODO forcing to broadcast 2 not broadcast 3\n # dims = 1\n # array = [array[0] for _ in range(c)]\n\n if not skip:\n arithmetic_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'dims': dims,\n 'array': array,\n }\n previous['sublayers'].append(arithmetic_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type in [\"Abs\", \"Max\", \"Mean\", \"Min\", \"Neg\", \"Not\"]:\n unary_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n }\n previous['sublayers'].append(unary_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n\n elif node.op_type == \"Reshape\":\n dims = get_tensor(inits, node.input[1])\n\n if len(dims) == 4 and dims[-1] == 2:\n idx += 6\n node = nodes[idx]\n output_id = node.output[0]\n _, output_shapes = get_shapes(activations, stats, node)\n output_shape = output_shapes[0]\n channels, m, n = shape3d(output_shape)\n reorg_layer = {\n 'op_type': \"Reorg\",\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape': output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n \"stride\": int(dims[-1]),\n }\n network['layers'].append(reorg_layer)\n else:\n previous['output_id'] = output_id\n\n elif node.op_type in [\"Flatten\",'Cast']:\n previous['output_id'] = output_id\n elif node.op_type == \"Resize\":\n scales = get_tensor(inits, node.input[2])\n assert(scales[0] == 1 and scales[1] == 1)\n scale = float(scales[2])\n mode = get_attr(node, 'mode').decode()\n assert(mode == 'nearest' or mode == 'linear')\n shapes = input_shapes[:1]\n channels, m, n = shape3d(output_shape)\n in_size= [d for d in one_elem(input_shapes)[1:]]\n replay = 0 if in_size == [1,1] else 1\n resize_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': replay,\n 'input_size': int(np.prod(one_elem(input_shapes))),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'mode' :mode,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': [float(scales[2]),float(scales[3])],\n }\n network['layers'].append(resize_layer)\n elif node.op_type == \"ArgMax\":\n input_shape = one_elem(input_shapes)\n channels, m, n = shape3d(input_shape)\n argmax_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': [float(scales[2]),float(scales[3])],\n }\n 
network['layers'].append(argmax_layer)\n\n elif node.op_type == \"Softmax\":\n prev = get_previous_nodes(nodes, node)[0]\n if prev.op_type == \"Mul\":\n scale = get_tensor(inits, prev.input[1])\n scale = scale.flatten().tolist()\n else:\n scale = [1.0]\n if len(scale) > 1:\n raise NotImplementedError(\"Broadcast scale not implemented for softmax\")\n\n shapes = input_shapes\n channels, m, n = shape3d(output_shape)\n softmax_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': scale,\n 'size': len(scale),\n }\n network['layers'].append(softmax_layer)\n\n # softmax_sublayer = {u'op_type': u'Softmax', 'scale': 1.0}\n # previous['sublayers'].append(softmax_sublayer)\n # previous['output_id'] = output_id\n # print('warning SOFTMAX ignored!... fine if last layer and sorting outputs')\n\n elif node.op_type == \"Transpose\":\n shapes = input_shapes\n\n channels, m, n = shape3d(output_shape)\n permutation =[p-1 for p in get_attr(node, 'perm')[1:]]\n transpose_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'permutation':permutation,\n \"sublayers\": [],\n }\n network['layers'].append(transpose_layer)\n else:\n raise RuntimeError('Unknown node type:{} '.format(node.op_type))\n\n idx += 1\n if idx > last_idx:\n break\n\n unsigned_network_inputs = input_type == np.uint8\n\n if CVI_1x1:\n network = mxp_gemm_to_conv(network)\n\n network = mxp_set_replay(network, io_info)\n network = mxp_set_cvi(network)\n network = mxp_set_unsigned(network, unsigned_network_inputs)\n\n if inline_depthwise:\n network = mxp_inline_depthwise(network)\n\n network = mxp_describe_layers(network)\n network = mxp_number_buffers(network)\n buffers = mxp_size_buffers(network)\n network = mxp_number_sublayers(network)\n\n network['num_layers'] = len(network['layers'])\n network['buffers'] = buffers\n\n return network", "def test_get_node_type_name(self):\n pass", "def map_input_and_node(cls, onnx_model: onnx.ModelProto):\n\n input2node: Dict[str, List] = dict()\n for node in onnx_model.graph.node:\n for idx, input_name in enumerate(node.input):\n if input_name not in input2node:\n input2node[input_name] = []\n input2node[input_name].append([node, idx])\n return input2node", "def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def", "def load_onnx(model_name):\n onnx_path = '%s.onnx' % model_name\n if not os.path.isfile(onnx_path):\n print('ERROR: file (%s) not found! You might want to run yolo_to_onnx.py first to generate it.' 
% onnx_path)\n return None\n else:\n with open(onnx_path, 'rb') as f:\n return f.read()", "def map(self, app, node):", "def check_onnx_model(onnx_path):\n # Load the ONNX model\n model = onnx.load(onnx_path)\n\n # Check that the IR is well formed\n onnx.checker.check_model(model)\n\n # Print a human readable representation of the graph\n onnx.helper.printable_graph(model.graph)", "def test_create_gene_ontology(self):\n\n # Here are mappings for just a few yeast genes.\n\n mapping = {}\n mapping['STE7'] = ['GO:0000187']\n mapping['PBS2'] = ['GO:0000187']\n mapping['NOP8'] = [\n 'GO:0003676', 'GO:0003723', 'GO:0042254', 'GO:0005634', 'GO:0005730'\n ]\n\n # Build the ontology, then see if it looks correct.\n\n root = dc.models.tensorgraph.models.ontology.create_gene_ontology(\n mapping, min_node_features=1)\n assert len(root.feature_ids) == 0\n\n def find_features(node, features):\n features.update(node.feature_ids)\n for child in node.children:\n find_features(child, features)\n\n all_features = set()\n find_features(root, all_features)\n assert len(all_features) == 3\n for key in mapping:\n assert key in all_features", "def test_nmap_get_sensordef(self):\n test_sensordef = {\n \"kind\": self.test_nmap.get_kind(),\n \"name\": \"NMAP\",\n \"description\": \"Checks the availability of systems.\",\n \"help\": \"Checks the availability of systems on a network and logs this to a separate \"\n \"logfile on the miniprobe.\",\n \"tag\": \"mpnmapsensor\",\n \"groups\": [\n {\n \"name\": \"nmapspecific\",\n \"caption\": \"NMAP specific\",\n \"fields\": [\n {\n \"type\": \"integer\",\n \"name\": \"timeout\",\n \"caption\": \"Timeout (in ms)\",\n \"required\": \"1\",\n \"default\": 50,\n \"minimum\": 10,\n \"maximum\": 1000,\n \"help\": \"If the reply takes longer than this value the request is aborted \"\n \"and an error message is triggered. Max. value is 1000 ms. 
(=1 sec.)\"\n },\n {\n \"type\": \"edit\",\n \"name\": \"ip\",\n \"caption\": \"IP-Address(es)\",\n \"required\": \"1\",\n \"default\": \"\",\n \"help\": \"Specify the ip-address or a range of addresses using one of the following notations:[br]Single: 192.168.1.1[br]CIDR: 192.168.1.0/24[br]- separated: 192.168.1.1-192.168.1.100\"\n }\n ]\n }\n ]\n }\n assert_equal(self.test_nmap.get_sensordef(), test_sensordef)", "def debug_cntk_outputnodes():\n\tz = load_model(MODEL)\n\tprint (\"Load complete.\");\n\tfor index in range(len(z.outputs)):\n\t\tprint(\"Index {} for output: {}.\".format(index, z.outputs[index].name))", "def test_get_nodes(self):\n wp22_rdf_graph = parse_rdf(WP22)\n wp706_rdf_graph = parse_rdf(WP706)\n wp1871_rdf_graph = parse_rdf(WP1871)\n wp2799_rdf_graph = parse_rdf(WP2799)\n\n nodes_wp22 = _get_nodes(wp22_rdf_graph)\n nodes_wp706 = _get_nodes(wp706_rdf_graph)\n nodes_wp1871 = _get_nodes(wp1871_rdf_graph)\n nodes_wp2799 = _get_nodes(wp2799_rdf_graph)\n\n self.assertEqual(len(nodes_wp22), 17)\n self.assertEqual(len(nodes_wp706), 186)\n self.assertEqual(len(nodes_wp1871), 115)\n self.assertEqual(len(nodes_wp2799), 141)", "def test_src_node() -> dict:\n return {\"aetitle\": \"pacsanini_testing\", \"ip\": 11114}", "def test_rdf2nx(example_ns, SCHEMA, simple_rdf_graph):\n KNOWN_EDGE = (URIRef(example_ns.Protagonist), URIRef(example_ns.Antagonist))\n namespaces = {\"schema\": SCHEMA, \"ex\": example_ns, \"base\": example_ns}\n nx_graph = RDF2NX.convert(rdf_graph=simple_rdf_graph, namespaces=namespaces)\n\n try:\n protagonist = nx_graph.nodes[example_ns.Protagonist]\n except KeyError:\n raise KeyError(\"Protagonist node not found in fixture graph.\")\n\n p_height = protagonist.get(\"ex:height\", None)\n assert (\n type(p_height) == float\n ), \"XSD Datatype failed to map to python type correctly.\"\n\n p_type = type(protagonist.get(\"type\", None))\n assert not isinstance(\n p_type, type(None)\n ), f\"Failed to get type of node from node keys: {protagonist.keys()}\"\n assert p_type == URIRef, \"URIRef node attribute is not URI.\"\n\n assert KNOWN_EDGE in nx_graph.edges(data=False) and KNOWN_EDGE[\n ::-1\n ] in nx_graph.edges(data=False), \"Known relations missing in the networkx graph.\"\n\n # Run once more with rdf namespace and check type\n namespaces = {\"rdf\": RDF, **namespaces}\n nx_graph = RDF2NX.convert(rdf_graph=simple_rdf_graph, namespaces=namespaces)\n\n try:\n protagonist = nx_graph.nodes[example_ns.Protagonist]\n except KeyError:\n raise KeyError(\"Protagonist node not found in fixture graph.\")\n\n p_type = type(protagonist.get(\"rdf:type\", None))\n assert not isinstance(\n p_type, type(None)\n ), f\"Failed to get rdf:type of node from node keys: {protagonist.keys()}\"", "def test_get_node_hardware(self):\n pass", "def get_output_node_names(self, node_name):\n # (str) -> list\n node = self.get_node(node_name)\n return node.tops", "def network_nodes_species(self):\n G, mapping = self.network()\n waste, resources, intmed_products = self.amenities()\n\n node_dict = {}\n\n for nd in G:\n # print(nd)\n if isinstance(nd, int):\n node_dict[nd] = \"r\"\n elif nd in self.commodity:\n node_dict[nd] = \"Xc\"\n elif nd in waste:\n node_dict[nd] = \"w\"\n elif nd in resources:\n node_dict[nd] = \"Xr\"\n elif nd in intmed_products:\n node_dict[nd] = \"InPr\"\n\n return node_dict", "def test_nodes_at_link():\n grid = HexModelGrid((3, 2))\n\n assert_array_equal(grid.nodes_at_link[:, 0], grid.node_at_link_tail)\n assert_array_equal(grid.nodes_at_link[:, 1], 
grid.node_at_link_head)\n\n assert np.may_share_memory(grid.nodes_at_link, grid.node_at_link_tail)\n assert np.may_share_memory(grid.nodes_at_link, grid.node_at_link_head)", "def get_mds_dimension_names(node):\n ndims=len(get_mds_shape(node))\n own_name=get_mds_shortname(node)\n dimension_names=[]\n for i in range(ndims):\n dimension=node.dim_of(i)\n try:\n name=get_mds_shortname(get_mds_node_reference(dimension))\n if len(get_mds_shape(dimension))>1:\n name=name+\"_index\"\n except:\n name=own_name+\"_index\"\n dimension_names.append(name)\n return dimension_names", "def _ProjectImpl(self, tensor_names: List[Text]) -> \"TFXIO\":", "def test_lookup(graph):\n node1 = graph.lookup(0)\n assert str(node1) == \"<1, 2>\"\n\n node2 = graph.lookup(3)\n assert str(node2) == \"<1, 2>\"\n\n node3 = graph.lookup(1)\n assert str(node3) == \"<0, 2, 3>\"", "def getNodeNames(self, includeDisabled=False):", "def test_IODimensions(self):\n tasks = [(1,1,100,True),(10,1,100,True),(1,10,100,True),(10,10,100,True),\n (1,1,100,False),(10,1,100,False),(1,10,100,False),(10,10,100,False)]\n for t in tasks:\n N_in ,N_out, N_samples, tf = t\n X = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n y = np.random.randn(N_samples,N_out) if N_out > 1 else np.random.randn(N_samples)\n Xp = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n esn = ESN(N_in,N_out,teacher_forcing=tf)\n prediction_tr = esn.fit(X,y)\n prediction_t = esn.predict(Xp)\n self.assertEqual(prediction_tr.shape,(N_samples,N_out))\n self.assertEqual(prediction_t.shape,(N_samples,N_out))", "def get_node_info(node_name, nodes_file_name):\r\n\r\n data = []\r\n node = []\r\n mvtype = ''\r\n\r\n with open(nodes_file_name) as n:\r\n for num, line in enumerate(n):\r\n if node_name in line:\r\n data = line.split()\r\n if node_name == data[0]:\r\n node.append(float(data[1]))\r\n node.append(float(data[2]))\r\n if 'terminal' in line:\r\n mvtype = 'terminal'\r\n elif 'terminal_NI' in line:\r\n mvtype = 'terminal_NI'\r\n else:\r\n mvtype = 'non-terminal'\r\n break\r\n\r\n pl_file_name = nodes_file_name.replace('.nodes', '.pl')\r\n with open(pl_file_name) as p:\r\n for num, line in enumerate(p):\r\n if node_name in line:\r\n data = line.split()\r\n if node_name == data[0]:\r\n node.append(float(data[1]))\r\n node.append(float(data[2]))\r\n break\r\n\r\n node.append(mvtype)\r\n return node", "def test_iou_multi(self):\n t = init_tensors()\n metrics_dict = create_metrics_dict(3)\n metrics_dict = iou(t['pred_multi'], \n t['lbl_multi'], \n batch_size=2, \n num_classes=3,\n metric_dict=metrics_dict,\n ignore_index=-1)\n assert \"{:.6f}\".format(metrics_dict['iou'].val) == \"0.185185\"", "def test_get_hyperflex_node_list(self):\n pass", "def test_model_with_input_last_onnx_node(self):\n\n roi_model = RoiModel(height=7, width=7, scale=0.25)\n x = torch.rand(1, 1, 6, 6)\n rois = torch.tensor([ [0, -2.0, -2.0, 22.0, 22.0], ])\n dummy_input = (x, rois)\n onnx_utils.OnnxSaver.set_node_names('./data/roi.onnx', roi_model, dummy_input, is_conditional=False,\n module_marker_map={},\n onnx_export_args=(onnx_utils.OnnxExportApiArgs(opset_version=11))\n )\n onnx_model = onnx.load('./data/roi.onnx')\n end_nodes = [ n.name for n in onnx_model.graph.node if 'end' in n.name]\n assert len(end_nodes) == 1", "def test_get_hyperflex_cluster_by_moid(self):\n pass", "def test_get_node_properties(self):\n pass", "def nodes_mapped(instance):\n G, mapping = instance.network()\n node_dict = instance.network_nodes_species()\n\n node_dict_mapped 
= {}\n\n for old_label, new_label in mapping.items():\n for node, ammentity in node_dict.items():\n if old_label == node:\n node_dict_mapped[new_label] = ammentity\n\n return node_dict_mapped", "def test_get_node_state(self):\n pass", "def test_make_otu_table_taxonomy(self):\r\n otu_map_lines = \"\"\"0\tABC_0\tDEF_1\r\n1\tABC_1\r\nx\tGHI_2\tGHI_3\tGHI_77\r\nz\tDEF_3\tXYZ_1\"\"\".split('\\n')\r\n taxonomy = {'0': ['Bacteria', 'Firmicutes'],\r\n 'x': ['Bacteria', 'Bacteroidetes']}\r\n obs = make_otu_table(\r\n otu_map_lines,\r\n taxonomy,\r\n constructor=DenseOTUTable)\r\n exp = \"\"\"{\"rows\": [{\"id\": \"0\", \"metadata\": {\"taxonomy\": [\"Bacteria\", \"Firmicutes\"]}}, {\"id\": \"1\", \"metadata\": {\"taxonomy\": [\"None\"]}}, {\"id\": \"x\", \"metadata\": {\"taxonomy\": [\"Bacteria\", \"Bacteroidetes\"]}}, {\"id\": \"z\", \"metadata\": {\"taxonomy\": [\"None\"]}}], \"format\": \"Biological Observation Matrix 0.9dev\", \"data\": [[1.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 3.0, 0.0], [0.0, 1.0, 0.0, 1.0]], \"columns\": [{\"id\": \"ABC\", \"metadata\": null}, {\"id\": \"DEF\", \"metadata\": null}, {\"id\": \"GHI\", \"metadata\": null}, {\"id\": \"XYZ\", \"metadata\": null}], \"generated_by\": \"QIIME 1.4.0-dev, svn revision 2532\", \"matrix_type\": \"dense\", \"shape\": [4, 4], \"format_url\": \"http://biom-format.org\", \"date\": \"2011-12-21T00:19:30.961477\", \"type\": \"OTU table\", \"id\": null, \"matrix_element_type\": \"float\"}\"\"\"\r\n self.assertEqual(\r\n parse_biom_table(obs.split('\\n')),\r\n parse_biom_table(exp.split('\\n')))", "def create_mock_data(number_of_nodes, edge_per_node, in_channels):\n graph = nx.watts_strogatz_graph(number_of_nodes, edge_per_node, 0.5)\n edge_index = torch.LongTensor(np.array([edge for edge in graph.edges()]).T)\n X = torch.FloatTensor(np.random.uniform(-1, 1, (number_of_nodes, in_channels)))\n return X, edge_index", "def test_nmap_get_kind(self):\n assert_equal(self.test_nmap.get_kind(), 'mpnmap')", "def map_name_and_data(cls, onnx_model: onnx.ModelProto):\n params = {}\n for init in onnx_model.graph.initializer:\n params[init.name] = numpy_helper.to_array(init)\n for node in onnx_model.graph.node:\n # If two zero_points are identity, one is a reference to the other\n # after optimized by onnx.\n if node.op_type == 'Identity' and len(node.input) == 1 and \\\n node.input[0] in params:\n params[node.output[0]] = copy.deepcopy(params[node.input[0]])\n if node.op_type == 'Constant':\n for attr in node.attribute:\n if attr.name == 'value':\n params[node.output[0]] = numpy_helper.to_array(attr.t)\n return params", "def test_get_pci_device_by_moid(self):\n pass", "def get_ntype_featnames(ntype_name, schema_map):\n node_data = schema_map[constants.STR_NODE_DATA]\n feats = node_data.get(ntype_name, {})\n return [feat for feat in feats]", "def nodes(topology):\n return topology.nodes()", "def test_get_node_internal_ip_address(self):\n pass", "def get_ioport_names():\n return sorted(set(get_input_names()) & set(get_output_names()))", "def test_reduce_mean_00():\n\n class ReduceMeanTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"ReduceMean\", inputs=[\"v0\"], outputs=[\"v1\"])\n inputs = [info(\"v0\", TensorProto.FLOAT, (1, 3, 4, 5))]\n outputs = [info(\"v1\", TensorProto.FLOAT, (1, 1, 1, 1))]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n v0 = np.random.rand(1, 3, 4, 5).astype(np.float32)\n\n outputs = [\"v1\"]\n ReduceMeanTester({\"v0\": v0}, 
outputs).run()", "def get_node_info(rdb, fk_array, ntype, hint, taxid):\n hint = None if hint == '' or hint is None else hint.upper()\n taxid = None if taxid == '' or taxid is None else str(taxid)\n if ntype == '':\n ntype = None\n\n if ntype is None:\n res_arr = rdb.mget(['::'.join(['stable', str(fk), 'type']) for fk in fk_array])\n fk_prop = [fk for fk, res in zip(fk_array, res_arr) if res is not None\n and res.decode() == 'Property']\n fk_gene = [fk for fk, res in zip(fk_array, res_arr) if res is not None\n and res.decode() == 'Gene']\n if fk_prop and fk_gene:\n raise ValueError(\"Mixture of property and gene nodes.\")\n ntype = 'Property' if fk_prop else 'Gene'\n\n if ntype == \"Gene\":\n stable_array = conv_gene(rdb, fk_array, hint, taxid)\n elif ntype == \"Property\":\n stable_array = fk_array\n else:\n raise ValueError(\"Invalid ntype\")\n\n return list(zip(fk_array, *node_desc(rdb, stable_array)))", "def test_parse_taxonomy_to_otu_metadata_alt_labels(self):\r\n def f(v):\r\n return 1. + float(v)\r\n example_tax = \\\r\n \"\"\"412 PC.635_647\t0.0\r\n319 PC.355_281\t0.970\r\n353 PC.634_154\t0.830\r\n17 PC.607_302\t0.960\r\n13\t0.870\r\n338 PC.593_1314\t0.990\"\"\"\r\n actual = parse_taxonomy_to_otu_metadata(\r\n example_tax.split('\\n'),\r\n labels=['something'],\r\n process_fs=[f])\r\n expected = {'412': {'something': 1.0},\r\n '319': {'something': 1.970},\r\n '353': {'something': 1.830},\r\n '17': {'something': 1.960},\r\n '13': {'something': 1.870},\r\n '338': {'something': 1.990}}\r\n self.assertEqual(actual, expected)", "def test_get_node_hardware_fast(self):\n pass", "def explore_shapes(node):\n nodeshape=node.getShape()\n print( \"shape: \",nodeshape)\n for i in range(len(nodeshape)):\n axis=get_mds_axis(node,i,strict=False)\n try:\n print( axis.getNodeName(),axis.getShape())\n except:\n print(\"blank: \",axis.getShape())", "def test_get_node_sled(self):\n pass", "def test_get_pci_link_by_moid(self):\n pass", "def test_combine_mappings(self):\r\n\r\n self.tmp_dir = mkdtemp(dir=\"./\", suffix=\"/\")\r\n\r\n combine_mappings(\r\n fasta,\r\n denoiser_mapping,\r\n denoised_seqs,\r\n otu_picker_map,\r\n self.tmp_dir)\r\n\r\n observed_otu_map = \"\".join(\r\n list(open(self.tmp_dir + \"/denoised_otu_map.txt\")))\r\n\r\n expected_otu_map = \"\"\"1:\\tS1_1\\tS1_2\\tS2_4\\tS2_5\r\n2:\\tS2_3\\tS1_6\r\n\"\"\"\r\n self.assertEqual(observed_otu_map, expected_otu_map)\r\n\r\n observed_fasta = \"\".join(\r\n list(open(self.tmp_dir + \"/denoised_all.fasta\")))\r\n expected_fasta = \"\"\">S1_1 Read1\r\nAAA\r\n>S1_2 Read2\r\nTTT\r\n>S2_3 Read3\r\nGGG\r\n\"\"\"\r\n self.assertEqual(observed_fasta, expected_fasta)", "def test_node_name(self):\n xmlns = {\n \"a\": \"_a\",\n \"g\": \"_g\"\n }\n self.assertEqual(\n utils._node_name(\"a\", \"g\", xmlns),\n (False, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"a@a\", \"g\", xmlns),\n (False, \"a\", \"{_a}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@@a\", \"g\", xmlns),\n (True, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@a@a\", \"g\", xmlns),\n (True, \"a\", \"{_a}a\")\n )\n # something not equal to _\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"1@a@a\", \"g\", xmlns),\n # too many @\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"@@a@a\", \"g\", xmlns),", "def export_onnx():\r\n model = DivideBy255()\r\n X = torch.randn(1, 3, 256, 256, dtype=torch.float)\r\n onnx_name = \"DivideBy255.onnx\"\r\n\r\n print(f\"Generating {onnx_name}\")\r\n 
torch.onnx.export(\r\n model,\r\n (X),\r\n onnx_name,\r\n opset_version=10,\r\n do_constant_folding=True,\r\n # verbose=True,\r\n # input_names=['Identity_1', 'Identity'],\r\n output_names=['input_1']\r\n )", "def display_nodes(nodes):\n for node in nodes:\n print(f'{node.name} has an IP address of {node.address}.')", "def get_annotation_names(viewer):\n\n layer_nodes_name = None\n layer_edges_name = None\n for layer in viewer.layers:\n if isinstance(layer, napari.layers.points.points.Points):\n layer_nodes_name = layer.name\n elif isinstance(layer, napari.layers.shapes.shapes.Shapes):\n layer_edges_name = layer.name\n if layer_nodes_name is not None and layer_edges_name is not None:\n break\n return layer_nodes_name, layer_edges_name", "def test_external_registry_mappings(self):\n res = self.manager.get_external_mappings(\"obofoundry\", \"bioportal\")\n self.assertIsInstance(res, MappingsDiff)\n self.assertEqual(\"obofoundry\", res.source_metaprefix)\n self.assertEqual(\"bioportal\", res.target_metaprefix)\n self.assertIn(\"gaz\", res.mappings)\n self.assertEqual(\"GAZ\", res.mappings[\"gaz\"])\n # This is an obsolete OBO Foundry ontology so it won't get uploaded to BioPortal\n self.assertIn(\"loggerhead\", res.source_only)\n # This is a non-ontology so it won't get in OBO Foundry\n self.assertIn(\"DCTERMS\", res.target_only)", "def test_get_node_status(self):\n pass", "def get_target_nodes(self):\n url = 'https://raw.githubusercontent.com/ChandlerBang/Pro-GNN/master/nettack/{}_nettacked_nodes.json'.format(self.name)\n json_file = osp.join(self.root,\n '{}_nettacked_nodes.json'.format(self.name))\n\n if not osp.exists(json_file):\n self.download_file(url, json_file)\n # with open(f'/mnt/home/jinwei2/Projects/nettack/{dataset}_nettacked_nodes.json', 'r') as f:\n with open(json_file, 'r') as f:\n idx = json.loads(f.read())\n return idx[\"attacked_test_nodes\"]", "def test_good_node():\n node_a = Node({'A':['B','C']})\n assert node_a.name == 'A'\n assert node_a.connections == ['B','C']", "def import_onnx(onnx_model: \"onnx.ModelProto\") -> Graph:\n from onnx_graphsurgeon.importers.onnx_importer import OnnxImporter\n\n return OnnxImporter.import_graph(onnx_model.graph, opset=OnnxImporter.get_opset(onnx_model))", "def test_get_mosaics_names(self):\n pass", "def input_nodes(self):\n pass", "def test_reduce_mean_02():\n\n class ReduceMeanTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"ReduceMean\", inputs=[\"v0\"], outputs=[\"v1\"], axes=[1, 2])\n inputs = [info(\"v0\", TensorProto.FLOAT, (1, 3, 4, 5))]\n outputs = [info(\"v1\", TensorProto.FLOAT, [1, 1, 1, 5])]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n v0 = np.random.rand(1, 3, 4, 5).astype(np.float32)\n\n outputs = [\"v1\"]\n ReduceMeanTester({\"v0\": v0}, outputs).run()", "def get_output_nodes(self):\n \n\n self.buildings = self.dataset.groups['buildings']\n self.building_nodes = self.buildings.groups['nodes']\n\n eta_output_added = getattr(self.building_nodes,'eta_output_added')\n uv_output_added = getattr(self.building_nodes,'uv_output_added')\n\n eta = []\n uv = []\n nodeIds = []\n time = []\n \n if(eta_output_added or uv_output_added ):\n time = self.building_nodes.variables['time'][:].tolist()\n nodeIds = self.building_nodes.variables['id'][:].tolist()\n if eta_output_added: eta = self.building_nodes.variables['eta'][:].tolist()\n if uv_output_added: uv = self.building_nodes.variables['uv'][:].tolist()\n\n \n return nodeIds,eta, uv, time", 
"def test_get_hyperflex_cluster_profile_by_moid(self):\n pass", "def get_images_and_labels_nc():\n refs = get_ref_df()\n images = {}\n for _, data in refs.iterrows():\n if data['ProbeFileName'] in images:\n continue\n im = data['ProbeFileName']\n images[im] = 1 if data['IsTarget'] == 'Y' else 0\n return images", "def nodes(xmrs):\n nodes = []\n _props = xmrs.properties\n varsplit = sort_vid_split\n for p in xmrs.eps():\n sortinfo = None\n iv = p.intrinsic_variable\n if iv is not None:\n sort, _ = varsplit(iv)\n sortinfo = _props(iv)\n sortinfo[CVARSORT] = sort\n nodes.append(\n Node(p.nodeid, p.pred, sortinfo, p.lnk, p.surface, p.base, p.carg)\n )\n return nodes", "def test_parse_otu_map(self):\r\n otu_map_f = \"\"\"otu1\ts1_0\ts2_1\ts1_99\r\n2\ts1_9\ts5_2 comment\ts3_99\t1_3\ts1_75\r\notu3\ts8_7\ts2_5\"\"\".split('\\n')\r\n expected_map = {(0, 0): 2, (0, 1): 1,\r\n (1, 0): 2, (1, 2): 1, (1, 3): 1, (1, 4): 1,\r\n (2, 5): 1, (2, 1): 1}\r\n expected_sids = ['s1', 's2', 's5', 's3', '1', 's8']\r\n expected_oids = ['otu1', '2', 'otu3']\r\n actual = parse_otu_map(otu_map_f)\r\n self.assertDictEqual(actual[0], expected_map)\r\n self.assertItemsEqual(actual[1], expected_sids)\r\n self.assertItemsEqual(actual[2], expected_oids)", "def test_read_net_namespace(self):\n pass", "def test_read_cluster_network(self):\n pass", "def create_node_name(input_node, mode=tuple):\n key = input_node.fullname\n if len(input_node.out_ports()) > 1:\n port_number = input_node.in_port(0).get_source().out\n key = (input_node.fullname, port_number) if mode == tuple else f\"{input_node.fullname}.{port_number}\"\n return key", "def nodules_connection(label_data, label_header):\n\n\n las_labels = measure.label(label_data,\n neighbors=8,\n background=0,\n return_num=True)\n\n las_labels_nzero = np.nonzero(las_labels[0])\n [xdif, ydif, zdif] = [np.amax(las_labels_nzero[0])-np.amin(las_labels_nzero[0]),\n np.amax(las_labels_nzero[1])-np.amin(las_labels_nzero[1]),\n np.amax(las_labels_nzero[2])-np.amin(las_labels_nzero[2])]\n\n # conversion pixels to mm\n dims = label_header['pixdim']\n if label_header['xyzt_units'] == 10:\n #dimensions in mm\n print('xyzt_units=10')\n xdif=dims[1]*xdif\n ydif=dims[2]*ydif\n zdif=dims[3]*zdif\n\n\n return las_labels,[xdif,ydif,zdif]", "def test_make_otu_table_no_taxonomy(self):\r\n otu_map_lines = \"\"\"0\tABC_0\tDEF_1\r\n1\tABC_1\r\nx\tGHI_2\tGHI_3\tGHI_77\r\nz\tDEF_3\tXYZ_1\"\"\".split('\\n')\r\n obs = make_otu_table(otu_map_lines, constructor=DenseOTUTable)\r\n exp = \"\"\"{\"rows\": [{\"id\": \"0\", \"metadata\": null}, {\"id\": \"1\", \"metadata\": null}, {\"id\": \"x\", \"metadata\": null}, {\"id\": \"z\", \"metadata\": null}], \"format\": \"Biological Observation Matrix 0.9dev\", \"data\": [[1, 1, 0, 0], [1, 0, 0, 0], [0, 0, 3, 0], [0, 1, 0, 1]], \"columns\": [{\"id\": \"ABC\", \"metadata\": null}, {\"id\": \"DEF\", \"metadata\": null}, {\"id\": \"GHI\", \"metadata\": null}, {\"id\": \"XYZ\", \"metadata\": null}], \"generated_by\": \"QIIME 1.4.0-dev, svn revision 2532\", \"matrix_type\": \"dense\", \"shape\": [4, 4], \"format_url\": \"http://biom-format.org\", \"date\": \"2011-12-21T00:49:15.978315\", \"type\": \"OTU table\", \"id\": null, \"matrix_element_type\": \"float\"}\"\"\"\r\n self.assertEqual(\r\n parse_biom_table(obs.split('\\n')),\r\n parse_biom_table(exp.split('\\n')))", "def test_ipam_prefixes_read(self):\n pass", "def hierarchical( x, output_prefix, labels_to_register=[2,3,4,5], is_test=False, verbose=True ):\n if verbose:\n print(\"Read\")\n tfn = get_data('T_template0', 
target_extension='.nii.gz' )\n tfnw = get_data('T_template0_WMP', target_extension='.nii.gz' )\n tlrfn = get_data('T_template0_LR', target_extension='.nii.gz' )\n bfn = antspynet.get_antsxnet_data( \"croppedMni152\" )\n\n ##### read images and do simple bxt ops\n templatea = ants.image_read( tfn )\n if verbose:\n print(\"bxt\")\n templatea = ( templatea * antspynet.brain_extraction( templatea, 't1' ) ).iMath( \"Normalize\" )\n templateawmprior = ants.image_read( tfnw )\n templatealr = ants.image_read( tlrfn )\n templateb = ants.image_read( bfn )\n templateb = ( templateb * antspynet.brain_extraction( templateb, 't1' ) ).iMath( \"Normalize\" )\n imgbxt = brain_extraction( x )\n img = x * imgbxt\n\n if verbose:\n print(\"rbp\")\n\n # this is an unbiased method for identifying predictors that can be used to\n # rank / sort data into clusters, some of which may be associated\n # with outlierness or low-quality data\n templatesmall = ants.resample_image( templateb, (91,109,91), use_voxels=True )\n rbp = random_basis_projection( img, templatesmall )\n\n if verbose:\n print(\"intensity\")\n\n ##### intensity modifications\n img = ants.iMath( img, \"Normalize\" ) * 255.0\n img = ants.denoise_image( img, imgbxt, noise_model='Gaussian')\n img = ants.n4_bias_field_correction( img ).iMath(\"Normalize\")\n\n # optional - quick look at result\n bxt_png = output_prefix + \"_brain_extraction_dnz_n4_view.png\"\n ants.plot(img,axis=2,ncol=8,nslices=24, crop=True, black_bg=False,\n filename = bxt_png )\n\n if verbose:\n print(\"hemi\")\n\n # assuming data is reasonable quality, we should proceed with the rest ...\n mylr = label_hemispheres( img, templatea, templatealr )\n\n if verbose:\n print(\"parcellation\")\n\n ##### hierarchical labeling\n myparc = deep_brain_parcellation( img, templateb,\n do_cortical_propagation = not is_test, verbose=False )\n\n ##### accumulate data into data frames\n hemi = map_segmentation_to_dataframe( \"hemisphere\", myparc['hemisphere_labels'] )\n tissue = map_segmentation_to_dataframe( \"tissues\", myparc['tissue_segmentation'] )\n dktl = map_segmentation_to_dataframe( \"lobes\", myparc['dkt_lobes'] )\n dktp = map_segmentation_to_dataframe( \"dkt\", myparc['dkt_parcellation'] )\n dktc = None\n if not is_test:\n dktc = map_segmentation_to_dataframe( \"dkt\", myparc['dkt_cortex'] )\n\n tissue_seg_png = output_prefix + \"_seg.png\"\n ants.plot( img, myparc['tissue_segmentation'], axis=2, nslices=21, ncol=7,\n alpha=0.6, filename=tissue_seg_png,\n crop=True, black_bg=False )\n\n if verbose:\n print(\"WMH\")\n\n ##### below here are more exploratory nice to have outputs\n myhypo = t1_hypointensity(\n img,\n myparc['tissue_segmentation'], # segmentation\n myparc['tissue_probabilities'][3], # wm posteriors\n templatea,\n templateawmprior )\n\n if verbose:\n print(\"registration\")\n\n ##### traditional deformable registration as a high-resolution complement to above\n wm_tractsL = None\n wm_tractsR = None\n wmtdfL = None\n wmtdfR = None\n reg = None\n if labels_to_register is not None:\n reg = hemi_reg(\n input_image = img,\n input_image_tissue_segmentation = myparc['tissue_segmentation'],\n input_image_hemisphere_segmentation = mylr,\n input_template=templatea,\n input_template_hemisphere_labels=templatealr,\n output_prefix = output_prefix + \"_SYN\",\n labels_to_register = labels_to_register,\n is_test=is_test )\n if verbose:\n print(\"wm tracts\")\n ##### how to use the hemi-reg output to generate any roi value from a template roi\n wm_tracts = ants.image_read( get_data( 
\"wm_major_tracts\", target_extension='.nii.gz' ) )\n wm_tractsL = ants.apply_transforms( img, wm_tracts, reg['synL']['invtransforms'],\n interpolator='genericLabel' ) * ants.threshold_image( mylr, 1, 1 )\n wm_tractsR = ants.apply_transforms( img, wm_tracts, reg['synR']['invtransforms'],\n interpolator='genericLabel' ) * ants.threshold_image( mylr, 2, 2 )\n wmtdfL = map_segmentation_to_dataframe( \"wm_major_tracts\", wm_tractsL )\n wmtdfR = map_segmentation_to_dataframe( \"wm_major_tracts\", wm_tractsR )\n\n if verbose:\n print(\"hippocampus\")\n\n ##### specialized labeling for hippocampus\n ntries = 10\n if is_test:\n ntries = 1\n hippLR = deep_hippo( img, templateb, ntries )\n\n mydataframes = {\n \"hemispheres\":hemi,\n \"tissues\":tissue,\n \"dktlobes\":dktl,\n \"dktregions\":dktp,\n \"dktcortex\":dktc,\n \"wmtracts_left\":wmtdfL,\n \"wmtracts_right\":wmtdfR,\n \"wmh\":myhypo['wmh_summary']\n }\n\n outputs = {\n \"brain_n4_dnz\": img,\n \"brain_n4_dnz_png\": bxt_png,\n \"brain_extraction\": imgbxt,\n \"tissue_seg_png\": tissue_seg_png,\n \"rbp\": rbp,\n \"left_right\": mylr,\n \"dkt_parc\": myparc,\n \"registration\":reg,\n \"hippLR\":hippLR,\n \"white_matter_hypointensity\":myhypo,\n \"wm_tractsL\":wm_tractsL,\n \"wm_tractsR\":wm_tractsR,\n \"dataframes\": mydataframes\n }\n\n return outputs", "def test_format_otu_map_prefix(self):\r\n actual = sorted(format_otu_map(self.otu_map1, 'my.otu.'))\r\n expected = sorted(['my.otu.0\\tseq1\\tseq2\\tseq5\\n',\r\n 'my.otu.1\\tseq3\\tseq4\\n',\r\n 'my.otu.2\\tseq6\\tseq7\\tseq8\\n'])\r\n self.assertEqual(actual, expected)", "def getJumpTablesFromFunc(func_ea):", "def get_nodeName(taxonomy, nodeId):", "def map_to_homo_nid(self, ids, ntype):\n ...", "def map_to_homo_nid(self, ids, ntype):\n ...", "def map():", "def get_output_names(hf):\n return sorted(map(str, hf['/output/data'].keys()))", "def get_input_node_names(self, node_name):\n # (str) -> list\n node = self.get_node(node_name)\n return node.bottoms" ]
[ "0.65607023", "0.6310749", "0.6268617", "0.6265941", "0.6193158", "0.60374105", "0.5948364", "0.5849513", "0.57718235", "0.57630116", "0.5761633", "0.5725777", "0.57182246", "0.56293416", "0.5621835", "0.56069314", "0.55362374", "0.55332834", "0.5524223", "0.5512426", "0.5477677", "0.5398579", "0.5384504", "0.53371155", "0.53194445", "0.5220795", "0.5217265", "0.52167755", "0.52163047", "0.5205206", "0.51816994", "0.51334", "0.5132416", "0.5124825", "0.51131713", "0.50887007", "0.5083827", "0.50822246", "0.50810933", "0.50806975", "0.5078235", "0.5077233", "0.50660235", "0.5061927", "0.50611347", "0.5055411", "0.5036149", "0.5029177", "0.5025956", "0.50250566", "0.50237465", "0.5021974", "0.50212693", "0.5018771", "0.5014207", "0.50072527", "0.4999943", "0.49804553", "0.4978868", "0.49764818", "0.4976019", "0.49707448", "0.4967864", "0.49646023", "0.49638963", "0.49471614", "0.49457514", "0.4945665", "0.49419722", "0.49410054", "0.4940016", "0.49275628", "0.4925599", "0.49206257", "0.49120927", "0.49100584", "0.49087158", "0.4897347", "0.48934278", "0.4888073", "0.48817545", "0.48804563", "0.48783934", "0.4875328", "0.4869777", "0.4867076", "0.4863706", "0.48622727", "0.48614165", "0.4860732", "0.48600692", "0.48592138", "0.48555255", "0.4845803", "0.48405743", "0.48399803", "0.48399803", "0.48398453", "0.4839807", "0.4836941" ]
0.7777733
0
test onnx based utility to find mapping between onnx node names and io tensors when more than one onnx node maps to the same torch module
def test_single_pytorch_module_mapping_to_many_onnx_nodes(self):
    AimetLogger.set_level_for_all_areas(logging.DEBUG)

    class TwoLayerLstmModel(torch.nn.Module):
        """
        Model using torch.nn.LSTM module
        """
        def __init__(self):
            super(TwoLayerLstmModel, self).__init__()
            self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)

        def forward(self, x, hx=None):
            return self.lstm(x, hx)

    model_name = 'multilayer_lstm'
    model = TwoLayerLstmModel()
    dummy_input = torch.randn(10, 1, 3)
    torch.onnx.export(model, dummy_input, './data/' + model_name + '.onnx')
    onnx_utils.OnnxSaver.set_node_names('./data/' + model_name + '.onnx', model, dummy_input, is_conditional=False,
                                        module_marker_map={})

    onnx_model = onnx.load('./data/' + model_name + '.onnx')
    lstm_nodes = [node for node in onnx_model.graph.node if node.op_type == 'LSTM']
    assert 3 == len(lstm_nodes)

    node_to_io_dict, _ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)
    assert isinstance(node_to_io_dict['lstm#root_node'], list)
    assert 3 == len(node_to_io_dict['lstm#root_node'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_onnx_node_name_to_input_output_names_util(self):\n model = models.resnet18(pretrained=False)\n dummy_input = torch.randn(1, 3, 224, 224)\n torch.onnx.export(model, dummy_input, './data/resnet18.onnx')\n onnx_utils.OnnxSaver.set_node_names('./data/resnet18.onnx', model, dummy_input, is_conditional=False,\n module_marker_map={})\n onnx_model = onnx.load('./data/resnet18.onnx')\n\n # Get Dict mapping node name to the input and output names\n node_to_io_dict,_ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n\n node_0 = onnx_model.graph.node[0]\n assert node_0.input == node_to_io_dict[node_0.name].inputs\n assert node_0.output == node_to_io_dict[node_0.name].outputs", "def test_set_unique_node_names(self):\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors", "def test_non_leaf_module_names(self):\n class Net(torch.nn.Module):\n \"\"\"\n Model using multiply as functional and module at different depths\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer = HierarchicalMultiplyModule()\n\n def forward(self, x):\n return self.layer(x)\n\n model = Net()\n dummy_input = 
torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n expected_names = [\n # names compatible with torch 1.9.1 version (should be removed in the future)\n 'layer.mul1.mul',\n 'layer.mul1.Mul_7',\n 'layer.mul2.mul',\n 'layer.mul2.Mul_15',\n 'layer.Mul_18',\n \n # names compatible with torch 1.13.1 version \n '/layer/mul1/Mul',\n '/layer/mul2/Mul',\n '/layer/Mul'\n ]\n for node in onnx_model.graph.node:\n assert 'Constant' in node.name or node.name in expected_names\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def map_output_and_node(cls, onnx_model: onnx.ModelProto):\n output2node = dict()\n for node in onnx_model.graph.node:\n for output_name in node.output:\n output2node[output_name] = node\n return output2node", "def graph_node_names_details(model):\n\n node_details = namedtuple('node_details', ['node', 'outputs'])\n node_names_details = {}\n for initializer in model.initializer():\n initializer_name = initializer.name\n each_node = node_details(node=initializer, outputs=[])\n if initializer_name not in node_names_details:\n each_node.outputs.extend(get_initializer_children_names(model, initializer))\n node_names_details[initializer_name] = each_node\n for node in model.nodes():\n node_name = node.name\n output_names = node.output\n # onnx output has different name from node name\n for output_name in output_names:\n if output_name not in node_names_details:\n node_names_details[output_name] = node_name\n each_node = node_details(node=node, outputs=[])\n if node_name not in node_names_details:\n each_node.outputs.extend(get_node_children_names(model, node))\n node_names_details[node_name] = each_node\n for graph_input in model.graph().input:\n outputs = []\n node_name = graph_input.name\n for k, v in node_names_details.items():\n try:\n if node_name in v.node.input:\n outputs.append(k)\n except BaseException:\n continue\n each_node = node_details(node=graph_input, outputs=outputs)\n # if node_name not in node_names_details:\n node_names_details[node_name] = each_node\n\n return node_names_details", "def test_export_dict_input_output(self):\n\n\n class Net(torch.nn.Module):\n \"\"\"\n Model using multiply as functional and module at different depths\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer = InputOutputDictModel()\n\n def forward(self, x):\n return self.layer(x)\n\n model = Net()\n\n # Add an empty dictionary as the last element to not treat as named arguments.\n # see torch.onnx.export() API for more details.\n dummy_input = (\n {'a': torch.randn(1, 10, 10, 10),\n 'b': torch.randn(1, 10, 10, 10),\n 'c': torch.randn(1, 10, 10, 10)\n }, {}\n )\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n for node in onnx_model.graph.node:\n print(node.name)\n assert node.name.startswith('layer')", "def _get_input_output_node_names(nodes):\n input_names, output_names = set(), set()\n extension_output_names = set()\n for node in nodes:\n tf_node = node if isinstance(node,\n TensorflowNode) else TensorflowNode(node)\n output_names.add(node.name)\n # Add outputs for Split, Switch 
TensorArrayV3\n if tf_node.op_type == \"Split\":\n for i in range(1, tf_node.attr[\"num_split\"]):\n output_names.add(tf_node.name + \":{}\".format(i))\n if tf_node.op_type == \"Switch\":\n output_names.add(tf_node.name + \":1\")\n extension_output_names.add((tf_node.name, tf_node.name + \":1\"))\n if tf_node.op_type == \"TensorArrayV3\":\n output_names.add(tf_node.name + \":1\")\n extension_output_names.add((tf_node.name, tf_node.name + \":1\"))\n input_names.update(\n set([inp if inp[0] != \"^\" else inp[1:] for inp in tf_node.inputs]))\n inputs = input_names - output_names\n outputs = output_names - input_names\n while extension_output_names:\n ext_names = extension_output_names.pop()\n for name in ext_names:\n if name in outputs:\n outputs -= set(ext_names)\n break\n inputs.discard(None)\n return list(inputs), list(outputs)", "def test_set_node_name_for_matmul_add_linear(self, export_args):\n class Linear(torch.nn.Module):\n def __init__(self):\n super(Linear, self).__init__()\n self.linear = torch.nn.Linear(3, 2)\n\n def forward(self, inp):\n x = self.linear(inp)\n return x\n\n model = Linear()\n # Using an input to linear op with dimension != 2 causes torch to use matmul->add instead of gemm op\n onnx_path = './data/MyModel.onnx'\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n expected_node_names = ['linear', 'linear#1.end']\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n for name in expected_node_names:\n assert name in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n # Check that gemm still works as expected\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n assert 'linear' in actual_node_names\n assert 'linear#1' not in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def node_mapping(self):\n ...", "def test_get_hyperflex_node_by_moid(self):\n pass", "def test_naming_for_model_with_deep_graph(self):\n\n model = models.resnet152(pretrained=False)\n dummy_input = torch.randn(1, 3, 224, 224)\n\n onnx_path= './data/' + model.__class__.__name__ + '.onnx'\n with onnx_simply(True):\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input,\n is_conditional=False, module_marker_map={})\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_names(onnx_model)\n\n counts = defaultdict(int)\n top_level_nodes = tuple(['conv1', 'bn1', 'relu', 'maxpool', 'avgpool', 'Flatten_', '/Flatten', 'fc'])\n for node in onnx_model.graph.node:\n if node.name.startswith(top_level_nodes):\n continue\n elif '.' 
in node.name:\n layer_name = '.'.join(node.name.split('#')[0].split('.')[:-1])\n counts[layer_name] += 1\n elif node.name.startswith('/'):\n layer_name = '.'.join(node.name.split('/')[1:-1])\n counts[layer_name] += 1\n\n for name, counts in counts.items():\n if 'downsample' in name:\n assert counts == 2\n else:\n print(name, counts)\n assert counts == 10\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def get_nodes(\n nodes: Dict[str, Node] = None,\n io_mapping: Dict[str, Dict] = None\n ) -> Tuple[Dict[str, Node], Dict[str, Dict]]:\n raise NotImplementedError", "def test_get_hyperflex_node_profile_by_moid(self):\n pass", "def get_node_outputs(node_path):\n \n item = ix.get_item(node_path)\n\n obj_array = ix.api.OfItemArray(1)\n obj_array[0] = item\n item_outputs = ix.api.OfItemVector()\n\n ix.application.get_factory().get_items_outputs(obj_array, item_outputs, False)\n\n node_outputs = []\n for item_ in range(item_outputs.get_count()):\n\n for i in range(item_outputs[item_].get_attribute_count()):\n\n attr= item_outputs[item_].get_attribute(i)\n\n if attr.get_texture():\n\n if str(attr.get_texture()) == item.get_full_name():\n\n #attrs[attr] = target_node.get_full_name()\n node_outputs.append(attr)\n return node_outputs", "def test_get_node_outputs(self):\n pass", "def check_onnx_node_name_uniqueness(onnx_model):\n onnx_node_names = [node.name for node in onnx_model.graph.node]\n assert len(onnx_node_names) == len(set(onnx_node_names)), f'list size mismatch, check if names are unique'", "def nodes(evt, node=None):\n nodenames = []\n\n if node is None:\n root = evt.retrieveObject('')\n node = root.registry()\n\n if node.object():\n nodenames.append(node.identifier())\n for l in evt.leaves(node):\n # skip a location that takes forever to load\n # XXX How to detect these automatically??\n if 'Swum' in l.identifier():\n continue\n \n temp = evt[l.identifier()]\n nodenames += nodes(evt, l)\n else:\n nodenames.append(node.identifier())\n\n return nodenames", "def test_get_node_sensors(self):\n pass", "def onnx_model_node_loader(model_path):\n # these imports are done in the function because they are slow\n import onnx\n from onnx_tf.backend import prepare\n onnx_model = onnx.load(model_path) # load onnx model\n tf_model_rep = prepare(onnx_model, gen_tensor_dict=True)\n label_input_node = tf_model_rep.inputs[0]\n label_output_node = tf_model_rep.outputs[0]\n dtype_input_node = tf_model_rep.tensor_dict[f'{label_input_node}'].dtype\n\n return onnx_model, dtype_input_node, label_output_node", "def generate_mxp_graph(model_name, activations, stats, first_node_name, last_node_name, io_info,\n input_type, ignore_strides=False, inline_depthwise=False, verbose=False):\n network = {}\n network['layers'] = []\n network['test_input'] = None\n network['test_output'] = None\n network['scale'] = 1.0\n\n model = onnx.load(model_name)\n nodes = model.graph.node\n inits = model.graph.initializer\n\n idx = get_node_index(nodes, first_node_name)\n if idx == None:\n if verbose:\n print('{} does not exist\\nopen {} in Netron + check spelling'.format(first_node_name, mname))\n assert(idx != None)\n\n last_idx = get_node_index(nodes, last_node_name)\n if last_idx == None:\n if verbose:\n print('{} does not exist\\nopen {} in Netron + check spelling'.format(last_node_name, mname))\n assert(last_idx != None)\n\n while True:\n node = nodes[idx]\n if verbose:\n print(node.name, node.op_type)\n src_node = get_node_source(nodes, node.input[0])\n if src_node == None:\n input_id = node.input[0]\n else:\n input_id = 
src_node.output[0]\n output_id = node.output[0]\n\n\n if len(network['layers']) == 0:\n previous = None\n else:\n previous = network['layers'][-1]\n for layer in network['layers']:\n if layer['output_id'] == input_id:\n previous = layer\n\n input_shapes, output_shapes = get_shapes(activations, stats, node)\n assert len(output_shapes) == 1, \"Multi-output nodes not supported\"\n output_shape = output_shapes[0]\n if node.op_type == \"Conv\":\n c, m, n = input_shapes[0]\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n assert(get_attr(node, 'pads') == None or not any(get_attr(node, 'pads')))\n\n group = get_attr(node, 'group')\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n dilations = np.asarray(get_attr(node, 'dilations')).tolist()\n if not group:\n group = 1\n if not strides:\n strides = [1, 1]\n if not dilations:\n dilations = [1, 1]\n\n use_strided = 0\n assert(strides == [1, 1] or strides == [2, 2] or strides == [4, 4])\n\n if DO_STRIDES and not ignore_strides:\n if (strides[0] > 1 or strides[1] > 1) and group == 1: # TODO handle depthwise as well\n assert(previous['output_size'] == int(np.prod(input_shapes[0])))\n use_strided = 1\n previous['output_strides'] = strides\n if verbose:\n print('adding output strides to previous node')\n\n m = m + (m % strides[0])\n n = n + (n % strides[1])\n if int(np.prod(input_shapes[0])) != int(c*m*n):\n if verbose:\n print('adjusting size for strided maps')\n previous['output_size'] = int(c*4*m//strides[0]*n//strides[1])\n previous['output_shape'] = (c*4,m//strides[0],n//strides[1])\n\n w = get_tensor(inits, node.input[1])\n kernels, channels, _, _ = w.shape\n if len(node.input) == 3:\n b = get_tensor(inits, node.input[2])\n\n conv_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(c*m*n),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels * group,\n 'kernels': kernels,\n 'kernel_shape': kernel_shape,\n 'dilations': dilations,\n 'strides': strides,\n 'group': group,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'use_cvi': 0,\n 'use_depthwise': 0,\n 'use_strided': use_strided,\n \"biases\": [],\n \"weights\": [],\n \"sublayers\": [],\n }\n\n w = w.flatten().tolist()\n conv_layer['weights'] = base64.b64encode(struct.pack(\"f\"*len(w), *w)).decode()\n\n if len(node.input) == 3:\n b = b.flatten().tolist()\n else:\n b = [0 for _ in range(kernels)]\n conv_layer['biases'] = base64.b64encode(struct.pack(\"f\"*len(b), *b)).decode()\n\n network['layers'].append(conv_layer)\n\n elif node.op_type == \"Gemm\":\n w = get_tensor(inits, node.input[1])\n output_size, input_size = w.shape\n\n if len(node.input) == 3:\n b = get_tensor(inits, node.input[2])\n\n gemm_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(np.prod(input_shapes[0])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape': output_shape,\n 'gemm_input_size': input_size,\n 'gemm_output_size': output_size,\n 'input_id': input_id,\n 'output_id': output_id,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"biases\": [],\n \"weights\": [],\n \"sublayers\": [],\n }\n\n w = w.flatten().tolist()\n gemm_layer['weights'] = base64.b64encode(struct.pack(\"f\"*len(w), *w)).decode()\n\n if len(node.input) == 3:\n b = b.flatten().tolist()\n else:\n b = [0 for _ in range(output_size)]\n gemm_layer['biases'] = base64.b64encode(struct.pack(\"f\"*len(b), *b)).decode()\n 
network['layers'].append(gemm_layer)\n\n elif node.op_type in multipath_nodes:\n node_inputs = get_previous_nodes(nodes, node)\n shapes = input_shapes\n\n if node.op_type == \"Sum\":\n assert(all([x == shapes[0] for x in shapes[1:]]))\n elif node.op_type == \"Concat\":\n assert(all([x[1:] == shapes[0][1:] for x in shapes[1:]]))\n\n buf = node_inputs[0].name\n if node.op_type == \"Concat\":\n buf = output_id\n\n buffer_offset = 0\n for n, node_input in enumerate(node_inputs):\n noutput = node_input.output[0]\n for l, layer in enumerate(network['layers']):\n if layer['output_id'] == noutput: # if layer pointing to this node\n network['layers'][l]['output_id'] = buf # rename layer's output\n network['layers'][l]['buffer_offset'] = buffer_offset # and offset appropriately\n if layer['input_id'] == noutput:\n network['layers'][l]['input_id'] = buf #TODO\n\n buffer_offset += int(np.prod(input_shapes[n]))\n\n if node.op_type == \"Sum\":\n channels, m, n = shape3d(output_shape)\n sum_layer = {\n 'op_type': \"Sum\",\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': node_inputs[0].name,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'num_inputs': len(node.input),\n \"sublayers\": [],\n }\n network['layers'].append(sum_layer)\n\n elif node.op_type == \"Identity\":\n shapes = input_shapes\n\n channels, m, n = shape3d(output_shape)\n identity_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n }\n network['layers'].append(identity_layer)\n\n elif node.op_type == \"LRN\":\n shapes = input_shapes\n channels, m, n = shape3d(output_shape)\n lrn_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'alpha': get_attr(node, 'alpha'),\n 'beta': get_attr(node, 'beta'),\n 'bias': get_attr(node, 'bias'),\n 'size': get_attr(node, 'size'),\n 'scale': 1.0,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n }\n network['layers'].append(lrn_layer)\n\n elif node.op_type == \"Scale\":\n scale_sublayer = {\n 'op_type': 'Scale',\n 'name': node.name,\n \"use_replay\": 1,\n 'scale': get_attr(node, 'scale'),\n }\n previous['sublayers'].append(scale_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type in [\"GlobalAveragePool\", \"GlobalMaxPool\"]:\n assert(previous['n'] == previous['m'])\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n pads = pads6(node)\n pool_sublayer = {\n 'op_type': node.op_type.replace('Global', ''),\n 'name': node.name,\n 'use_replay': 0,\n 'kernel_shape': [previous['m'], previous['n']],\n 'strides': [previous['m'], previous['n']],\n 'pads': pads,\n }\n previous['sublayers'].append(pool_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = 
(output_shape)\n\n elif node.op_type in [\"MaxPool\", \"AveragePool\"]:\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n\n if node.op_type == \"AveragePool\": #TODO quick fix for tf average pool quirk\n if kernel_shape[0] * kernel_shape[1] == previous['m'] * previous['n']:\n kernel_shape = [previous['m'], previous['n']]\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n if strides is None:\n strides = [ 1 for _ in kernel_shape]\n pads = pads6(node)\n pool_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'kernel_shape': kernel_shape,\n 'strides': strides,\n 'pads': pads,\n }\n previous['sublayers'].append(pool_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = (output_shape)\n elif node.op_type == \"PRelu\":\n slope = get_tensor(inits, node.input[1])\n slope = slope.flatten().tolist()\n prelu_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'slope': slope,\n }\n previous['sublayers'].append(prelu_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"LeakyRelu\":\n alpha = get_attr(node, 'alpha')\n if alpha is None:\n alpha = .01\n leaky_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'alpha': alpha\n }\n previous['sublayers'].append(leaky_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Relu\":\n relu_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n }\n previous['sublayers'].append(relu_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Clip\":\n clip_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'min': float(get_tensor(inits,node.input[1])),\n 'max': float(get_tensor(inits,node.input[2])),\n }\n previous['sublayers'].append(clip_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Pad\":\n pads = pads6(get_tensor(inits,node.input[1]).tolist())\n value = int(get_tensor(inits,node.input[2]))\n if value < -1:\n value = -1\n if value > 1:\n value = 1\n pad_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'value': value,\n 'pads': pads,\n }\n previous['sublayers'].append(pad_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = (output_shape)\n elif node.op_type in [\"Add\", \"Mul\", \"Sub\", \"Div\"]:\n\n skip = False\n if node.op_type == \"Mul\":\n next_nodes = get_node_inputs(nodes, node.output[0])\n if node.name == nodes[-1].name:\n if verbose:\n print('removing final scale node')\n skip = True\n\n elif previous['op_type'] in [\"LRN\"]:\n if verbose:\n print('skipping mul after lrn')\n array = get_tensor(inits, node.input[1])\n if array is None:\n array = get_tensor(inits, node.input[0])\n previous['scale'] = float(array[0])\n print('skipping mul after lrn', previous['scale'], previous['input_id'], previous['output_id'])\n\n skip = True\n\n elif next_nodes[0].op_type in [\"Softmax\"]:\n if verbose:\n print('skipping mul before softmax')\n skip = True\n\n array = get_tensor(inits, node.input[1])\n if array is None:\n array = get_tensor(inits, node.input[0])\n c = activations[node.input[1]].shape[1]\n else:\n c = input_shapes[0][0]\n\n if node.op_type == \"Add\": # TODO for scalar Add\n dims = len(np.squeeze(array).shape)\n if dims == 0:\n array = np.ones((c, 1)) * array\n\n dims = len(np.squeeze(array).shape)\n if c == 1 and 
dims == 0:\n dims = 1\n\n array = array.flatten().tolist()\n # force_broadcast_2 = False\n # if force_broadcast_2:\n # # if c != 1 and dims == 0:\n # if c != 1 and dims == 0 and node.op_type != \"Mul\": # TODO forcing to broadcast 2 not broadcast 3\n # dims = 1\n # array = [array[0] for _ in range(c)]\n\n if not skip:\n arithmetic_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'dims': dims,\n 'array': array,\n }\n previous['sublayers'].append(arithmetic_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type in [\"Abs\", \"Max\", \"Mean\", \"Min\", \"Neg\", \"Not\"]:\n unary_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n }\n previous['sublayers'].append(unary_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n\n elif node.op_type == \"Reshape\":\n dims = get_tensor(inits, node.input[1])\n\n if len(dims) == 4 and dims[-1] == 2:\n idx += 6\n node = nodes[idx]\n output_id = node.output[0]\n _, output_shapes = get_shapes(activations, stats, node)\n output_shape = output_shapes[0]\n channels, m, n = shape3d(output_shape)\n reorg_layer = {\n 'op_type': \"Reorg\",\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape': output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n \"stride\": int(dims[-1]),\n }\n network['layers'].append(reorg_layer)\n else:\n previous['output_id'] = output_id\n\n elif node.op_type in [\"Flatten\",'Cast']:\n previous['output_id'] = output_id\n elif node.op_type == \"Resize\":\n scales = get_tensor(inits, node.input[2])\n assert(scales[0] == 1 and scales[1] == 1)\n scale = float(scales[2])\n mode = get_attr(node, 'mode').decode()\n assert(mode == 'nearest' or mode == 'linear')\n shapes = input_shapes[:1]\n channels, m, n = shape3d(output_shape)\n in_size= [d for d in one_elem(input_shapes)[1:]]\n replay = 0 if in_size == [1,1] else 1\n resize_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': replay,\n 'input_size': int(np.prod(one_elem(input_shapes))),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'mode' :mode,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': [float(scales[2]),float(scales[3])],\n }\n network['layers'].append(resize_layer)\n elif node.op_type == \"ArgMax\":\n input_shape = one_elem(input_shapes)\n channels, m, n = shape3d(input_shape)\n argmax_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': [float(scales[2]),float(scales[3])],\n }\n network['layers'].append(argmax_layer)\n\n elif node.op_type == \"Softmax\":\n prev = get_previous_nodes(nodes, node)[0]\n if prev.op_type == \"Mul\":\n scale = get_tensor(inits, prev.input[1])\n scale = scale.flatten().tolist()\n else:\n scale = [1.0]\n if len(scale) > 1:\n raise NotImplementedError(\"Broadcast scale not implemented for softmax\")\n\n shapes = input_shapes\n 
channels, m, n = shape3d(output_shape)\n softmax_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': scale,\n 'size': len(scale),\n }\n network['layers'].append(softmax_layer)\n\n # softmax_sublayer = {u'op_type': u'Softmax', 'scale': 1.0}\n # previous['sublayers'].append(softmax_sublayer)\n # previous['output_id'] = output_id\n # print('warning SOFTMAX ignored!... fine if last layer and sorting outputs')\n\n elif node.op_type == \"Transpose\":\n shapes = input_shapes\n\n channels, m, n = shape3d(output_shape)\n permutation =[p-1 for p in get_attr(node, 'perm')[1:]]\n transpose_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'permutation':permutation,\n \"sublayers\": [],\n }\n network['layers'].append(transpose_layer)\n else:\n raise RuntimeError('Unknown node type:{} '.format(node.op_type))\n\n idx += 1\n if idx > last_idx:\n break\n\n unsigned_network_inputs = input_type == np.uint8\n\n if CVI_1x1:\n network = mxp_gemm_to_conv(network)\n\n network = mxp_set_replay(network, io_info)\n network = mxp_set_cvi(network)\n network = mxp_set_unsigned(network, unsigned_network_inputs)\n\n if inline_depthwise:\n network = mxp_inline_depthwise(network)\n\n network = mxp_describe_layers(network)\n network = mxp_number_buffers(network)\n buffers = mxp_size_buffers(network)\n network = mxp_number_sublayers(network)\n\n network['num_layers'] = len(network['layers'])\n network['buffers'] = buffers\n\n return network", "def nodes_names_map(self):\n return {nd.name: nd for nd in self.nodes}", "def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def", "def map_input_and_node(cls, onnx_model: onnx.ModelProto):\n\n input2node: Dict[str, List] = dict()\n for node in onnx_model.graph.node:\n for idx, input_name in enumerate(node.input):\n if input_name not in input2node:\n input2node[input_name] = []\n input2node[input_name].append([node, idx])\n return input2node", "def get_output_node_names(self, node_name):\n # (str) -> list\n node = self.get_node(node_name)\n return node.tops", "def test_get_node_type_name(self):\n pass", "def map_to_homo_nid(self, ids, ntype):\n ...", "def map_to_homo_nid(self, ids, ntype):\n ...", "def load_onnx(model_name):\n onnx_path = '%s.onnx' % model_name\n if not os.path.isfile(onnx_path):\n print('ERROR: file (%s) not found! You might want to run yolo_to_onnx.py first to generate it.' 
% onnx_path)\n return None\n else:\n with open(onnx_path, 'rb') as f:\n return f.read()", "def test_get_node_hardware(self):\n pass", "def _compare_onnx_pytorch_outputs(\n onnx_outs: _OutputsType,\n pt_outs: Any,\n options: VerificationOptions,\n):\n if options.ignore_none:\n # torch.jit._flatten filters None type\n pt_outs, _ = torch.jit._flatten(pt_outs)\n else:\n pt_outs = _inline_flatten_list([pt_outs], [])\n pt_outs_np = _unpack_to_numpy(pt_outs, cast_onnx_accepted=False)\n onnx_outs = _inline_flatten_list(onnx_outs, [])\n _compare_onnx_pytorch_outputs_in_np(onnx_outs, pt_outs_np, options)", "def nodes(topology):\n return topology.nodes()", "def getNodeNames(self, includeDisabled=False):", "def nodules_connection(label_data, label_header):\n\n\n las_labels = measure.label(label_data,\n neighbors=8,\n background=0,\n return_num=True)\n\n las_labels_nzero = np.nonzero(las_labels[0])\n [xdif, ydif, zdif] = [np.amax(las_labels_nzero[0])-np.amin(las_labels_nzero[0]),\n np.amax(las_labels_nzero[1])-np.amin(las_labels_nzero[1]),\n np.amax(las_labels_nzero[2])-np.amin(las_labels_nzero[2])]\n\n # conversion pixels to mm\n dims = label_header['pixdim']\n if label_header['xyzt_units'] == 10:\n #dimensions in mm\n print('xyzt_units=10')\n xdif=dims[1]*xdif\n ydif=dims[2]*ydif\n zdif=dims[3]*zdif\n\n\n return las_labels,[xdif,ydif,zdif]", "def test_model_with_input_last_onnx_node(self):\n\n roi_model = RoiModel(height=7, width=7, scale=0.25)\n x = torch.rand(1, 1, 6, 6)\n rois = torch.tensor([ [0, -2.0, -2.0, 22.0, 22.0], ])\n dummy_input = (x, rois)\n onnx_utils.OnnxSaver.set_node_names('./data/roi.onnx', roi_model, dummy_input, is_conditional=False,\n module_marker_map={},\n onnx_export_args=(onnx_utils.OnnxExportApiArgs(opset_version=11))\n )\n onnx_model = onnx.load('./data/roi.onnx')\n end_nodes = [ n.name for n in onnx_model.graph.node if 'end' in n.name]\n assert len(end_nodes) == 1", "def test_get_node_hardware_fast(self):\n pass", "def test_get_pci_device_by_moid(self):\n pass", "def debug_cntk_outputnodes():\n\tz = load_model(MODEL)\n\tprint (\"Load complete.\");\n\tfor index in range(len(z.outputs)):\n\t\tprint(\"Index {} for output: {}.\".format(index, z.outputs[index].name))", "def _retrieve_or_adapt_input_to_graph_set(\n fx_node_arg: fx_type_utils.Argument,\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n):\n\n onnx_tensor = fx_node_arg\n if isinstance(onnx_tensor, torch.fx.Node):\n # 1. fx_node_arg is a torch.fx.Node, which means\n # fx_node_arg stands for the output of that torch.fx.Node.\n # 2. fx_node_arg (variable in torch.fx.Graph) is be mapped to\n # torch.jit.Value, fx_name_to_onnxscript_value[fx_node_arg.name],\n # in TorchScript graph.\n return fx_name_to_onnxscript_value[onnx_tensor.name]\n if isinstance(onnx_tensor, (tuple, list)) and any(\n isinstance(node, torch.fx.Node)\n and isinstance(node.meta.get(\"val\"), torch.SymInt)\n for node in onnx_tensor\n ):\n # This intends to handle dynamic axes. for example, if the input size of op.Expand\n # is dynamic, each dimension would be variable (i.e., sym variable in Pytorch\n # FX graph. 
Note that sym variable is mapped to tensor in ONNX Script world)\n # calculated by other operators.\n sequence_mixed_elements: List[\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n List[int],\n ]\n ] = []\n for tensor in onnx_tensor:\n if isinstance(tensor, torch.fx.Node) and isinstance(\n tensor.meta.get(\"val\"), torch.SymInt\n ):\n sequence_mixed_elements.append(fx_name_to_onnxscript_value[tensor.name])\n elif isinstance(tensor, int):\n # NOTE: op.Concat doesn't support scalar, so we need to wrap it with\n # dim, and onnx-script will promote it to tensot(int64)\n sequence_mixed_elements.append([tensor])\n # Concat all the elements in the sequence.\n # shapes are mapped to tensors in ONNX graph (TorchScriptGraph),\n # so list of sym_ints is concatenated to a tensor before calling ONNX op.\n\n # For example:\n # inputs: [[2], [4], fx.Node(SymIntA), [1], fx.Node(SymIntB)]\n # outputs: op.Concat([op.Constant(2), op.Constant(4), TorchScriptTensor(A), op.Constant(1), TorchScriptTensor(B)])\n\n # onnx-script auto wraps python number with op.Constants,\n # so we don't need to specifically process them.\n with onnxscript.evaluator.default_as(tracer):\n output = onnxscript.opset18.Concat(*sequence_mixed_elements, axis=0)\n output.dtype = torch.int64\n output.shape = [len(sequence_mixed_elements)]\n return output\n elif isinstance(onnx_tensor, (tuple, list)) and all(\n isinstance(node, torch.fx.Node) or node is None for node in onnx_tensor\n ):\n sequence_elements: List[\n Union[\n Optional[onnxscript_graph_building.TorchScriptTensor],\n Tuple[\n onnxscript_graph_building.TorchScriptTensor,\n ...,\n ],\n ]\n ] = []\n for tensor in onnx_tensor:\n sequence_elements.append(\n fx_name_to_onnxscript_value[tensor.name] if tensor is not None else None\n )\n return sequence_elements\n if isinstance(onnx_tensor, torch.dtype):\n onnx_tensor = int(\n jit_type_utils.JitScalarType.from_dtype(onnx_tensor).onnx_type()\n )\n # NOTE: if device is specified in kwargs (not consumed), it's free to ignored. But\n # if it's in args, we need to set it to string for dispatcher to match schema.\n if isinstance(onnx_tensor, torch.device):\n # torch.device is not supported by onnxscript (no op). 
We turn it into\n # a string.\n return str(onnx_tensor)\n\n # all other cases, we do nothing.\n return onnx_tensor", "def map_to_homo_nid(self, ids, ntype): # -> None:\n ...", "def export_onnx():\r\n model = DivideBy255()\r\n X = torch.randn(1, 3, 256, 256, dtype=torch.float)\r\n onnx_name = \"DivideBy255.onnx\"\r\n\r\n print(f\"Generating {onnx_name}\")\r\n torch.onnx.export(\r\n model,\r\n (X),\r\n onnx_name,\r\n opset_version=10,\r\n do_constant_folding=True,\r\n # verbose=True,\r\n # input_names=['Identity_1', 'Identity'],\r\n output_names=['input_1']\r\n )", "def get_mds_dimension_names(node):\n ndims=len(get_mds_shape(node))\n own_name=get_mds_shortname(node)\n dimension_names=[]\n for i in range(ndims):\n dimension=node.dim_of(i)\n try:\n name=get_mds_shortname(get_mds_node_reference(dimension))\n if len(get_mds_shape(dimension))>1:\n name=name+\"_index\"\n except:\n name=own_name+\"_index\"\n dimension_names.append(name)\n return dimension_names", "def test_get_pci_link_by_moid(self):\n pass", "def test_get_hyperflex_cluster_by_moid(self):\n pass", "def test_get_hyperflex_server_model_by_moid(self):\n pass", "def nodes_mapped(instance):\n G, mapping = instance.network()\n node_dict = instance.network_nodes_species()\n\n node_dict_mapped = {}\n\n for old_label, new_label in mapping.items():\n for node, ammentity in node_dict.items():\n if old_label == node:\n node_dict_mapped[new_label] = ammentity\n\n return node_dict_mapped", "def create_mock_data(number_of_nodes, edge_per_node, in_channels):\n graph = nx.watts_strogatz_graph(number_of_nodes, edge_per_node, 0.5)\n edge_index = torch.LongTensor(np.array([edge for edge in graph.edges()]).T)\n X = torch.FloatTensor(np.random.uniform(-1, 1, (number_of_nodes, in_channels)))\n return X, edge_index", "def test_get_hyperflex_node_list(self):\n pass", "def get_node_info(rdb, fk_array, ntype, hint, taxid):\n hint = None if hint == '' or hint is None else hint.upper()\n taxid = None if taxid == '' or taxid is None else str(taxid)\n if ntype == '':\n ntype = None\n\n if ntype is None:\n res_arr = rdb.mget(['::'.join(['stable', str(fk), 'type']) for fk in fk_array])\n fk_prop = [fk for fk, res in zip(fk_array, res_arr) if res is not None\n and res.decode() == 'Property']\n fk_gene = [fk for fk, res in zip(fk_array, res_arr) if res is not None\n and res.decode() == 'Gene']\n if fk_prop and fk_gene:\n raise ValueError(\"Mixture of property and gene nodes.\")\n ntype = 'Property' if fk_prop else 'Gene'\n\n if ntype == \"Gene\":\n stable_array = conv_gene(rdb, fk_array, hint, taxid)\n elif ntype == \"Property\":\n stable_array = fk_array\n else:\n raise ValueError(\"Invalid ntype\")\n\n return list(zip(fk_array, *node_desc(rdb, stable_array)))", "def get_output_names(hf):\n return sorted(map(str, hf['/output/data'].keys()))", "def test_nodes_at_link():\n grid = HexModelGrid((3, 2))\n\n assert_array_equal(grid.nodes_at_link[:, 0], grid.node_at_link_tail)\n assert_array_equal(grid.nodes_at_link[:, 1], grid.node_at_link_head)\n\n assert np.may_share_memory(grid.nodes_at_link, grid.node_at_link_tail)\n assert np.may_share_memory(grid.nodes_at_link, grid.node_at_link_head)", "def get_ioport_names():\n return sorted(set(get_input_names()) & set(get_output_names()))", "def test_get_pci_switch_by_moid(self):\n pass", "def display_nodes(nodes):\n for node in nodes:\n print(f'{node.name} has an IP address of {node.address}.')", "def get_output_nodes(self):\n \n\n self.buildings = self.dataset.groups['buildings']\n self.building_nodes = 
self.buildings.groups['nodes']\n\n eta_output_added = getattr(self.building_nodes,'eta_output_added')\n uv_output_added = getattr(self.building_nodes,'uv_output_added')\n\n eta = []\n uv = []\n nodeIds = []\n time = []\n \n if(eta_output_added or uv_output_added ):\n time = self.building_nodes.variables['time'][:].tolist()\n nodeIds = self.building_nodes.variables['id'][:].tolist()\n if eta_output_added: eta = self.building_nodes.variables['eta'][:].tolist()\n if uv_output_added: uv = self.building_nodes.variables['uv'][:].tolist()\n\n \n return nodeIds,eta, uv, time", "def get_input_node_names(self, node_name):\n # (str) -> list\n node = self.get_node(node_name)\n return node.bottoms", "def check_onnx_model(onnx_path):\n # Load the ONNX model\n model = onnx.load(onnx_path)\n\n # Check that the IR is well formed\n onnx.checker.check_model(model)\n\n # Print a human readable representation of the graph\n onnx.helper.printable_graph(model.graph)", "def get_node_info(node_name, nodes_file_name):\r\n\r\n data = []\r\n node = []\r\n mvtype = ''\r\n\r\n with open(nodes_file_name) as n:\r\n for num, line in enumerate(n):\r\n if node_name in line:\r\n data = line.split()\r\n if node_name == data[0]:\r\n node.append(float(data[1]))\r\n node.append(float(data[2]))\r\n if 'terminal' in line:\r\n mvtype = 'terminal'\r\n elif 'terminal_NI' in line:\r\n mvtype = 'terminal_NI'\r\n else:\r\n mvtype = 'non-terminal'\r\n break\r\n\r\n pl_file_name = nodes_file_name.replace('.nodes', '.pl')\r\n with open(pl_file_name) as p:\r\n for num, line in enumerate(p):\r\n if node_name in line:\r\n data = line.split()\r\n if node_name == data[0]:\r\n node.append(float(data[1]))\r\n node.append(float(data[2]))\r\n break\r\n\r\n node.append(mvtype)\r\n return node", "def test_get_related_nodes(self):\n pass", "def is_node_output_tensor(node: pippy.fx.Node) -> bool:\n type_ = node.meta.get(\"type\", None)\n return type_ is not None and issubclass(type_, torch.Tensor)", "def test_output_head_layers():\n for output_dim in [[[\"linear\", 3],[\"linear\", 9]], [[\"linear\", 4], [\"linear\", 20]], [[\"linear\", 1], [\"linear\", 1]]]:\n nn_instance = RNN(input_dim=5, layers_info=[[\"gru\", 20], [\"lstm\", 8], output_dim],\n hidden_activations=\"relu\", output_activation=[\"softmax\", None])\n assert nn_instance.output_layers[0].out_features == output_dim[0][1]\n assert nn_instance.output_layers[0].in_features == 8\n assert nn_instance.output_layers[1].out_features == output_dim[1][1]\n assert nn_instance.output_layers[1].in_features == 8", "def _ProjectImpl(self, tensor_names: List[Text]) -> \"TFXIO\":", "def test_iou_multi(self):\n t = init_tensors()\n metrics_dict = create_metrics_dict(3)\n metrics_dict = iou(t['pred_multi'], \n t['lbl_multi'], \n batch_size=2, \n num_classes=3,\n metric_dict=metrics_dict,\n ignore_index=-1)\n assert \"{:.6f}\".format(metrics_dict['iou'].val) == \"0.185185\"", "def getOthNodes( self ):\n\n if self.othNodes:\n return self.othNodes.keys()\n\n if not self.othNames:\n self.getOthNames( )\n\n for id1 in self.othNames.values():\n nNodes = self.adb.get(\t \"nOthNodes\", id1\t)\n for id3 in range(nNodes):\n nd = self.adb.get(\t \"othNode\", id1,id3 )\n self.othNodes[ nd ] = id3\n self.othNodes[ str( nd ) ] = id3\n\n return self.othNodes.keys()", "def get_annotation_names(viewer):\n\n layer_nodes_name = None\n layer_edges_name = None\n for layer in viewer.layers:\n if isinstance(layer, napari.layers.points.points.Points):\n layer_nodes_name = layer.name\n elif isinstance(layer, 
napari.layers.shapes.shapes.Shapes):\n layer_edges_name = layer.name\n if layer_nodes_name is not None and layer_edges_name is not None:\n break\n return layer_nodes_name, layer_edges_name", "def test_get_hyperflex_cluster_profile_by_moid(self):\n pass", "def get_target_nodes(self):\n url = 'https://raw.githubusercontent.com/ChandlerBang/Pro-GNN/master/nettack/{}_nettacked_nodes.json'.format(self.name)\n json_file = osp.join(self.root,\n '{}_nettacked_nodes.json'.format(self.name))\n\n if not osp.exists(json_file):\n self.download_file(url, json_file)\n # with open(f'/mnt/home/jinwei2/Projects/nettack/{dataset}_nettacked_nodes.json', 'r') as f:\n with open(json_file, 'r') as f:\n idx = json.loads(f.read())\n return idx[\"attacked_test_nodes\"]", "def _common_onnx_node_to_singa_op(cls, onnx_node, inputs, opset_version):\n onnx_op_type = onnx_node.op_type\n assert onnx_op_type in cls._rename_operators, \"not support operator: {}\".format(\n onnx_op_type)\n autograd_op = getattr(autograd, cls._rename_operators[onnx_op_type])\n return None, autograd_op", "def get_node_features(odl_url, odl_usr, odl_pass, node_id):\n if odl_url.endswith('/'):\n odl_url = odl_url[:-1]\n inventory_url = odl_url + '/opendaylight-inventory:nodes/node/'\n node_url = inventory_url + node_id\n topology_json = call_odl_api(odl_usr, odl_pass, node_url)\n return topology_json", "def reference_nodes_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_idx_references", "def get_nodes(wf_results):\n return {node.fullname: node for node in wf_results.nodes}", "def test_src_node() -> dict:\n return {\"aetitle\": \"pacsanini_testing\", \"ip\": 11114}", "def test_nmap_get_sensordef(self):\n test_sensordef = {\n \"kind\": self.test_nmap.get_kind(),\n \"name\": \"NMAP\",\n \"description\": \"Checks the availability of systems.\",\n \"help\": \"Checks the availability of systems on a network and logs this to a separate \"\n \"logfile on the miniprobe.\",\n \"tag\": \"mpnmapsensor\",\n \"groups\": [\n {\n \"name\": \"nmapspecific\",\n \"caption\": \"NMAP specific\",\n \"fields\": [\n {\n \"type\": \"integer\",\n \"name\": \"timeout\",\n \"caption\": \"Timeout (in ms)\",\n \"required\": \"1\",\n \"default\": 50,\n \"minimum\": 10,\n \"maximum\": 1000,\n \"help\": \"If the reply takes longer than this value the request is aborted \"\n \"and an error message is triggered. Max. value is 1000 ms. 
(=1 sec.)\"\n },\n {\n \"type\": \"edit\",\n \"name\": \"ip\",\n \"caption\": \"IP-Address(es)\",\n \"required\": \"1\",\n \"default\": \"\",\n \"help\": \"Specify the ip-address or a range of addresses using one of the following notations:[br]Single: 192.168.1.1[br]CIDR: 192.168.1.0/24[br]- separated: 192.168.1.1-192.168.1.100\"\n }\n ]\n }\n ]\n }\n assert_equal(self.test_nmap.get_sensordef(), test_sensordef)", "def network_nodes_species(self):\n G, mapping = self.network()\n waste, resources, intmed_products = self.amenities()\n\n node_dict = {}\n\n for nd in G:\n # print(nd)\n if isinstance(nd, int):\n node_dict[nd] = \"r\"\n elif nd in self.commodity:\n node_dict[nd] = \"Xc\"\n elif nd in waste:\n node_dict[nd] = \"w\"\n elif nd in resources:\n node_dict[nd] = \"Xr\"\n elif nd in intmed_products:\n node_dict[nd] = \"InPr\"\n\n return node_dict", "def returnTipsFromNeedleModels(self):\n #productive\n profprint()\n returnTips=[]\n modelNodes=slicer.mrmlScene.GetNodesByClass('vtkMRMLModelNode')\n nbNode=modelNodes.GetNumberOfItems()\n for nthNode in range(nbNode):\n # print nthNode\n node=slicer.mrmlScene.GetNthNodeByClass(nthNode,'vtkMRMLModelNode')\n if node.GetAttribute('type')=='Validation':\n polydata = node.GetPolyData()\n p,pbis=[0,0,0],[0,0,0]\n if polydata.GetNumberOfPoints()>100: #??? this is risky when u have other models in the scene (not only neeedles(\n polydata.GetPoint(0,p)\n polydata.GetPoint(int(polydata.GetNumberOfPoints()-1),pbis)\n if pbis[2]>p[2]:\n p=pbis\n \n returnTips.append(self.ras2ijk(p))\n return returnTips", "def pytorch2onnx(model,\n input_shape,\n opset_version=11,\n show=False,\n output_file='tmp.onnx',\n verify=False):\n model.cpu().eval()\n\n if isinstance(model.decode_head, nn.ModuleList):\n num_classes = model.decode_head[-1].num_classes\n else:\n num_classes = model.decode_head.num_classes\n\n mm_inputs = _demo_mm_inputs(input_shape, num_classes)\n\n imgs = mm_inputs.pop('imgs')\n img_metas = mm_inputs.pop('img_metas')\n\n img_list = [img[None, :] for img in imgs]\n img_meta_list = [[img_meta] for img_meta in img_metas]\n\n # replace original forward function\n origin_forward = model.forward\n model.forward = partial(\n model.forward, img_metas=img_meta_list, return_loss=False)\n\n register_extra_symbolics(opset_version)\n with torch.no_grad():\n torch.onnx.export(\n model, (img_list, ),\n output_file,\n export_params=True,\n keep_initializers_as_inputs=True,\n verbose=show,\n opset_version=opset_version)\n print(f'Successfully exported ONNX model: {output_file}')\n model.forward = origin_forward\n\n if verify:\n # check by onnx\n import onnx\n onnx_model = onnx.load(output_file)\n onnx.checker.check_model(onnx_model)\n\n # check the numerical value\n # get pytorch output\n pytorch_result = model(img_list, img_meta_list, return_loss=False)[0]\n\n # get onnx output\n input_all = [node.name for node in onnx_model.graph.input]\n input_initializer = [\n node.name for node in onnx_model.graph.initializer\n ]\n net_feed_input = list(set(input_all) - set(input_initializer))\n assert (len(net_feed_input) == 1)\n sess = rt.InferenceSession(output_file)\n onnx_result = sess.run(\n None, {net_feed_input[0]: img_list[0].detach().numpy()})[0]\n if not np.allclose(pytorch_result, onnx_result):\n raise ValueError(\n 'The outputs are different between Pytorch and ONNX')\n print('The outputs are same between Pytorch and ONNX')", "def map(self, app, node):", "def test_from_onnx(self):\n pytorch_model = PyTorchLinear()\n dummy_input = torch.empty(10, 10)\n\n with 
io.BytesIO() as f:\n f = onnx_converter._export_pytorch_model(f, pytorch_model, dummy_input)\n f.seek(0)\n\n crypten_model = onnx_converter.from_onnx(f)\n\n self.assertTrue(hasattr(crypten_model, \"encrypt\"))", "def test_rdf2nx(example_ns, SCHEMA, simple_rdf_graph):\n KNOWN_EDGE = (URIRef(example_ns.Protagonist), URIRef(example_ns.Antagonist))\n namespaces = {\"schema\": SCHEMA, \"ex\": example_ns, \"base\": example_ns}\n nx_graph = RDF2NX.convert(rdf_graph=simple_rdf_graph, namespaces=namespaces)\n\n try:\n protagonist = nx_graph.nodes[example_ns.Protagonist]\n except KeyError:\n raise KeyError(\"Protagonist node not found in fixture graph.\")\n\n p_height = protagonist.get(\"ex:height\", None)\n assert (\n type(p_height) == float\n ), \"XSD Datatype failed to map to python type correctly.\"\n\n p_type = type(protagonist.get(\"type\", None))\n assert not isinstance(\n p_type, type(None)\n ), f\"Failed to get type of node from node keys: {protagonist.keys()}\"\n assert p_type == URIRef, \"URIRef node attribute is not URI.\"\n\n assert KNOWN_EDGE in nx_graph.edges(data=False) and KNOWN_EDGE[\n ::-1\n ] in nx_graph.edges(data=False), \"Known relations missing in the networkx graph.\"\n\n # Run once more with rdf namespace and check type\n namespaces = {\"rdf\": RDF, **namespaces}\n nx_graph = RDF2NX.convert(rdf_graph=simple_rdf_graph, namespaces=namespaces)\n\n try:\n protagonist = nx_graph.nodes[example_ns.Protagonist]\n except KeyError:\n raise KeyError(\"Protagonist node not found in fixture graph.\")\n\n p_type = type(protagonist.get(\"rdf:type\", None))\n assert not isinstance(\n p_type, type(None)\n ), f\"Failed to get rdf:type of node from node keys: {protagonist.keys()}\"", "def get_ntype_featnames(ntype_name, schema_map):\n node_data = schema_map[constants.STR_NODE_DATA]\n feats = node_data.get(ntype_name, {})\n return [feat for feat in feats]", "def gen_nodes(modelfile, starting_genes):\n # read json file with final model variables\n shape, top_genes, weights, output_key, biases = read_json(modelfile)\n\n # initialize database\n database = db.Database()\n\n # create list to store all layers\n NN = []\n\n # get input probe sequences\n input_seqs_df = inputs.probes_df(top_genes)\n # each layer is a dictionary with keys as names of strands and values as a list of seqs\n l_0 = {}\n probe_seqs = []\n for probe in input_seqs_df[\"Probe Sequences\"]:\n index = 0\n size = database.size\n while database.size < size + 1:\n try:\n database.database_insert(Seq(probe[index]))\n index += 1\n # except block handles case that NONE of the probe sequences were accepted into the database\n # ***TEMPORARY FIX***\n except IndexError:\n index -= 1\n break\n probe_seqs.append(Seq(probe[index]))\n l_0[\"Probe Sequence\"] = probe_seqs\n print(\"Layer 0: \", l_0)\n NN.append(l_0)\n\n # add the tether and promotor to the database\n database.database_insert(starting_genes[\"Tether\"])\n database.database_insert(starting_genes[\"T7 Promoter\"])\n\n # generate all the sequences for every node in each layer\n for layer in range(1, len(shape)):\n # add the cage and tether sequences to the layer dictionary\n l_i = {}\n l_i[\"Cage Sense\"] = [starting_genes[\"Cage Sense\"]] * shape[layer]\n l_i[\"Cage Antisense\"] = [starting_genes[\"Cage Antisense\"]] * shape[layer]\n l_i[\"Tether\"] = [starting_genes[\"Tether\"]] * shape[layer]\n\n print(\"getting anchor strands\")\n tether_length = len(starting_genes[\"Tether\"])\n size = database.size\n # generate anchor strands until all of them have been accepted into the 
database\n while database.size < size + shape[layer]:\n anchor = oligo.oligo(tether_length)\n database.database_insert(anchor)\n anchor_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n print(\"getting transcription factors\")\n threshold_energy = 9 # variable that can be changed, pos integer, see gen_tf for description\n static_tf_seqs = []\n tf_seqs = []\n for anchor in anchor_seqs:\n static_tf, tf = gen_tf(anchor, starting_genes[\"Tether\"], threshold_energy)\n static_tf_seqs.append(static_tf)\n tf_seqs.append(tf)\n print(\"DONE\")\n\n print(\"getting outputs\")\n output_length = 25 # length of dna transcript from one node\n size = database.size\n while database.size < size + shape[layer]:\n output = oligo.oligo(output_length).sequence\n database.database_insert(output)\n transcript_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n # assemble longer strands in the node\n l_i[\"Static TF + Transcript Sense\"] = [static_tf_seqs[i] + starting_genes[\"T7 Promoter\"] + transcript_seqs[i]\n for i in range(shape[layer])]\n l_i[\"Transcript Antisense + Anchor\"] = [\n oligo.complement(transcript_seqs[i]) + oligo.complement(starting_genes[\"T7 Promoter\"]) + anchor_seqs[i] for\n i in range(shape[layer])]\n\n # intermediates are the strands that determine weights in toehold-mediated displacement\n print(\"getting intermediate\")\n toe_length = 20 # standard length for all toehold sequences\n # get the 2D matrix for this layer and round the values to one decimal place\n weight_matrix = np.array(weights[layer - 1])\n weight_matrix = np.round(weight_matrix, 1)\n intermediate_seqs = []\n tf_appendage_seqs = []\n for i in range(shape[layer - 1]):\n if layer == 1:\n output = NN[0][\"Probe Sequence\"][i]\n else:\n output = NN[layer - 1][\"Static TF + Transcript Sense\"][i][-output_length:]\n inters = []\n top_toe = output[:toe_length]\n b_dom = output[toe_length:]\n tf_appendage_seqs.append(b_dom)\n # get all the possible sequences for toehold weights between 0 and 1\n weight_dict = quant.find_quanta(top_toe)\n for j in range(shape[layer]):\n w = weight_matrix[j, i]\n tf = tf_seqs[j]\n a_star_tf = tf[:len(tf) // 2]\n if w < 0:\n # negative weights\n inters.append(a_star_tf + oligo.complement(b_dom) + weight_dict[w * -1])\n else:\n # positive weights\n inters.append(oligo.complement(a_star_tf) + oligo.complement(b_dom) + weight_dict[w])\n\n intermediate_seqs.append(inters)\n # each list in the nested list is for one node in the layer, get nodes row-wise\n l_i[\"Intermediate\"] = np.array(intermediate_seqs).T.tolist()\n print(\"DONE\")\n\n # TF and TF Inhibitor are products of toehold-mediated displacement for pos and neg weights, respectively\n full_tf_seqs_2D = []\n attack_seqs_2D = []\n for tf in tf_seqs:\n full_tf_seqs = []\n attack_seqs = []\n for appendage in tf_appendage_seqs:\n full_tf_seq = appendage + tf\n attack_seq = appendage + oligo.complement(tf[:len(tf) // 2])\n full_tf_seqs.append(full_tf_seq)\n attack_seqs.append(attack_seq)\n full_tf_seqs_2D.append(full_tf_seqs)\n attack_seqs_2D.append(attack_seqs)\n l_i[\"TF\"] = full_tf_seqs_2D\n l_i[\"TF Inhibitor\"] = attack_seqs_2D\n\n print(\"Layer {}: \".format(layer), l_i)\n # add the completed layer to the NN list\n NN.append(l_i)\n\n return NN", "def import_onnx(onnx_model: \"onnx.ModelProto\") -> Graph:\n from onnx_graphsurgeon.importers.onnx_importer import OnnxImporter\n\n return OnnxImporter.import_graph(onnx_model.graph, opset=OnnxImporter.get_opset(onnx_model))", "def 
list_known_phylogenetic_metrics():\r\n result = []\r\n for name in dir(qiime.beta_metrics):\r\n if name.startswith('dist_'):\r\n result.append(name[5:])\r\n result.sort()\r\n return result", "def test_get_hyperflex_capability_info_by_moid(self):\n pass", "def getOutputsNames(net):\r\n # Get the names of all the layers in the network\r\n layersNames = net.getLayerNames()\r\n # Get the names of the output layers, i.e. the layers with unconnected outputs\r\n return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]", "def getOutputsNames(net):\n # Get the names of all the layers in the network\n layersNames = net.getLayerNames()\n # Get the names of the output layers, i.e. the layers with unconnected outputs\n return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]", "def get_node_children_names(model, node):\n\n output_nodes = model.get_children(node)\n outputs = [node.name for node in output_nodes]\n return outputs", "def test_good_node():\n node_a = Node({'A':['B','C']})\n assert node_a.name == 'A'\n assert node_a.connections == ['B','C']", "def enumerate_model_node_outputs(model, add_node=False, order=False):\n if not hasattr(model, \"graph\"):\n raise TypeError( # pragma: no cover\n \"Parameter model is not an ONNX model but \"\n \"{}\".format(type(model)))\n if order:\n edges = []\n order = {}\n node_names = {}\n for inp in model.graph.input:\n order[0, inp.name] = 0\n for node in model.graph.node:\n order[1, node.name] = 0\n for i in node.input:\n edges.append(('in', i, node.name))\n for o in node.output:\n edges.append(('out', o, node.name))\n node_names[o] = node\n order[0, o] = 0\n\n modif = 1\n while modif > 0:\n modif = 0\n for kind, data_name, node_name in edges:\n if kind == 'in':\n if (0, data_name) not in order:\n continue\n if order[0, data_name] + 1 > order[1, node_name]:\n modif += 1\n order[1, node_name] = order[0, data_name] + 1\n else:\n if order[1, node_name] + 1 > order[0, data_name]:\n modif += 1\n order[0, data_name] = order[1, node_name] + 1\n\n orders = [(v, k) for k, v in order.items()]\n orders.sort()\n\n for _, k in orders:\n if k[0] == 1:\n continue\n out = k[1]\n if out not in node_names:\n continue\n yield (out, node_names[out]) if add_node else out\n else:\n for node in model.graph.node:\n for out in node.output:\n yield (out, node) if add_node else out", "def _nmap_discover_devices(ip_range):\n def map_host_ip(line):\n ip = re.match('.*\\s(\\d+\\.\\d+\\.\\d+\\.\\d+).*', line)\n if ip:\n return ip.groups()[0]\n\n nmap_lines = subprocess.check_output(['nmap', '-n', '-sn', ip_range]).split('\\n')\n return filter(None, map(map_host_ip, nmap_lines))", "def test_get_hyperflex_node_profile_list(self):\n pass", "def test_lookup(graph):\n node1 = graph.lookup(0)\n assert str(node1) == \"<1, 2>\"\n\n node2 = graph.lookup(3)\n assert str(node2) == \"<1, 2>\"\n\n node3 = graph.lookup(1)\n assert str(node3) == \"<0, 2, 3>\"", "def test_get_nodes(self):\n wp22_rdf_graph = parse_rdf(WP22)\n wp706_rdf_graph = parse_rdf(WP706)\n wp1871_rdf_graph = parse_rdf(WP1871)\n wp2799_rdf_graph = parse_rdf(WP2799)\n\n nodes_wp22 = _get_nodes(wp22_rdf_graph)\n nodes_wp706 = _get_nodes(wp706_rdf_graph)\n nodes_wp1871 = _get_nodes(wp1871_rdf_graph)\n nodes_wp2799 = _get_nodes(wp2799_rdf_graph)\n\n self.assertEqual(len(nodes_wp22), 17)\n self.assertEqual(len(nodes_wp706), 186)\n self.assertEqual(len(nodes_wp1871), 115)\n self.assertEqual(len(nodes_wp2799), 141)", "def __init__(self, graph,\n nn_module='MLP',\n nn_layers=1,\n nn_mid_units=128,\n 
nn_mid_acti=tf.tanh,\n nn_out_units=1,\n nn_out_acti=None,\n ignore_nodetype=True,\n name='node_fn'):\n self.graph = graph\n self.nn_module = nn_module\n self.nn_layers = nn_layers\n self.nn_mid_units = nn_mid_units\n self.nn_mid_acti = nn_mid_acti\n self.nn_out_units = nn_out_units\n self.nn_out_acti = nn_out_acti\n self.ignore_nodetype = ignore_nodetype\n self.name = name\n\n self.reuse = None", "def get_images_and_labels_nc():\n refs = get_ref_df()\n images = {}\n for _, data in refs.iterrows():\n if data['ProbeFileName'] in images:\n continue\n im = data['ProbeFileName']\n images[im] = 1 if data['IsTarget'] == 'Y' else 0\n return images", "def input_nodes(self):\n pass", "def multi_edge():\n from networkx.readwrite import json_graph\n import networkx as nx\n import autonetkit\n # returns a house graph\n data = {'directed': False,\n 'graph': [],\n 'links': [{'_ports': {'r4': 2, 'r5': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 1},\n {'_ports': {'r2': 3, 'r4': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r2': 4, 'r4': 3},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r3': 3, 'r5': 2},\n 'raw_interfaces': {},\n 'source': 1,\n 'target': 4},\n {'_ports': {'r1': 1, 'r2': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 3, 'r2': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 2, 'r3': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 4, 'r3': 4},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 5, 'r3': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r2': 2, 'r3': 2},\n 'raw_interfaces': {},\n 'source': 3,\n 'target': 4}],\n 'multigraph': True,\n 'nodes': [{'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r4 to r5', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth2'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r4',\n 'label': 'r4',\n 'x': 675,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r5 to r4', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r5 to r3', 'id': 'eth1'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r5',\n 'label': 'r5',\n 'x': 675,\n 'y': 500},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r1',\n 'label': 'r1',\n 'x': 350,\n 'y': 400},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r2 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r2',\n 'label': 'r2',\n 'x': 500,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 
'description': None},\n 1: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r3 to r2', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r3 to r5', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r3',\n 'label': 'r3',\n 'x': 500,\n 'y': 500}]}\n graph = json_graph.node_link_graph(data)\n anm = autonetkit.anm.NetworkModel()\n g_in = anm.add_overlay(\"input\")\n g_in._replace_graph(nx.MultiGraph(graph))\n # TODO: check if should build overlays here rather than clone in?\n g_phy = anm[\"phy\"]\n g_phy._replace_graph(graph)\n return anm", "def get_all_node_outputs(node: Node):\n return [port.node for port in get_node_output_ports(node)]", "def list_known_nonphylogenetic_metrics():\r\n result = []\r\n for name in dir(distance_transform):\r\n if name.startswith('dist_'):\r\n result.append(name[5:])\r\n elif name.startswith('binary_dist_'):\r\n result.append('binary_' + name[12:])\r\n result.sort()\r\n return result", "def extract_nodes(file_name, file_name_out):\n with open(file_name, 'r') as file_in:\n nodes = {} # dict of player and unique id\n uid = 1\n for line in file_in:\n fields = parse_line(line)\n player = format_name(fields[0])\n if player not in nodes:\n nodes[player] = uid\n uid += 1\n\n with open(file_name_out, 'w') as file_out:\n print('id,label', file=file_out)\n for player in nodes:\n print(nodes[player], player, sep=',', file=file_out)\n\n return nodes" ]
[ "0.75095046", "0.6466577", "0.64522165", "0.6451405", "0.6420626", "0.6097081", "0.5859226", "0.5858057", "0.5838287", "0.57737774", "0.5742301", "0.5723268", "0.56167114", "0.55878025", "0.558724", "0.55839914", "0.5539161", "0.5493296", "0.5454495", "0.5449514", "0.5327194", "0.5264711", "0.52570266", "0.52436924", "0.51749885", "0.5145236", "0.51336056", "0.51336056", "0.51066095", "0.51051134", "0.50850415", "0.5076991", "0.5061238", "0.505792", "0.50526273", "0.50503343", "0.50492567", "0.5042079", "0.50389487", "0.50361913", "0.5019495", "0.5011701", "0.50068843", "0.499908", "0.49922815", "0.49921542", "0.49815437", "0.49787292", "0.49696988", "0.49631137", "0.49542227", "0.49531898", "0.49433744", "0.49400622", "0.49377418", "0.49372053", "0.49359572", "0.49358192", "0.49338222", "0.49252012", "0.49233022", "0.49217293", "0.4917197", "0.49165234", "0.49019966", "0.48996252", "0.48948863", "0.48948342", "0.48918548", "0.48871967", "0.48811686", "0.48649096", "0.4861861", "0.4860263", "0.48597953", "0.48527786", "0.48490822", "0.4839701", "0.48396954", "0.4820827", "0.4816662", "0.48113033", "0.4809477", "0.48043376", "0.47752252", "0.4770181", "0.476899", "0.47645542", "0.47627917", "0.47624144", "0.47619027", "0.47511822", "0.47473028", "0.47462648", "0.47457024", "0.47447443", "0.4743682", "0.4738462", "0.47337204", "0.4732988" ]
0.69601613
1
Test that node names are set correctly for linear ops turned into matmul/add in onnx.
def test_set_node_name_for_matmul_add_linear(self, export_args): class Linear(torch.nn.Module): def __init__(self): super(Linear, self).__init__() self.linear = torch.nn.Linear(3, 2) def forward(self, inp): x = self.linear(inp) return x model = Linear() # Using an input to linear op with dimension != 2 causes torch to use matmul->add instead of gemm op onnx_path = './data/MyModel.onnx' onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 1, 3), onnx_export_args=copy.deepcopy(export_args)) onnx_model = onnx.load(onnx_path) expected_node_names = ['linear', 'linear#1.end'] actual_node_names = [node.name for node in onnx_model.graph.node] for name in expected_node_names: assert name in actual_node_names expected_param_names = ['linear.weight', 'linear.bias'] _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model) for name in expected_param_names: assert name in valid_param_set # Check that gemm still works as expected onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 3), onnx_export_args=copy.deepcopy(export_args)) onnx_model = onnx.load(onnx_path) actual_node_names = [node.name for node in onnx_model.graph.node] assert 'linear' in actual_node_names assert 'linear#1' not in actual_node_names expected_param_names = ['linear.weight', 'linear.bias'] _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model) for name in expected_param_names: assert name in valid_param_set self.check_onnx_node_name_uniqueness(onnx_model) if os.path.exists(onnx_path): os.remove(onnx_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_onnx_node_name_to_input_output_names_util(self):\n model = models.resnet18(pretrained=False)\n dummy_input = torch.randn(1, 3, 224, 224)\n torch.onnx.export(model, dummy_input, './data/resnet18.onnx')\n onnx_utils.OnnxSaver.set_node_names('./data/resnet18.onnx', model, dummy_input, is_conditional=False,\n module_marker_map={})\n onnx_model = onnx.load('./data/resnet18.onnx')\n\n # Get Dict mapping node name to the input and output names\n node_to_io_dict,_ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n\n node_0 = onnx_model.graph.node[0]\n assert node_0.input == node_to_io_dict[node_0.name].inputs\n assert node_0.output == node_to_io_dict[node_0.name].outputs", "def test_non_leaf_module_names(self):\n class Net(torch.nn.Module):\n \"\"\"\n Model using multiply as functional and module at different depths\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer = HierarchicalMultiplyModule()\n\n def forward(self, x):\n return self.layer(x)\n\n model = Net()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n expected_names = [\n # names compatible with torch 1.9.1 version (should be removed in the future)\n 'layer.mul1.mul',\n 'layer.mul1.Mul_7',\n 'layer.mul2.mul',\n 'layer.mul2.Mul_15',\n 'layer.Mul_18',\n \n # names compatible with torch 1.13.1 version \n '/layer/mul1/Mul',\n '/layer/mul2/Mul',\n '/layer/Mul'\n ]\n for node in onnx_model.graph.node:\n assert 'Constant' in node.name or node.name in expected_names\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def test_set_unique_node_names(self):\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def check_onnx_node_name_uniqueness(onnx_model):\n onnx_node_names = [node.name for node in onnx_model.graph.node]\n assert len(onnx_node_names) == len(set(onnx_node_names)), f'list size mismatch, check if names are unique'", "def test_node_name(self):\n xmlns = {\n \"a\": \"_a\",\n \"g\": \"_g\"\n }\n self.assertEqual(\n utils._node_name(\"a\", \"g\", xmlns),\n (False, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"a@a\", \"g\", xmlns),\n (False, \"a\", \"{_a}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@@a\", \"g\", xmlns),\n (True, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@a@a\", \"g\", xmlns),\n (True, \"a\", \"{_a}a\")\n )\n # something not equal to _\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"1@a@a\", \"g\", xmlns),\n # too many @\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"@@a@a\", \"g\", xmlns),", "def test_valid_tensor_op_name_inputs(self, data, description):\n name_a, name_b = data\n 
self.assertEqual(name_a, name_b, msg=description)", "def test_create_named_input_edge(self):\n n1, n2 = Node(), Node()\n result = n1 | 'foo' * n2\n self.assertEqual(result, n2)\n self.assertEqual(n1.eout, [Edge(n1, n2, input_name='foo')])\n self.assertEqual(n2.ein, [Edge(n1, n2, input_name='foo')])", "def test_expansion(self, n_wires, expected_names, expected_wires):\n\n shapes = expected_shapes(1, n_wires)\n weights = [np.random.random(shape) for shape in shapes]\n\n op = qml.CVNeuralNetLayers(*weights, wires=range(n_wires))\n tape = op.expand()\n\n i = 0\n for gate in tape.operations:\n if gate.name != \"Interferometer\":\n assert gate.name == expected_names[i]\n assert gate.wires.labels == tuple(expected_wires[i])\n i = i + 1\n else:\n for gate_inter in gate.expand().operations:\n assert gate_inter.name == expected_names[i]\n assert gate_inter.wires.labels == tuple(expected_wires[i])\n i = i + 1", "def test_custom_wire_labels(self, tol):\n shapes = expected_shapes(1, 3)\n weights = [np.random.random(shape) for shape in shapes]\n\n dev = DummyDevice(wires=3)\n dev2 = DummyDevice(wires=[\"z\", \"a\", \"k\"])\n\n @qml.qnode(dev)\n def circuit():\n qml.CVNeuralNetLayers(*weights, wires=range(3))\n return qml.expval(qml.Identity(0))\n\n @qml.qnode(dev2)\n def circuit2():\n qml.CVNeuralNetLayers(*weights, wires=[\"z\", \"a\", \"k\"])\n return qml.expval(qml.Identity(\"z\"))\n\n circuit()\n circuit2()\n\n assert np.allclose(dev._state[0], dev2._state[0], atol=tol, rtol=0)\n assert np.allclose(dev._state[1], dev2._state[1], atol=tol, rtol=0)", "def test_add_00():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [\n info(\"A\", TensorProto.FLOAT, a_shape),\n info(\"B\", TensorProto.FLOAT, b_shape),\n ]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n b = np.random.rand(*b_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a, \"B\": b}, outputs).run()", "def set_node_label_bypass(node_names, new_labels, network=None, base_url=DEFAULT_BASE_URL):\n res = set_node_property_bypass(node_names, new_labels, 'NODE_LABEL', network=network, base_url=base_url)\n return res", "def __init__(self, graph,\n nn_module='MLP',\n nn_layers=1,\n nn_mid_units=128,\n nn_mid_acti=tf.tanh,\n nn_out_units=1,\n nn_out_acti=None,\n ignore_nodetype=True,\n name='node_fn'):\n self.graph = graph\n self.nn_module = nn_module\n self.nn_layers = nn_layers\n self.nn_mid_units = nn_mid_units\n self.nn_mid_acti = nn_mid_acti\n self.nn_out_units = nn_out_units\n self.nn_out_acti = nn_out_acti\n self.ignore_nodetype = ignore_nodetype\n self.name = name\n\n self.reuse = None", "def test_add_02():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = 
np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_add_01():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (1, 1, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_add_03():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (3, 4)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_naming_for_model_with_deep_graph(self):\n\n model = models.resnet152(pretrained=False)\n dummy_input = torch.randn(1, 3, 224, 224)\n\n onnx_path= './data/' + model.__class__.__name__ + '.onnx'\n with onnx_simply(True):\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input,\n is_conditional=False, module_marker_map={})\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_names(onnx_model)\n\n counts = defaultdict(int)\n top_level_nodes = tuple(['conv1', 'bn1', 'relu', 'maxpool', 'avgpool', 'Flatten_', '/Flatten', 'fc'])\n for node in onnx_model.graph.node:\n if node.name.startswith(top_level_nodes):\n continue\n elif '.' 
in node.name:\n layer_name = '.'.join(node.name.split('#')[0].split('.')[:-1])\n counts[layer_name] += 1\n elif node.name.startswith('/'):\n layer_name = '.'.join(node.name.split('/')[1:-1])\n counts[layer_name] += 1\n\n for name, counts in counts.items():\n if 'downsample' in name:\n assert counts == 2\n else:\n print(name, counts)\n assert counts == 10\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def test_get_hyperflex_node_by_moid(self):\n pass", "def test_create_two_named_edges(self):\n n1, n2 = Node('a'), Node('b')\n result = n1 * 'foo' | 'bar' * n2\n self.assertEqual(result, n2)\n self.assertEqual(n1.eout, [Edge(n1, n2, 'foo', 'bar')])\n self.assertEqual(n2.ein, [Edge(n1, n2, 'foo', 'bar')])", "def test_node_bad_name(self):\n node_name = 1\n self.assertRaises(TypeError, Node, node_name, '1', '2', 'leaf')", "def test_add_node(num_mutations):\n net = WeightAgnosticNetwork(10, 2, 0.5)\n for _ in range(num_mutations):\n net.mutate()\n\n num_connections_pre, num_neurons_pre, num_layers_pre = get_network_stats(net)\n net.add_node()\n assert net.get_num_connections() == num_connections_pre + 1\n assert net.num_neurons == num_neurons_pre + 1\n assert len(net.neurons_in_layer) == num_layers_pre or len(\n net.neurons_in_layer) == num_layers_pre + 1", "def test_model_with_simple_rnn_layer_relu(self):\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Add an RNN layer with 12 internal units.\n x = tf.keras.layers.SimpleRNN(12, name='rnn0', activation='relu')(inputs)\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax, name=\"matmul0\")(x)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./simple_rnn', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n # there should be only 4 connected graph ops, input, simpleRNN , Dense and Softmax\n self.assertEqual(4, len(conn_graph.get_all_ops()))\n simple_rnn_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'SimpleRNN':\n simple_rnn_detected = True\n inner_list = op.internal_ops\n self.assertEqual(49, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('rnn0/while/MatMul'))\n self.assertEqual('rnn0', op.name)\n self.assertTrue(simple_rnn_detected)\n\n # check for 2 MatMuls, 1 BiasAdd and an activation function in the inner op list\n valid_matmuls = []\n valid_bias_add = []\n valid_activation = []\n for op in inner_list:\n if op.type == 'MatMul' and op not in valid_matmuls:\n valid_matmuls.append(op)\n if op.type == 'BiasAdd' and op not in valid_bias_add:\n valid_bias_add.append(op)\n if op.type == 'Relu' and op not in valid_activation:\n valid_activation.append(op)\n\n self.assertEqual(2, len(valid_matmuls))\n self.assertEqual(1, len(valid_bias_add))\n self.assertEqual(1, len(valid_activation))", "def test_activation_names(num_mutations):\n net = WeightAgnosticNetwork(10, 2, 0.5)\n for _ in range(num_mutations):\n net.mutate()\n\n assert len(net.activation_names) == net.num_neurons", "def _common_singa_tensor_to_onnx_node(cls, op, op_t):\n node_def = NodeProto()\n node_def.name = op.name\n\n optype = cls._get_singa_op_type(op)\n node_def.op_type = cls._rename_operators.get(optype, optype)\n\n inputs, outputs = cls._get_singa_op_inputs_outputs(op)\n node_def.input.extend(inputs)\n node_def.output.extend(outputs)\n\n return node_def", "def 
check_nodes(self) -> bool:\n # check the input-output consistency\n for op_name in self.__ops:\n op = cast(Operator, self.__ops[op_name])\n inputs: Dict[str, Operator] = op.input_ops\n for i in inputs.values():\n if op not in i.output_op_list:\n return False\n\n return True", "def test_good_node():\n node_a = Node({'A':['B','C']})\n assert node_a.name == 'A'\n assert node_a.connections == ['B','C']", "def testBasic1(self):\n nodes = self.G.nodes()\n assert len(nodes) == len( set(nodes) )", "def listNodesWithIncorrectNames(*args, **kwargs)->None:\n pass", "def test_custom_relu_mnist():\n loss1 = mnist()\n loss2 = custom_mnist()\n assert np.allclose(loss1, loss2, equal_nan=True)", "def test_model_with_simple_rnn_layer(self):\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Add an RNN layer with 12 internal units.\n x = tf.keras.layers.SimpleRNN(12, name='rnn0')(inputs)\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,\n name=\"matmul0\")(x)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./simple_rnn', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n # there should be only 4 connected graph ops, input, simpleRNN , Dense and Softmax\n self.assertEqual(4, len(conn_graph.get_all_ops()))\n simple_rnn_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'SimpleRNN':\n simple_rnn_detected = True\n inner_list = op.internal_ops\n self.assertEqual(49, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('rnn0/while/MatMul'))\n self.assertEqual('rnn0', op.name)\n self.assertTrue(simple_rnn_detected)\n\n # check for 2 MatMuls, 1 BiasAdd and an activation function in the inner op list\n valid_matmuls = []\n valid_bias_add = []\n valid_activation = []\n for op in inner_list:\n if op.type == 'MatMul' and op not in valid_matmuls:\n valid_matmuls.append(op)\n if op.type == 'BiasAdd' and op not in valid_bias_add:\n valid_bias_add.append(op)\n if op.type == 'Tanh' and op not in valid_activation:\n valid_activation.append(op)\n\n self.assertEqual(2, len(valid_matmuls))\n self.assertEqual(1, len(valid_bias_add))\n self.assertEqual(1, len(valid_activation))", "def test_nodes(self):\r\n self.assertTrue(isinstance(self.node1, Node))\r\n self.assertEqual(len(self.net.node), 3)\r\n if (isinstance(self.net.environment, Environment2D)):\r\n self.assertEqual(self.net.environment.im.shape,\r\n settings.ENVIRONMENT2D_SHAPE,\r\n 'incorrect default size')\r\n self.assertTrue(isinstance(self.net.channelType, ChannelType))", "def _common_onnx_node_to_singa_op(cls, onnx_node, inputs, opset_version):\n onnx_op_type = onnx_node.op_type\n assert onnx_op_type in cls._rename_operators, \"not support operator: {}\".format(\n onnx_op_type)\n autograd_op = getattr(autograd, cls._rename_operators[onnx_op_type])\n return None, autograd_op", "def test_graph_adds_and_lists_nodes(graph_no_edges):\n listy = ['BB', 82, 99, 'AA']\n for node in listy:\n assert node in graph_no_edges.nodes()", "def _get_input_output_node_names(nodes):\n input_names, output_names = set(), set()\n extension_output_names = set()\n for node in nodes:\n tf_node = node if isinstance(node,\n TensorflowNode) else TensorflowNode(node)\n output_names.add(node.name)\n # Add outputs for Split, Switch TensorArrayV3\n if tf_node.op_type == \"Split\":\n for i in range(1, 
tf_node.attr[\"num_split\"]):\n output_names.add(tf_node.name + \":{}\".format(i))\n if tf_node.op_type == \"Switch\":\n output_names.add(tf_node.name + \":1\")\n extension_output_names.add((tf_node.name, tf_node.name + \":1\"))\n if tf_node.op_type == \"TensorArrayV3\":\n output_names.add(tf_node.name + \":1\")\n extension_output_names.add((tf_node.name, tf_node.name + \":1\"))\n input_names.update(\n set([inp if inp[0] != \"^\" else inp[1:] for inp in tf_node.inputs]))\n inputs = input_names - output_names\n outputs = output_names - input_names\n while extension_output_names:\n ext_names = extension_output_names.pop()\n for name in ext_names:\n if name in outputs:\n outputs -= set(ext_names)\n break\n inputs.discard(None)\n return list(inputs), list(outputs)", "def test_matmul_vm(self):\n self.check_dot_vm(matmul_usecase, None, \"'@'\")", "def add_node(self, name):\n if name not in self._name:\n self._name.append(name)\n while(len(self._name) > len(self._adjmatrix)):\n self._adjmatrix.append([])\n for single_list in self._adjmatrix:\n while(len(self._name) > len(single_list)):\n single_list.append(False)", "def test_structure(self):\n mn = LogLinearModel()\n\n mn.set_unary_factor(0, np.random.randn(4))\n mn.set_unary_factor(1, np.random.randn(3))\n mn.set_unary_factor(2, np.random.randn(5))\n\n mn.set_edge_factor((0, 1), np.random.randn(4, 3))\n mn.set_edge_factor((1, 2), np.random.randn(3, 5))\n\n print(\"Neighbors of 0: \" + repr(mn.get_neighbors(0)))\n print(\"Neighbors of 1: \" + repr(mn.get_neighbors(1)))\n print(\"Neighbors of 2: \" + repr(mn.get_neighbors(2)))\n\n assert mn.get_neighbors(0) == set([1]), \"Neighbors are wrong\"\n assert mn.get_neighbors(1) == set([0, 2]), \"Neighbors are wrong\"\n assert mn.get_neighbors(2) == set([1]), \"Neighbors are wrong\"", "def test_names(self):\n for m in self.masks:\n for name in m.names():\n self.assertEqual(m.mask(name), 2**m.bitnum(name), 'Failed matching mask to bitnum for '+name)\n self.assertEqual(m.mask(name), m.mask(m.bitnum(name)), 'Failed matching mask to name for '+name)\n self.assertEqual(m.bitname(m.bitnum(name)), name, 'Failed bit name->num->name roundtrip for '+name)\n c = m.comment(name)", "def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\r\n new_node = Op.__call__(self)\r\n new_node.matmul_attr_trans_A = trans_A\r\n new_node.matmul_attr_trans_B = trans_B\r\n new_node.inputs = [node_A, node_B]\r\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\r\n return new_node", "def from_onnx(self, graph):\n # parse network inputs, aka parameters\n for init_tensor in graph.initializer:\n if not init_tensor.name.strip():\n raise ValueError(\"Tensor's name is required.\")\n self._params[init_tensor.name] = self._parse_array(init_tensor)\n\n # converting GraphProto message\n for i in graph.input:\n if i.name in self._params:\n # i is a param instead of input\n name_param = 'param_{}'.format(self._num_param)\n self._num_param += 1\n self._params[name_param] = self._params.pop(i.name)\n self._nodes[name_param] = mx.sym.Variable(name=name_param,\n shape=self._params[name_param].shape)\n self._renames[i.name] = name_param\n else:\n name_input = 'input_{}'.format(self._num_input)\n self._num_input += 1\n self._nodes[name_input] = mx.sym.Variable(name=name_input)\n self._renames[i.name] = name_input\n\n # constructing nodes, nodes are stored as directed acyclic graph\n # converting NodeProto message\n for node in graph.node:\n op_name = node.op_type\n node_name = 
node.name.strip()\n node_name = node_name if node_name else None\n onnx_attr = self._parse_attr(node.attribute)\n new_op, mx_attr = _convert_operator(op_name, onnx_attr)\n inputs = [self._nodes[self._renames.get(i, i)] for i in node.input]\n\n # some workarounds for onnx problem\n mx_attr = self._fix_bias(new_op, mx_attr, len(inputs))\n mx_attr = self._fix_channels(new_op, mx_attr, list(node.input))\n self._fix_bias_shape(node.op_type, node.input, onnx_attr)\n\n # calling again to get new symbols after some workarounds\n inputs = [self._nodes[self._renames.get(i, i)] for i in node.input]\n\n # onnx's Gemm operator also supports broadcasting C input which\n # mxnet's equivalent linalg_gemm doesn't. So using combination of\n # transpose and FullyConnected operators.\n if op_name == 'Gemm':\n new_op, inputs, mx_attr = self._fix_gemm('FullyConnected', inputs, onnx_attr)\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(inputs, mx_attr)\n elif op_name == 'AveragePool' and onnx_attr.get('pads') is not None or \\\n op_name == 'MaxPool' and onnx_attr.get('pads') is not None:\n op = self._fix_pooling(op_name, inputs, onnx_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(inputs, mx_attr)\n else:\n op = new_op(name=node_name, *inputs, **mx_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n\n assert len(node_output) == len(op.list_outputs()), (\n \"Number of output mismatch {} vs {} in {}.\".format(\n len(node_output), len(op.list_outputs()), op_name))\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n # now return the outputs\n out = [self._nodes[i.name] for i in graph.output]\n if len(out) > 1:\n out = mx.sym.Group(out)\n else:\n out = out[0]\n return out, self._params", "def test_single_pytorch_module_mapping_to_many_onnx_nodes(self):\n\n AimetLogger.set_level_for_all_areas(logging.DEBUG)\n\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model_name = 'multilayer_lstm'\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n\n torch.onnx.export(model, dummy_input, './data/' + model_name + '.onnx')\n onnx_utils.OnnxSaver.set_node_names('./data/' + model_name + '.onnx', model, dummy_input, is_conditional=False,\n module_marker_map={})\n onnx_model = onnx.load('./data/' + model_name + '.onnx')\n\n lstm_nodes = [node for node in onnx_model.graph.node if node.op_type == 'LSTM']\n assert 3 == len(lstm_nodes)\n\n node_to_io_dict, _ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n assert isinstance(node_to_io_dict['lstm#root_node'], list)\n assert 3 == len(node_to_io_dict['lstm#root_node'])", "def test_nxgraph(self):\n self._build_sample_graph()\n # Adding singleton\n sg = self.skill_graph.add(Skill.build('g', ''))\n skill_map = SkillMap.load(self.course)\n nxgraph = SkillMapMetrics(skill_map).nxgraph\n self.assertIsInstance(nxgraph, DiGraph)\n successors = skill_map.build_successors()\n # Check nodes\n self.assertEqual(len(nxgraph), len(successors))\n for skill in successors:\n self.assertIn(skill, nxgraph.nodes(),\n msg='Node {} not found in nx graph.'.format(skill))\n # Check edges\n original_edges = sum(len(dst) for dst in successors.values())\n 
self.assertEqual(len(nxgraph.edges()), original_edges)\n for src, dst in nxgraph.edges_iter():\n self.assertIn(src, successors)\n self.assertIn(dst, successors[src],\n msg='Extra {},{} edge in nx graph.'.format(src, dst))", "def node_name(self) -> str:\n op_name = f\"{self.name.name}_{self.name.overload_name}\".lower()\n return \"\".join(word.capitalize() or \"\" for word in op_name.split(\"_\"))", "def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors", "def test_invalid_tensor_name_inputs_with_wrong_types(self, data, description):\n with self.assertRaises(TypeError, msg=description):\n tfx.tensor_name(data)", "def __call__(self, node_A, node_B, trans_A=False, trans_B=False):\n new_node = Op.__call__(self)\n new_node.matmul_attr_trans_A = trans_A\n new_node.matmul_attr_trans_B = trans_B\n new_node.inputs = [node_A, node_B]\n new_node.name = \"MatMul(%s,%s,%s,%s)\" % (node_A.name, node_B.name, str(trans_A), str(trans_B))\n return new_node", "def test_create_named_output_edge(self):\n n1, n2 = Node('a'), Node('b')\n result = n1 * 'foo' | n2\n self.assertEqual(result, n2)\n self.assertEqual(n1.eout, [Edge(n1, n2, 'foo')])\n self.assertEqual(n1.ein, [])\n self.assertEqual(n2.ein, [Edge(n1, n2, 'foo')])\n self.assertEqual(n2.eout, [])", "def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_vals) == 2\r\n if node.matmul_attr_trans_A :\r\n input_vals[0] = input_vals[0].T\r\n if node.matmul_attr_trans_B :\r\n input_vals[1] = input_vals[1].T\r\n return np.matmul(input_vals[0] , input_vals[1])", "def add_node(self, name, edges):\n self.nodes[name] = Node(edges)", "def test_reduce_mean_00():\n\n class ReduceMeanTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"ReduceMean\", inputs=[\"v0\"], outputs=[\"v1\"])\n inputs 
= [info(\"v0\", TensorProto.FLOAT, (1, 3, 4, 5))]\n outputs = [info(\"v1\", TensorProto.FLOAT, (1, 1, 1, 1))]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n v0 = np.random.rand(1, 3, 4, 5).astype(np.float32)\n\n outputs = [\"v1\"]\n ReduceMeanTester({\"v0\": v0}, outputs).run()", "def _add_label_switching_node( self,\n node_tree,\n label_vec,\n last_element,\n label_ID_node=None,\n node_index=0,\n uv_map=None, \n node_offset=[0,0]):\n\n # define local variables #######################################################################################\n _step_node_width = 200 # x seperation of nodes\n _step_node_height = 200 # y seperation of nodes\n ################################################################################ end of define local variables #\n\n # create image ID handle #######################################################################################\n if label_ID_node is None:\n label_ID_node = node_tree.node_tree.nodes.new(\"ShaderNodeValue\")\n label_ID_node.location = ((node_offset[0]-400,node_offset[1]-100))\n label_ID_node.name = \"label_step_ID\"\n label_ID_node.label = \"label_step_ID\"\n label_ID_node.outputs[0].default_value = 1\n ############################################################################### end of create image ID handle #\n\n # create image nodes ###########################################################################################\n _x_offset = (node_index+1)*_step_node_width + node_offset[0]\n _y_offset = (node_index+1)*_step_node_height + node_offset[1]\n\n _semantic_node_offset = [(node_index+1)*_step_node_width*2 + node_offset[0]-1000,(node_index+1)*\\\n _step_node_height + node_offset[1]+200]\n\n _semantic_tree, self._semantic_pass_id = self.create_semantic_nodes(node_tree=self._world_node_tree,\n label_ID_vec=label_vec,\n num_label_per_channel=15, # TODO add in script\n env_mode=True,\n uv_map=uv_map,\n node_offset=_semantic_node_offset)\n\n _semantic_tree.inputs[0].default_value = 1\n\n # create new mix node ######################################################################################\n _current_mix_shader_node = node_tree.node_tree.nodes.new(\"ShaderNodeMixRGB\")\n _current_mix_shader_node.location = (((node_index+1)*_step_node_width*2 + node_offset[0],\n (node_index+1)*_step_node_height + node_offset[1]))\n ############################################################################### end of create new mix node #\n\n # create compare node ######################################################################################\n _current_compare_node = node_tree.node_tree.nodes.new(\"ShaderNodeMath\")\n _current_compare_node.location = (((node_index+1)*_step_node_width*2 + node_offset[0],\n node_offset[1]-_step_node_height))\n _current_compare_node.operation = 'COMPARE'\n _current_compare_node.inputs[0].default_value = node_index\n _current_compare_node.inputs[2].default_value = 0 # delta value should be zero for equal comparison\n ############################################################################### end of create compare node #\n\n\n # link nodes togther #######################################################################################\n node_tree.node_tree.links.new(_current_mix_shader_node.inputs[0], _current_compare_node.outputs[0])\n if last_element is not None:\n node_tree.node_tree.links.new(_current_mix_shader_node.inputs[1], last_element.outputs[0])\n node_tree.node_tree.links.new(_current_mix_shader_node.inputs[2], 
_semantic_tree.outputs[0])\n \n node_tree.node_tree.links.new(_current_compare_node.inputs[1], label_ID_node.outputs[0])\n ################################################################################ end of link nodes togther #\n #################################################################################### end of create image nodes #\n\n # return last mix shader node\n return _current_mix_shader_node, label_ID_node", "def test_error_node():\n try:\n node_a = Node({'a':'a'})\n except Exception as e:\n assert str(e) == 'input connected nodes info is not in a list.'", "def convert_linalg_gemm2(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Getting the attributes and assigning default values.\n alpha = float(attrs.get(\"alpha\", 1.0))\n trans_a = get_boolean_attribute_value(attrs, \"transpose_a\")\n trans_b = get_boolean_attribute_value(attrs, \"transpose_b\")\n\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n\n if alpha == 1.0 and trans_a == 0 and trans_b == 0:\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n return [matmul_node]\n elif trans_a == 1 and trans_b == 0:\n op_name = \"transpose\" + str(kwargs[\"idx\"])\n node_name = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[node_name, input_nodes[1]],\n outputs=[name],\n name=name\n )\n return [trans_a_node, matmul_node]\n\n elif trans_a == 0 and trans_b == 1:\n node_name = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=[input_nodes[0], node_name],\n outputs=[name],\n name=name\n )\n\n return [trans_b_node, matmul_node]\n else:\n node_name_a = op_name+\"_a\"\n trans_a_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[0]],\n outputs=[op_name+\"_a\"],\n name=node_name_a\n )\n\n node_name_b = op_name + \"_b\"\n trans_b_node = onnx.helper.make_node(\n 'Transpose',\n inputs=[input_nodes[1]],\n outputs=[op_name+\"_b\"],\n name=node_name_b\n )\n\n matmul_node = onnx.helper.make_node(\n 'MatMul',\n inputs=input_nodes,\n outputs=[name],\n name=name\n )\n\n return [trans_a_node, trans_b_node, matmul_node]", "def test_label(self):\n try:\n t = self.OntTerm(label='diffuse')\n raise AssertionError(f'should fail {t!r}')\n except TypeError:\n pass", "def __init__(self, a_node, b_node, name=None):\n BinaryMatrixOp.__init__(self, a_node, b_node, name)", "def test_duplicate_named_input_edge(self):\n with self.assertRaises(ValidationError):\n with Graph('g'):\n n1, n2 = Node('a'), Node('b')\n n1 | 'bar' * n2\n n1 * 'foo' | 'bar' * n2", "def check_name(self, node):\n assert \"name\" in node, \"Package node does not contain attribute 'node'\"\n assert len(node[\"name\"]) >= 1, \"Expecting at least one 'name' value\"\n # TODO: add more thorough checks", "def graph_node_names_details(model):\n\n node_details = namedtuple('node_details', ['node', 'outputs'])\n node_names_details = {}\n for initializer in model.initializer():\n initializer_name = initializer.name\n each_node = node_details(node=initializer, outputs=[])\n if initializer_name not in node_names_details:\n each_node.outputs.extend(get_initializer_children_names(model, initializer))\n node_names_details[initializer_name] = each_node\n for node in model.nodes():\n 
node_name = node.name\n output_names = node.output\n # onnx output has different name from node name\n for output_name in output_names:\n if output_name not in node_names_details:\n node_names_details[output_name] = node_name\n each_node = node_details(node=node, outputs=[])\n if node_name not in node_names_details:\n each_node.outputs.extend(get_node_children_names(model, node))\n node_names_details[node_name] = each_node\n for graph_input in model.graph().input:\n outputs = []\n node_name = graph_input.name\n for k, v in node_names_details.items():\n try:\n if node_name in v.node.input:\n outputs.append(k)\n except BaseException:\n continue\n each_node = node_details(node=graph_input, outputs=outputs)\n # if node_name not in node_names_details:\n node_names_details[node_name] = each_node\n\n return node_names_details", "def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Oneslike[%s]\" % node_A.name\r\n return new_node", "def mutate_topology_func(op_names):\n def mutate_topology_func(parent_arch):\n child_arch = deepcopy( parent_arch )\n node_id = random.randint(0, len(child_arch.nodes)-1)\n node_info = list( child_arch.nodes[node_id] )\n snode_id = random.randint(0, len(node_info)-1)\n xop = random.choice( op_names )\n while xop == node_info[snode_id][0]:\n xop = random.choice( op_names )\n node_info[snode_id] = (xop, node_info[snode_id][1])\n child_arch.nodes[node_id] = tuple( node_info )\n return child_arch\n return mutate_topology_func", "def test_reduce_mean_02():\n\n class ReduceMeanTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"ReduceMean\", inputs=[\"v0\"], outputs=[\"v1\"], axes=[1, 2])\n inputs = [info(\"v0\", TensorProto.FLOAT, (1, 3, 4, 5))]\n outputs = [info(\"v1\", TensorProto.FLOAT, [1, 1, 1, 5])]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n v0 = np.random.rand(1, 3, 4, 5).astype(np.float32)\n\n outputs = [\"v1\"]\n ReduceMeanTester({\"v0\": v0}, outputs).run()", "def test_data_naming():\n with pm.Model(\"named_model\") as model:\n x = pm.ConstantData(\"x\", [1.0, 2.0, 3.0])\n y = pm.Normal(\"y\")\n assert y.name == \"named_model::y\"\n assert x.name == \"named_model::x\"", "def test_instance_naming_creation(os_info):\n NEUTRON.list_security_groups = mock.MagicMock(\n return_value=iter([{\"security_groups\": []}]))\n NEUTRON.create_subnet = mock.MagicMock(\n return_value={\"subnet\": SUBNETS}\n )\n\n instance_names = os_info.nodes_names\n for i in range(len(instance_names)):\n assert instance_names[i] == 'test-node-{}'.format(i + 1)", "def _on_node_name_changed(self, oldname, newname):\n if newname in self._nodes and self._nodes[oldname] != self._nodes[newname]:\n raise Exception(\"New name collides with existing node.\")\n node = self._nodes[oldname]\n self._nodes[newname] = node\n del self.__nodes[oldname]\n self.node_name_changed.emit(oldname, newname)", "def add_node(self, name, node):\n\n self.nodes[name] = fold_constant(node)", "def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()", "def test_set_node_properties(self):\n\n pass", "def __init__(self, graph,\n nn_module='MLP',\n nn_layers=1,\n nn_mid_units=128,\n nn_mid_acti=tf.tanh,\n nn_out_units=1,\n nn_out_acti=None,\n ignore_receiver=False,\n ignore_edgetype=False,\n use_interacted_feature=False,\n name='edge_fn'):\n self.graph = 
graph\n self.nn_module = nn_module\n self.nn_layers = nn_layers\n self.nn_mid_units = nn_mid_units\n self.nn_mid_acti = nn_mid_acti\n self.nn_out_units = nn_out_units\n self.nn_out_acti = nn_out_acti\n self.ignore_receiver = ignore_receiver\n self.ignore_edgetype = ignore_edgetype\n self.use_interacted_feature = use_interacted_feature\n self.name = name\n\n self.reuse = None", "def test_add_network(self):\n pass", "def test_model_with_basic_lstm_layer(self):\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Add an RNN layer with 12 internal units.\n x = tf.keras.layers.LSTM(12, name='lstm0')(inputs)\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,\n name=\"matmul0\")(x)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./lstm', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n # there should be only 4 connected graph ops, input, LSTM , Dense and Softmax\n self.assertEqual(4, len(conn_graph.get_all_ops()))\n lstm_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'LSTM':\n lstm_detected = True\n inner_list = op.internal_ops\n self.assertEqual(86, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('lstm0/while/MatMul'))\n self.assertEqual('lstm0', op.name)\n self.assertTrue(lstm_detected)\n\n valid_matmuls = []\n valid_muls = []\n valid_bias_add = []\n valid_activation = []\n for op in inner_list:\n if op.type == 'MatMul' and op not in valid_matmuls:\n valid_matmuls.append(op)\n if op.type == 'Mul' and op not in valid_matmuls:\n valid_muls.append(op)\n if op.type == 'BiasAdd' and op not in valid_bias_add:\n valid_bias_add.append(op)\n if op.type == 'Tanh' and op not in valid_activation:\n valid_activation.append(op)\n\n self.assertEqual(8, len(valid_matmuls))\n self.assertEqual(7, len(valid_muls))\n self.assertEqual(4, len(valid_bias_add))\n self.assertEqual(2, len(valid_activation))", "def test_valid_tensor_op_object_inputs(self, data, description):\n tfobj_or_name_a, tfobj_or_name_b = data\n self.assertEqual(tfobj_or_name_a, tfobj_or_name_b, msg=description)", "def test_literals(self):\n self.graph.add((artis, RDFS.label, artis_label))\n self.graph.add((artis, EX['date'], date_literal))\n self.graph.add((artis, EX['bool'], bool_literal))\n self.graph.add((artis, EX['number'], number_literal))\n self.assertEquals(len(self.graph), 4)\n\n self.assertEquals(self.graph.value(artis, RDFS.label), artis_label)\n self.assertEquals(self.graph.value(artis, EX['date']), date_literal)\n self.assertEquals(self.graph.value(artis, EX['bool']), bool_literal)\n self.assertEquals(self.graph.value(artis, EX['number']), number_literal)", "def test_model_with_lstm_layer_sigmoid(self):\n\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Add an RNN layer with 12 internal units.\n x = tf.keras.layers.LSTM(12, recurrent_activation='sigmoid', name='lstm0')(inputs)\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,\n name=\"matmul0\")(x)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./lstm_sigmoid', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n # there should be 
only 4 connected graph ops, input, LSTM , Dense and Softmax\n self.assertEqual(4, len(conn_graph.get_all_ops()))\n lstm_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'LSTM':\n lstm_detected = True\n inner_list = op.internal_ops\n self.assertEqual(77, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('lstm0/while/MatMul'))\n self.assertEqual('lstm0', op.name)\n self.assertTrue(lstm_detected)\n\n valid_matmuls = []\n valid_muls = []\n valid_bias_add = []\n valid_activation = []\n for op in inner_list:\n if op.type == 'MatMul' and op not in valid_matmuls:\n valid_matmuls.append(op)\n if op.type == 'Mul' and op not in valid_matmuls:\n valid_muls.append(op)\n if op.type == 'BiasAdd' and op not in valid_bias_add:\n valid_bias_add.append(op)\n if op.type == 'Sigmoid' and op not in valid_activation:\n valid_activation.append(op)\n\n self.assertEqual(8, len(valid_matmuls))\n self.assertEqual(4, len(valid_muls))\n self.assertEqual(4, len(valid_bias_add))\n self.assertEqual(3, len(valid_activation))", "def test_mul():\n # Test for multiplication with scalar Rnode object and float value\n x = Rnode(0.11)\n y = Rnode(0.5)\n z = x * y\n\n try:\n assert z.value == x.value * y.value\n # assert x.grad() == sum(weight * var.grad()\n # for weight, var in x.children)\n except AssertionError as e:\n print(e)\n # Test for subtraction with scalar Rnode object and float value\n x = Rnode(0.5)\n z = x * 0.1\n try:\n assert z.value == x.value * 0.1\n # assert x.grad() == sum(weight * var.grad()\n # for weight, var in x.children)\n except AssertionError as e:\n print(e)", "def nonlinearity(nl):\n if nl == \"relu\":\n return tf.nn.relu\n elif nl == \"tanh\":\n return tf.nn.tanh\n elif nl == \"linear\" or nl == \"none\":\n return lambda x: x", "def test_addOutput(self):\n print(\"\\nTest 2: Adding OutputNode\")\n builder = StaticBuilder()\n builder.addInput(10, name=\"In\")\n builder.addInner(3, name=\"Det\")\n o_name = builder.addOutput(name=\"Out\")\n \n o1 = builder.nodes[o_name]\n print(\"\\nNode keys in builder:\", list(builder.nodes.keys()))\n print(\"This node's key:\", o_name)\n self.assertEqual(o1.label, 2, \"The label has not been assigned correctly\")\n self.assertEqual(builder.num_nodes, 3, \"The number of nodes has not been \"\n \"assigned correctly\")\n self.assertEqual(o1.num_declared_outputs, 0, \"The number of outputs of the \"\n \"OutputNode has not been assigned correctly\")\n self.assertEqual(o1.num_declared_inputs, 0, \"The number of inputs of the \"\n \"OutputNode has not been assigned correctly\")", "def __init__(self, nodeLabels: tuple):\n super().__init__(DEFAULT_MODEL)\n pass", "def set_node_shape_bypass(node_names, new_shapes, network=None, base_url=DEFAULT_BASE_URL):\n if not isinstance(new_shapes, list): new_shapes = [new_shapes] # TODO: It looks like this should be happening everywhere?\n\n if len(node_names) != len(new_shapes) and len(new_shapes) != 1:\n error = 'error in set_node_shape_bypass(). 
new_shapes count ' + str(len(new_shapes)) + ' is neither 1 nor same as node_names count ' + str(len(node_names))\n sys.stderr.write(error)\n return None # Should this be an exception?\n\n # convert old to new node shapes\n # TODO: Why isn't this done on other shape functions?\n new_shapes = ['ROUND_RECTANGLE' if shape == 'round_rect' else shape for shape in new_shapes]\n new_shapes = ['RECTANGLE' if shape == 'rect' else shape.upper() for shape in new_shapes]\n\n # ensure valid node shapes\n valid_shapes = styles.get_node_shapes(base_url=base_url)\n for shape in new_shapes:\n if not shape in valid_shapes:\n error = 'ERROR in set_node_shape_bypass(). ' + shape + ' is not a valid shape. Please note that some older shapes are no longer available. For valid ones check get_node_shapes().'\n sys.stderr.write(error)\n return None # Should this be an exception?\n\n res = set_node_property_bypass(node_names, new_shapes, 'NODE_SHAPE', network=network, base_url=base_url)\n\n return res", "def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Oneslike(%s)\" % node_A.name\n return new_node", "def test_sym_axes_label_left(tensor4):\n sym_axes = mechkit.operators.Sym(axes=[0, 1])(tensor4)\n sym_label = Sym_Fourth_Order_Special(label=\"left\")(tensor4)\n\n print(sym_axes)\n print(sym_label)\n\n assert np.allclose(sym_axes, sym_label)", "def test_reduce_mean_01():\n\n class ReduceMeanTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"ReduceMean\", inputs=[\"v0\"], outputs=[\"v1\"], keepdims=0)\n inputs = [info(\"v0\", TensorProto.FLOAT, (1, 3, 4, 5))]\n outputs = [info(\"v1\", TensorProto.FLOAT, [])] # the shape is scalar\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n v0 = np.random.rand(1, 3, 4, 5).astype(np.float32)\n\n outputs = [\"v1\"]\n ReduceMeanTester({\"v0\": v0}, outputs).run()", "def generate_mxp_graph(model_name, activations, stats, first_node_name, last_node_name, io_info,\n input_type, ignore_strides=False, inline_depthwise=False, verbose=False):\n network = {}\n network['layers'] = []\n network['test_input'] = None\n network['test_output'] = None\n network['scale'] = 1.0\n\n model = onnx.load(model_name)\n nodes = model.graph.node\n inits = model.graph.initializer\n\n idx = get_node_index(nodes, first_node_name)\n if idx == None:\n if verbose:\n print('{} does not exist\\nopen {} in Netron + check spelling'.format(first_node_name, mname))\n assert(idx != None)\n\n last_idx = get_node_index(nodes, last_node_name)\n if last_idx == None:\n if verbose:\n print('{} does not exist\\nopen {} in Netron + check spelling'.format(last_node_name, mname))\n assert(last_idx != None)\n\n while True:\n node = nodes[idx]\n if verbose:\n print(node.name, node.op_type)\n src_node = get_node_source(nodes, node.input[0])\n if src_node == None:\n input_id = node.input[0]\n else:\n input_id = src_node.output[0]\n output_id = node.output[0]\n\n\n if len(network['layers']) == 0:\n previous = None\n else:\n previous = network['layers'][-1]\n for layer in network['layers']:\n if layer['output_id'] == input_id:\n previous = layer\n\n input_shapes, output_shapes = get_shapes(activations, stats, node)\n assert len(output_shapes) == 1, \"Multi-output nodes not supported\"\n output_shape = output_shapes[0]\n if node.op_type == \"Conv\":\n c, m, n = input_shapes[0]\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n assert(get_attr(node, 'pads') == None or not 
any(get_attr(node, 'pads')))\n\n group = get_attr(node, 'group')\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n dilations = np.asarray(get_attr(node, 'dilations')).tolist()\n if not group:\n group = 1\n if not strides:\n strides = [1, 1]\n if not dilations:\n dilations = [1, 1]\n\n use_strided = 0\n assert(strides == [1, 1] or strides == [2, 2] or strides == [4, 4])\n\n if DO_STRIDES and not ignore_strides:\n if (strides[0] > 1 or strides[1] > 1) and group == 1: # TODO handle depthwise as well\n assert(previous['output_size'] == int(np.prod(input_shapes[0])))\n use_strided = 1\n previous['output_strides'] = strides\n if verbose:\n print('adding output strides to previous node')\n\n m = m + (m % strides[0])\n n = n + (n % strides[1])\n if int(np.prod(input_shapes[0])) != int(c*m*n):\n if verbose:\n print('adjusting size for strided maps')\n previous['output_size'] = int(c*4*m//strides[0]*n//strides[1])\n previous['output_shape'] = (c*4,m//strides[0],n//strides[1])\n\n w = get_tensor(inits, node.input[1])\n kernels, channels, _, _ = w.shape\n if len(node.input) == 3:\n b = get_tensor(inits, node.input[2])\n\n conv_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(c*m*n),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels * group,\n 'kernels': kernels,\n 'kernel_shape': kernel_shape,\n 'dilations': dilations,\n 'strides': strides,\n 'group': group,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'use_cvi': 0,\n 'use_depthwise': 0,\n 'use_strided': use_strided,\n \"biases\": [],\n \"weights\": [],\n \"sublayers\": [],\n }\n\n w = w.flatten().tolist()\n conv_layer['weights'] = base64.b64encode(struct.pack(\"f\"*len(w), *w)).decode()\n\n if len(node.input) == 3:\n b = b.flatten().tolist()\n else:\n b = [0 for _ in range(kernels)]\n conv_layer['biases'] = base64.b64encode(struct.pack(\"f\"*len(b), *b)).decode()\n\n network['layers'].append(conv_layer)\n\n elif node.op_type == \"Gemm\":\n w = get_tensor(inits, node.input[1])\n output_size, input_size = w.shape\n\n if len(node.input) == 3:\n b = get_tensor(inits, node.input[2])\n\n gemm_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(np.prod(input_shapes[0])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape': output_shape,\n 'gemm_input_size': input_size,\n 'gemm_output_size': output_size,\n 'input_id': input_id,\n 'output_id': output_id,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"biases\": [],\n \"weights\": [],\n \"sublayers\": [],\n }\n\n w = w.flatten().tolist()\n gemm_layer['weights'] = base64.b64encode(struct.pack(\"f\"*len(w), *w)).decode()\n\n if len(node.input) == 3:\n b = b.flatten().tolist()\n else:\n b = [0 for _ in range(output_size)]\n gemm_layer['biases'] = base64.b64encode(struct.pack(\"f\"*len(b), *b)).decode()\n network['layers'].append(gemm_layer)\n\n elif node.op_type in multipath_nodes:\n node_inputs = get_previous_nodes(nodes, node)\n shapes = input_shapes\n\n if node.op_type == \"Sum\":\n assert(all([x == shapes[0] for x in shapes[1:]]))\n elif node.op_type == \"Concat\":\n assert(all([x[1:] == shapes[0][1:] for x in shapes[1:]]))\n\n buf = node_inputs[0].name\n if node.op_type == \"Concat\":\n buf = output_id\n\n buffer_offset = 0\n for n, node_input in enumerate(node_inputs):\n noutput = node_input.output[0]\n for l, layer in enumerate(network['layers']):\n if layer['output_id'] == noutput: 
# if layer pointing to this node\n network['layers'][l]['output_id'] = buf # rename layer's output\n network['layers'][l]['buffer_offset'] = buffer_offset # and offset appropriately\n if layer['input_id'] == noutput:\n network['layers'][l]['input_id'] = buf #TODO\n\n buffer_offset += int(np.prod(input_shapes[n]))\n\n if node.op_type == \"Sum\":\n channels, m, n = shape3d(output_shape)\n sum_layer = {\n 'op_type': \"Sum\",\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': node_inputs[0].name,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'num_inputs': len(node.input),\n \"sublayers\": [],\n }\n network['layers'].append(sum_layer)\n\n elif node.op_type == \"Identity\":\n shapes = input_shapes\n\n channels, m, n = shape3d(output_shape)\n identity_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n }\n network['layers'].append(identity_layer)\n\n elif node.op_type == \"LRN\":\n shapes = input_shapes\n channels, m, n = shape3d(output_shape)\n lrn_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'alpha': get_attr(node, 'alpha'),\n 'beta': get_attr(node, 'beta'),\n 'bias': get_attr(node, 'bias'),\n 'size': get_attr(node, 'size'),\n 'scale': 1.0,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n }\n network['layers'].append(lrn_layer)\n\n elif node.op_type == \"Scale\":\n scale_sublayer = {\n 'op_type': 'Scale',\n 'name': node.name,\n \"use_replay\": 1,\n 'scale': get_attr(node, 'scale'),\n }\n previous['sublayers'].append(scale_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type in [\"GlobalAveragePool\", \"GlobalMaxPool\"]:\n assert(previous['n'] == previous['m'])\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n pads = pads6(node)\n pool_sublayer = {\n 'op_type': node.op_type.replace('Global', ''),\n 'name': node.name,\n 'use_replay': 0,\n 'kernel_shape': [previous['m'], previous['n']],\n 'strides': [previous['m'], previous['n']],\n 'pads': pads,\n }\n previous['sublayers'].append(pool_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = (output_shape)\n\n elif node.op_type in [\"MaxPool\", \"AveragePool\"]:\n kernel_shape = np.asarray(get_attr(node, 'kernel_shape')).tolist()\n\n if node.op_type == \"AveragePool\": #TODO quick fix for tf average pool quirk\n if kernel_shape[0] * kernel_shape[1] == previous['m'] * previous['n']:\n kernel_shape = [previous['m'], previous['n']]\n strides = np.asarray(get_attr(node, 'strides')).tolist()\n if strides is None:\n strides = [ 1 for _ in kernel_shape]\n pads = pads6(node)\n pool_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'kernel_shape': kernel_shape,\n 
'strides': strides,\n 'pads': pads,\n }\n previous['sublayers'].append(pool_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = (output_shape)\n elif node.op_type == \"PRelu\":\n slope = get_tensor(inits, node.input[1])\n slope = slope.flatten().tolist()\n prelu_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'slope': slope,\n }\n previous['sublayers'].append(prelu_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"LeakyRelu\":\n alpha = get_attr(node, 'alpha')\n if alpha is None:\n alpha = .01\n leaky_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'alpha': alpha\n }\n previous['sublayers'].append(leaky_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Relu\":\n relu_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n }\n previous['sublayers'].append(relu_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Clip\":\n clip_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'min': float(get_tensor(inits,node.input[1])),\n 'max': float(get_tensor(inits,node.input[2])),\n }\n previous['sublayers'].append(clip_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type == \"Pad\":\n pads = pads6(get_tensor(inits,node.input[1]).tolist())\n value = int(get_tensor(inits,node.input[2]))\n if value < -1:\n value = -1\n if value > 1:\n value = 1\n pad_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'value': value,\n 'pads': pads,\n }\n previous['sublayers'].append(pad_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n previous['output_shape'] = (output_shape)\n elif node.op_type in [\"Add\", \"Mul\", \"Sub\", \"Div\"]:\n\n skip = False\n if node.op_type == \"Mul\":\n next_nodes = get_node_inputs(nodes, node.output[0])\n if node.name == nodes[-1].name:\n if verbose:\n print('removing final scale node')\n skip = True\n\n elif previous['op_type'] in [\"LRN\"]:\n if verbose:\n print('skipping mul after lrn')\n array = get_tensor(inits, node.input[1])\n if array is None:\n array = get_tensor(inits, node.input[0])\n previous['scale'] = float(array[0])\n print('skipping mul after lrn', previous['scale'], previous['input_id'], previous['output_id'])\n\n skip = True\n\n elif next_nodes[0].op_type in [\"Softmax\"]:\n if verbose:\n print('skipping mul before softmax')\n skip = True\n\n array = get_tensor(inits, node.input[1])\n if array is None:\n array = get_tensor(inits, node.input[0])\n c = activations[node.input[1]].shape[1]\n else:\n c = input_shapes[0][0]\n\n if node.op_type == \"Add\": # TODO for scalar Add\n dims = len(np.squeeze(array).shape)\n if dims == 0:\n array = np.ones((c, 1)) * array\n\n dims = len(np.squeeze(array).shape)\n if c == 1 and dims == 0:\n dims = 1\n\n array = array.flatten().tolist()\n # force_broadcast_2 = False\n # if force_broadcast_2:\n # # if c != 1 and dims == 0:\n # if c != 1 and dims == 0 and node.op_type != \"Mul\": # TODO forcing to broadcast 2 not broadcast 3\n # dims = 1\n # array = [array[0] for _ in range(c)]\n\n if not skip:\n arithmetic_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'dims': dims,\n 'array': array,\n }\n previous['sublayers'].append(arithmetic_sublayer)\n previous['output_id'] = output_id\n\n elif node.op_type in [\"Abs\", \"Max\", \"Mean\", \"Min\", 
\"Neg\", \"Not\"]:\n unary_sublayer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n }\n previous['sublayers'].append(unary_sublayer)\n previous['output_id'] = output_id\n previous['output_size'] = int(np.prod(output_shape))\n\n elif node.op_type == \"Reshape\":\n dims = get_tensor(inits, node.input[1])\n\n if len(dims) == 4 and dims[-1] == 2:\n idx += 6\n node = nodes[idx]\n output_id = node.output[0]\n _, output_shapes = get_shapes(activations, stats, node)\n output_shape = output_shapes[0]\n channels, m, n = shape3d(output_shape)\n reorg_layer = {\n 'op_type': \"Reorg\",\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape': output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n \"stride\": int(dims[-1]),\n }\n network['layers'].append(reorg_layer)\n else:\n previous['output_id'] = output_id\n\n elif node.op_type in [\"Flatten\",'Cast']:\n previous['output_id'] = output_id\n elif node.op_type == \"Resize\":\n scales = get_tensor(inits, node.input[2])\n assert(scales[0] == 1 and scales[1] == 1)\n scale = float(scales[2])\n mode = get_attr(node, 'mode').decode()\n assert(mode == 'nearest' or mode == 'linear')\n shapes = input_shapes[:1]\n channels, m, n = shape3d(output_shape)\n in_size= [d for d in one_elem(input_shapes)[1:]]\n replay = 0 if in_size == [1,1] else 1\n resize_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': replay,\n 'input_size': int(np.prod(one_elem(input_shapes))),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'mode' :mode,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': [float(scales[2]),float(scales[3])],\n }\n network['layers'].append(resize_layer)\n elif node.op_type == \"ArgMax\":\n input_shape = one_elem(input_shapes)\n channels, m, n = shape3d(input_shape)\n argmax_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': [float(scales[2]),float(scales[3])],\n }\n network['layers'].append(argmax_layer)\n\n elif node.op_type == \"Softmax\":\n prev = get_previous_nodes(nodes, node)[0]\n if prev.op_type == \"Mul\":\n scale = get_tensor(inits, prev.input[1])\n scale = scale.flatten().tolist()\n else:\n scale = [1.0]\n if len(scale) > 1:\n raise NotImplementedError(\"Broadcast scale not implemented for softmax\")\n\n shapes = input_shapes\n channels, m, n = shape3d(output_shape)\n softmax_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 0,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n \"sublayers\": [],\n 'scale': scale,\n 'size': len(scale),\n }\n network['layers'].append(softmax_layer)\n\n # softmax_sublayer = {u'op_type': u'Softmax', 'scale': 1.0}\n # 
previous['sublayers'].append(softmax_sublayer)\n # previous['output_id'] = output_id\n # print('warning SOFTMAX ignored!... fine if last layer and sorting outputs')\n\n elif node.op_type == \"Transpose\":\n shapes = input_shapes\n\n channels, m, n = shape3d(output_shape)\n permutation =[p-1 for p in get_attr(node, 'perm')[1:]]\n transpose_layer = {\n 'op_type': node.op_type,\n 'name': node.name,\n 'use_replay': 1,\n 'input_size': int(sum([np.prod(s) for s in input_shapes])),\n 'output_size': int(np.prod(output_shape)),\n 'output_shape':output_shape,\n 'input_id': input_id,\n 'output_id': output_id,\n 'channels': channels,\n 'm': m,\n 'n': n,\n 'dma_offset': 0,\n 'buffer_offset': 0,\n 'permutation':permutation,\n \"sublayers\": [],\n }\n network['layers'].append(transpose_layer)\n else:\n raise RuntimeError('Unknown node type:{} '.format(node.op_type))\n\n idx += 1\n if idx > last_idx:\n break\n\n unsigned_network_inputs = input_type == np.uint8\n\n if CVI_1x1:\n network = mxp_gemm_to_conv(network)\n\n network = mxp_set_replay(network, io_info)\n network = mxp_set_cvi(network)\n network = mxp_set_unsigned(network, unsigned_network_inputs)\n\n if inline_depthwise:\n network = mxp_inline_depthwise(network)\n\n network = mxp_describe_layers(network)\n network = mxp_number_buffers(network)\n buffers = mxp_size_buffers(network)\n network = mxp_number_sublayers(network)\n\n network['num_layers'] = len(network['layers'])\n network['buffers'] = buffers\n\n return network", "def test_set_bad_name(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4, shape=(4, 4))\n with pytest.raises(TypeError):\n dim.name = 4", "def set_node_label_color_bypass(node_names, new_colors, network=None, base_url=DEFAULT_BASE_URL):\n if not isinstance(new_colors, list): new_colors = [new_colors] # TODO: It looks like this should be happening everywhere?\n for color in new_colors:\n if is_not_hex_color(color):\n return None # TODO: Shouldn't this be an exception?\n\n res = set_node_property_bypass(node_names, new_colors, 'NODE_LABEL_COLOR', network=network, base_url=base_url)\n\n return res", "def neural_network(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0", "def test_model_with_simple_rnn_multiple_layers(self):\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Add an RNN layer with 12 internal units.\n x = tf.keras.layers.SimpleRNN(12, name='rnn0', activation='tanh', return_sequences=True)(inputs)\n x = tf.keras.layers.SimpleRNN(12, name='rnn1', activation='relu', return_sequences=True)(x)\n x = tf.keras.layers.SimpleRNN(12, name='rnn2', activation='tanh')(x)\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax, name=\"matmul0\")(x)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./simple_rnn', sess.graph)\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input_1'], ['matmul0/Softmax'])\n\n # there should be only 4 connected graph ops, input, simpleRNN , Dense and Softmax\n self.assertEqual(6, len(conn_graph.get_all_ops()))\n num_detected_rnns = 0\n for op in conn_graph.get_all_ops().values():\n if op.type == 'SimpleRNN':\n num_detected_rnns += 1\n inner_list = op.internal_ops\n self.assertEqual(49, len(inner_list))\n self.assertEqual(3, num_detected_rnns)", "def test_matmul_vv(self):\n self.check_dot_vv(matmul_usecase, \"'@'\")", "def test_add():\n # Test for addition with scalar Rnode object and 
float value\n x = Rnode(0.11)\n z = x**2 + x\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value **2 + x.value\n assert x.grad() == sum(weight * var.grad()\n for weight, var in x.children)\n except AssertionError as e:\n print(e)", "def __init__(self, g: 'graph.Graph', node_id: int, name: str, op_name: str,\n device: str = \"\"):\n Node.__init__(self, g, node_id=node_id, name=name,\n op_name=op_name, outputs=[], device=device)\n self._attributes = []\n self._inputs = []\n self._control_inputs = []", "def is_node_name_ok(node_name):\n # 节点名不可包含`/`特殊字符\n node_name = node_name.strip('/')\n return node_name.find('/') == -1", "def _onnx_node_to_singa_op(cls,\n onnx_node,\n inputs,\n opset_version=_known_opset_version):\n if onnx_node.op_type in cls._special_operators:\n translator = getattr(cls, cls._special_operators[onnx_node.op_type])\n else:\n translator = cls._common_onnx_node_to_singa_op\n return translator(onnx_node, inputs, opset_version)", "def train_models_change_nodes(X_train, Y_train, X_dev, Y_dev, numOutputNodes, learning_rate, iterations, minibatch_size, beta, nodes, print_cost = True, is_charge = False):\n \n results = {}\n params = {}\n\n try:\n for n in nodes:\n accs, parameters = model(X_train, Y_train, X_dev, Y_dev, numOutputNodes, learning_rate, iterations, minibatch_size, n, beta, 1.0, False, True, print_cost, is_charge)\n \n results[n] = accs[3]\n params[n] = parameters\n \n except KeyboardInterrupt: # allow for exiting the for loop in case we want to stop testing all the hyperparameters; to use, press Ctrl+C in terminal\n pass\n \n best = min(results, key=results.get)\n mapes = [results[l] for l in nodes]\n\n if print_cost:\n plt.plot(nodes, mapes)\n plt.ylabel('MAPE')\n plt.xlabel('# of nodes in hidden layer')\n plt.title(\"learning rate = {}, mini-batch = {}, beta = {}\".format(learning_rate, minibatch_size, beta))\n plt.show()\n \n return results, best, params[best]", "def _build_optimizer_node(\n self,\n input_names: List[str],\n output_name: str,\n node_name: str,\n node_attributes: Dict,\n ) -> str:\n onnx_model = self.base\n\n # add the optimizer node to the onnx model\n optimizer_node = onnx.helper.make_node(\n node_name,\n input_names,\n [output_name],\n name=_graph_utils.generate_graph_name(node_name),\n domain=\"com.microsoft\",\n **node_attributes,\n )\n\n onnx_model.graph.node.append(optimizer_node)\n\n return output_name", "def _generate_node_name(self):\r\n while 1:\r\n name = \"node\" + str(self._name_sequence)\r\n if name not in self.nodes.keys():\r\n break\r\n self._name_sequence += 1\r\n\r\n return name", "def test_modifyNodes(self):\n\n # wait until our lb is ACTIVE before trying to update it\n lbaas_utils.wait_for_active_status(self)\n # modify/disable a node on our loadbalancer\n self.logging.info(\"Testing node disable...\")\n nodes = self.driver.list_lb_nodes(self.lb_id)\n mod_node = nodes['nodes'][0]\n mod_node_id = mod_node['id']\n mod_node_addr = mod_node['address']\n mod_node_data = {'condition':'DISABLED'}\n self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)\n lbaas_utils.validate_loadBalancer(self, [mod_node_addr])\n\n # re-enable the node\n self.logging.info(\"Testing re-enable of node...\")\n mod_node_data = {'condition':'ENABLED'}\n self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)\n lbaas_utils.validate_loadBalancer(self)\n\n # modify ip / address\n self.logging.info(\"Testing update of node ip...\")\n mod_node_data = {'address': '127.0.0.1'}\n expected_status = 
'400'\n self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)\n self.assertEqual(str(self.actual_status), expected_status, msg = \"ERROR: Attempt to update node ip address succeeded with status: %s. Expected status: %s\" %(self.actual_status, expected_status))\n \n # modify port\n self.logging.info(\"Testing update of node port...\")\n mod_node_data = {'port': '443'}\n expected_status = '400'\n self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)\n self.assertEqual(str(self.actual_status), expected_status, msg = \"ERROR: Attempt to update node port succeeded with status: %s. Expected status: %s\" %(self.actual_status, expected_status))\n\n # update of non-existent node\n self.logging.info(\"Testing update of non-existent node...\")\n mod_node_data = {'condition':\"DISABLED\"}\n expected_status = '404'\n self.actual_status = self.driver.modify_node(self.lb_id, '0', mod_node_data)\n self.assertEqual(str(self.actual_status), expected_status, msg = \"ERROR: Attempt to update node port succeeded with status: %s. Expected status: %s\" %(self.actual_status, expected_status))\n\n # lower-case condition\n #self.logging.info(\"Testing lowercase condition...\")\n #mod_node_data = {'condition':'disabled'}\n #self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)\n #lbaas_utils.validate_loadBalancer(self, [mod_node_addr])\n #self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)\n #lbaas_utils.validate_loadBalancer(self, [mod_node_addr])\n\n # lower-case condition\n self.logging.info(\"Testing bad condition...\")\n mod_node_data = {'condition':'TASERED_BADLY'}\n expected_statuses = ['400','512']\n self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)\n self.assertTrue(str(self.actual_status) in expected_statuses, msg = \"ERROR: Attempt to update to bad condition succeeded with status: %s. 
Expected status: %s\" %(self.actual_status, expected_status))", "def test_nodes_at_link():\n grid = HexModelGrid((3, 2))\n\n assert_array_equal(grid.nodes_at_link[:, 0], grid.node_at_link_tail)\n assert_array_equal(grid.nodes_at_link[:, 1], grid.node_at_link_head)\n\n assert np.may_share_memory(grid.nodes_at_link, grid.node_at_link_tail)\n assert np.may_share_memory(grid.nodes_at_link, grid.node_at_link_head)", "def rename_nodes(graph_def: tf.GraphDef, new_names: Dict[str, str]) -> tf.GraphDef:\n # Create copy of each node with a new name\n nodes = []\n for node in graph_def.node:\n new_node = tf.NodeDef()\n new_node.CopyFrom(node)\n nodes.append(new_node)\n if node.name in new_names:\n new_node.name = new_names[node.name]\n LOGGER.info(f\"Node renamed: {node.name} -> {new_node.name}\")\n\n # Check that all new names were used\n if not set(new_names.values()) <= set(node.name for node in nodes):\n missing = set(new_names.values()) - set(node.name for node in nodes)\n raise TensorsNotFoundError(missing)\n\n # Update node references (inputs and location) to renamed nodes\n for node in nodes:\n for idx, name in enumerate(node.input):\n node.input[idx] = new_names[name] if name in new_names else name\n if \"_class\" in node.attr:\n attr = node.attr[\"_class\"]\n for idx, item in enumerate(attr.list.s):\n loc_match = re.match(r\"^loc:@(.+)$\", item.decode())\n if loc_match and loc_match.groups()[0] in new_names:\n new_name = new_names[loc_match.groups()[0]]\n attr.list.s[idx] = f\"loc:@{new_name}\".encode()\n\n # Create Graph with renamed nodes\n new_graph = tf.GraphDef()\n new_graph.node.extend(nodes)\n return new_graph", "def set_node(self, name, state):\n self.source_net.nodes[name] = state", "def get_input_node_names(self, node_name):\n # (str) -> list\n node = self.get_node(node_name)\n return node.bottoms", "def test_tensor_can_be_created(free_alg):\n\n dr = free_alg\n p = dr.names\n i, v, r = p.i, p.v, p.R\n x = IndexedBase('x')\n\n # Create the tensor by two user creation functions.\n for tensor in [\n dr.sum((i, r), x[i] * v[i]),\n dr.einst(x[i] * v[i])\n ]:\n assert tensor.n_terms == 1\n\n terms = tensor.local_terms\n assert len(terms) == 1\n term = terms[0]\n assert term == Term(((i, r),), x[i], (v[i],))", "def test_operator_set(self, test_dag):\n # Unpack the fixture\n dag, (op1, op2, op3, op4) = test_dag\n # Arrange the operators with a Label in the middle\n op1.set_downstream(op2, Label(\"Label 1\"))\n op3.set_upstream(op2, Label(\"Label 2\"))\n op4.set_upstream(op2)\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Label 1\"}\n assert dag.get_edge_info(op2.task_id, op3.task_id) == {\"label\": \"Label 2\"}\n assert dag.get_edge_info(op2.task_id, op4.task_id) == {}" ]
[ "0.6792893", "0.67721677", "0.65422505", "0.61988956", "0.6150528", "0.6148229", "0.5965357", "0.59260803", "0.5876603", "0.58762425", "0.584769", "0.5770553", "0.57376754", "0.5707993", "0.5700042", "0.5673199", "0.5655918", "0.56137747", "0.5602277", "0.5545079", "0.5532079", "0.5521103", "0.5515491", "0.5515061", "0.54988176", "0.54752284", "0.5456482", "0.54459816", "0.5444347", "0.54428935", "0.54303426", "0.54283386", "0.5405377", "0.5391476", "0.53800255", "0.5377728", "0.5366948", "0.53540695", "0.53508687", "0.5348815", "0.53426874", "0.5323688", "0.5323243", "0.53147256", "0.53131276", "0.53114426", "0.53068924", "0.5298615", "0.5273016", "0.5254022", "0.5251028", "0.5240393", "0.52363056", "0.522448", "0.5224082", "0.5222658", "0.5218048", "0.52114534", "0.5210623", "0.5192558", "0.51876116", "0.51833576", "0.5182785", "0.51827794", "0.5180675", "0.51743215", "0.51734966", "0.5166593", "0.51646596", "0.51624817", "0.51549846", "0.51462036", "0.5146064", "0.5139174", "0.5126487", "0.5118592", "0.5115405", "0.51143366", "0.51109856", "0.5109316", "0.5104684", "0.5102845", "0.5099513", "0.50992453", "0.5097021", "0.5089251", "0.508737", "0.50822234", "0.50770557", "0.5069447", "0.5069309", "0.50689954", "0.5065042", "0.50621474", "0.5055676", "0.50547117", "0.5048266", "0.50431406", "0.5035289", "0.50340956" ]
0.7743685
0
Test that node names are uniquely set.
def test_set_unique_node_names(self): class TwoLayerLstmModel(torch.nn.Module): """ Model using torch.nn.LSTM module """ def __init__(self): super(TwoLayerLstmModel, self).__init__() self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3) def forward(self, x, hx=None): return self.lstm(x, hx) model = TwoLayerLstmModel() dummy_input = torch.randn(10, 1, 3) onnx_path = './data/MyModel.onnx' torch.onnx.export(model, dummy_input, onnx_path) onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input) onnx_model = onnx.load(onnx_path) self.check_onnx_node_name_uniqueness(onnx_model) if os.path.exists(onnx_path): os.remove(onnx_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_onnx_node_name_uniqueness(onnx_model):\n onnx_node_names = [node.name for node in onnx_model.graph.node]\n assert len(onnx_node_names) == len(set(onnx_node_names)), f'list size mismatch, check if names are unique'", "def __has_conflicting_node_names(self):\n # check length of sets to determine if overlap exists\n return len({node.get_name() for node in self.get_nodeset()}) != len(self.get_nodeset())", "def test_unique_atom_names(self, molecule):\n # The dataset we load in has atom names, so let's strip them first\n # to ensure that we can fail the uniqueness check\n for atom in molecule.atoms:\n atom.name = \"\"\n assert not (molecule.has_unique_atom_names)\n # Then genreate unique atom names using the built in algorithm\n molecule.generate_unique_atom_names()\n # Check that the molecule has unique atom names\n assert molecule.has_unique_atom_names\n # Check molecule.has_unique_atom_names is working correctly\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n molecule.atoms[1].name = molecule.atoms[0].name # no longer unique\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names", "def test_unique_atom_names(self, molecule):\n # The dataset we load in has atom names, so let's strip them first\n # to ensure that we can fail the uniqueness check\n for atom in molecule.atoms:\n atom.name = \"\"\n assert not (molecule.has_unique_atom_names)\n # Then genreate unique atom names using the built in algorithm\n molecule.generate_unique_atom_names()\n # Check that the molecule has unique atom names\n assert molecule.has_unique_atom_names\n # Check molecule.has_unique_atom_names is working correctly\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n molecule.atoms[1].name = molecule.atoms[0].name # no longer unique\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n assert all(\"x\" in a.name for a in molecule.atoms)", "def checkNamesUniqueness(names):\n topNames, deeperNames = getLevelNames(names)\n## print topNames\n## print deeperNames\n for name in topNames[:-1]:\n if topNames.count(name) > 1:\n raise ValueError(\"\"\"\\Names at every level must be unique!\"\"\")\n if deeperNames:\n checkNamesUniqueness(deeperNames)", "def _ValidateUniqueNames(pools):\n used_names = set()\n for pool in pools:\n name = pool.nodePool\n if name in used_names:\n raise exceptions.InvalidArgumentException(\n '--pools', 'Pool name \"%s\" used more than once.' 
% name)\n used_names.add(name)", "def test_consistent_ids(self) -> None:\n bnode = BNode()\n g0_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Golan Trevize\")),\n (bnode, RDF.type, FOAF.Person),\n }\n bnode = BNode()\n g1_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Janov Pelorat\")),\n (bnode, RDF.type, FOAF.Person),\n }\n\n g0 = Graph()\n g0 += g0_ts\n cg0 = to_canonical_graph(g0)\n cg0_ts = GraphHelper.triple_set(cg0)\n\n g1 = Graph()\n g1 += g1_ts\n cg1 = to_canonical_graph(g1)\n cg1_ts = GraphHelper.triple_set(cg1)\n\n assert cg0_ts.issubset(\n cg1_ts\n ), \"canonical triple set cg0_ts should be a subset of canonical triple set cg1_ts\"", "def test_node_name(self):\n xmlns = {\n \"a\": \"_a\",\n \"g\": \"_g\"\n }\n self.assertEqual(\n utils._node_name(\"a\", \"g\", xmlns),\n (False, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"a@a\", \"g\", xmlns),\n (False, \"a\", \"{_a}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@@a\", \"g\", xmlns),\n (True, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@a@a\", \"g\", xmlns),\n (True, \"a\", \"{_a}a\")\n )\n # something not equal to _\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"1@a@a\", \"g\", xmlns),\n # too many @\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"@@a@a\", \"g\", xmlns),", "def test_duplicate_name_refs(renderer):\n assert renderer.name_ref(User.age) == renderer.name_ref(User.age) == \"#n0\"", "def verify_unique_names(items):\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException(\"Error: Duplicate sequence names found.\", ErrorType.INVALID_SEQUENCE_DATA)", "def check_unique(self):\n pass", "def check_name_duplication(self, other):\n self_names = set(\n [node.get(\"name\") for node in self.root.findall(\"./*[@name]\")])\n other_names = set(\n [node.get(\"name\") for node in other.root.findall(\"./*[@name]\")])\n if len(set.intersection(self_names, other_names)):\n raise NameDuplicationError()", "def make_unique_node(graph, name):\n if name not in graph:\n return name\n ctr = 1\n while True:\n name_ = name + '_' * ctr\n if name_ not in graph:\n return name_\n ctr += 1", "def _validate_duplicate_names(res_data, name, _id=None):\n if _id:\n for data in res_data:\n if data.get(\"name\") == name and data.get(\"id\") != _id:\n return False\n return True\n else:\n for data in res_data:\n if data.get(\"name\") == name:\n return False\n return True", "def test_unique(self):\n env = pike.Environment()\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n env.add(graph)\n with self.assertRaises(KeyError):\n env.add(graph)", "def testBasic1(self):\n nodes = self.G.nodes()\n assert len(nodes) == len( set(nodes) )", "def _uniqueness_check(self, cls, unique_in = None, **attr):\n # under the same datasource, only 1 subsystem, 1 neuropil, 1 tract of the name can exist\n # under the same neuropil, only 1 neuron of the name can exist\n # multiple (collections of) synapses can exist between two neurons\n if cls == 'Species':\n tmp = self.sql_query(\n \"\"\"select from Species where (name = \"{name}\" or \"{name}\" in synonyms) and stage = \"{stage}\" and sex = \"{sex}\" \"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"Species {name} at {stage} stage ({sex}) already exists with rid = {rid}\"\"\".format(\n name = attr['name'], 
stage = attr['stage'], sex = attr['sex'], rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Species {name} (as its synonym) at {stage} stage ({sex}) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex'], rid = obj._id, formalname = obj.name))\n elif cls == 'DataSource':\n objs = self.find_objs('DataSource', name=attr['name'], version=attr['version'])\n #if self.exists(cls, name = attr['name'], version = attr['version']):\n if len(objs):\n raise NodeAlreadyExistError(\"\"\"{} Node with attributes {} already exists with rid = {}\"\"\".format(\n cls, ', '.join([\"\"\"{} = {}\"\"\".format(key, value) \\\n for key, value in attr.items()]), objs[0]._id))\n elif cls == 'Neurotransmitter':\n tmp = self.sql_query(\n \"\"\"select from Neurotransmitter where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n name = attr['name']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"Neurotransmitter {name} already exists with rid = {rid}\"\"\".format(\n name = attr['name'], rid = objs[0]._id))\n return objs\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Neurotransmitter {name} (as its synonym) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], rid = obj._id, formalname = obj.name))\n elif cls in ['Subsystem', 'Neuropil', 'Subregion', 'Tract']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'], formalname = obj.name,\n ds = unique_in.name,\n version = unique_in.version, rid = obj._id))\n # Alternatively, try:\n # tmp = self.sql_query(\n # \"\"\"select from {cls} where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n # cls = cls, name = attr['name']))\n # ds = tmp.owned_by(cls = 'DataSource').has(rid = datasource)\n # if len(ds):\n # tmp1 = tmp.has(name = attr['name'])\n # if len(tmp1.owned_by(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = datasource.name,\n # version = datasource.version))\n # else:\n # all_synonym_objs = (tmp - tmp1).node_objs\n # for obj in objs:\n # if 
len(QueryWrapper.from_rids(obj._id).has(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = datasource.name,\n # version = datasource.version))\n\n # Alternatively 2, try: (will be slow when it has a lot of Owns edges)\n # tmp = sql_query(\n # \"\"\"\n # select from (select expand(out('Owns')[@class = \"{cls}\"]) from {rid}) where name = \"{name}\" or \"{name}\" in synonyms\n # \"\"\"\n # )\n # elif cls in ['Subregion']:\n # if not isinstance(unique_in, models.Neuropil):\n # raise TypeError('To check the uniqueness of a {} instance, unique_in must be a Neuropil object'.format(cls))\n # tmp = self.sql_query(\n # \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='ucls' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n # rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n # if len(tmp):\n # objs = tmp.node_objs\n # if attr['name'] in [obj.name for obj in objs]:\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = unique_in.name))\n # else:\n # for obj in objs:\n # if name in obj.synonyms:\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = unique_in.name))\n elif cls in ['Neuron', 'NeuronFragment']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where uname = \"{name}\") let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists with rid = {rid}, under DataSource {ds} version {version}\"\"\".format(\n cls = cls, name = attr['name'], rid = objs[0]._id,\n ds = unique_in.name,\n version = unique_in.version))\n elif cls == 'Circuit':\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\") let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n elif cls == 'ArborizationData':\n if not isinstance(unique_in, (models.Neuron, models.Synapse)):\n raise TypeError('To check 
the uniqueness of a ArborizationData instance, unique_in must be a Neuron or a Synapse object')\n tmp = self.sql_query(\n \"\"\"select from (select expand(out(HasData)) from {rid}) where @class = 'ArborizationData' \"\"\".format(rid = unique_in._id))\n if len(tmp):\n raise NodeAlreadyExistError(\"\"\"ArborizationData already exists for {node} {uname} with rid = {rid}. Use NeuroArch.update_{node}_arborization to update the record\"\"\".format(\n node = unique_in.element_type.lower(), rid = tmp.node_objs[0]._id, uname = unique_in.uname))\n else:\n raise TypeError('Model type not understood.')\n return True", "def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s", "def _match_identical_nodes(self):\n\n for job_name_b in self._topo_b_nodes:\n for job_name_a in self._unresolved_a_nodes:\n if self._is_node_identical(job_name_a, job_name_b):\n self._identical_nodes[job_name_b] = job_name_a\n self._unresolved_a_nodes.remove(job_name_a)\n self._unresolved_b_nodes.remove(job_name_b)\n break", "def add_node(self, name):\n if not name in self._main_dictionary:\n self._main_dictionary[name] = set()", "def is_unique_n_set(string: str) -> bool:\n\n return len(set(string)) == len(string)", "def has_node(self, u: Hashable) -> bool:\n return u in self._names", "def test_get_num_unique_name(self):\n\n list1 = self.test_num_unique_name\n list2 = get_num_unique_name(self.test_sorted_tuple, self.test_dict)\n self.assertEqual(list1, list2)", "def test_inconsistent_name(self):\n entries = {'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'givenName': ['Maarten'],\n 'sn': ['Visscher'],\n 'cn': ['Wessel']}\n }\n with self.assertRaises(CloneError):\n clone(entries)", "def test_unique_id_1():\n id1 = tasks.unique_id()\n id2 = tasks.unique_id()\n assert id1 != id2", "def test_5_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(5):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def testNames(self):\n self.assertEquals(\n set(['BLUE', 'GREEN', 'INDIGO', 'ORANGE', 'RED',\n 'VIOLET', 'YELLOW']),\n set(Color.names()))", "def test_unique_id_2():\n ids = []\n ids.append(tasks.add(Task('one')))\n ids.append(tasks.add(Task('two')))\n ids.append(tasks.add(Task('three')))\n\n uid = tasks.unique_id()\n assert uid not in ids", "def test_adding_same_node(self):\n cons_hash = ConsistentHash(2)\n cons_hash.add('192.168.1.1') \n\n threw_value_error = False\n try:\n cons_hash.add('192.168.1.1')\n except exceptions.ValueError:\n threw_value_error = True\n self.assertTrue(threw_value_error) \n \n self.assertTrue(cons_hash._is_consistent())", "def testIdUnique(self):\n ids = {}\n # Vary parameters that affect the work or input data,\n # verify each id is unique\n for params in itertools.product(RequestNames, TaskNames, Inputs,\n Masks, Dbses, Acdcs):\n ele = WorkQueueElement(RequestName=params[0], TaskName=params[1],\n Inputs=params[2], Mask=params[3],\n Dbs=params[4], ACDC=params[5]\n )\n self.assertFalse(ele.id in ids)\n ids[ele.id] = None", "def test_2_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(2):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % 
str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def getSet(unique_name):", "def getSet(unique_name):", "def test_6_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(6):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def check_name_uniqueness(cls, user_id, name):\n data_with_same_name = Data.objects.only('id').filter(user_id=user_id, name = name)\n return len(data_with_same_name) == 0", "def is_unique(x):\n return len(set(x)) == len(x)", "def test_uniqueness(self):\n passwords = tuple(generate_password(8) for i in range(100))\n self.assertEqual(len(passwords), len(set(passwords)))", "def test_equality(self):\n\n for name in TEST_NAMES:\n self.colorspace.setEqualityGroup(name)\n self.assertEqual(name, self.colorspace.getEqualityGroup())", "def nodes_row_similar(all_rows: set[tuple[str, str, str, int, int, int]],\n identifier: tuple[str, str, str, int, int, int]) -> bool:\n for row in all_rows:\n if row[0] == identifier[0] and row[1] == identifier[1]:\n return True\n return False", "def test_unicode_names(self):\n self.assertArrayEqual(self.dset['a'], self.data['a'])\n self.dset['a'] = 42\n data = self.data.copy()\n data['a'] = 42\n self.assertArrayEqual(self.dset['a'], data['a'])", "def test_7_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(7):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def get_node_names(self):\n return set({node.get_name() for node in self.get_nodeset()}) # return the set of names", "def __unpack_node_names(node_names):\n return \\\n set({\n unpacked_node_name\n for node_name in node_names\n for unpacked_node_name in node_name.split(StoerWagner.MERGED_NODE_NAME_DELIMITER)\n }) # create and return the set of unpacked node names", "def test_4_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(4):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def test_good_node():\n node_a = Node({'A':['B','C']})\n assert node_a.name == 'A'\n assert node_a.connections == ['B','C']", "def test_9_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(9):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def check_dataset_duplicate_ids(self, dataset):\n ids = [a['_id'] for a in dataset]\n # Construct list of duplicates\n dupe_ids = [a for n, a in enumerate(ids) \n if a in ids[:n]]\n if len(dupe_ids) > 0:\n # Get list of names for the duplicate pandas\n dupe_names = [a['en.name'] for a in dataset \n if a['_id'] in dupe_ids]\n raise IdError(\"ERROR: duplicate ids for en.names: %s\" \n % str(dupe_names))", "def is_unique(w):\n chars = {}\n for c in w:\n if c in chars:\n return False\n chars[c] = True\n return True", "def has_duplicates(tree):\n taxa = [tip.name for tip in tree.tips()]\n if '' in taxa or None in taxa:\n raise ValueError('Empty taxon name(s) found.')\n return len(set(taxa)) < len(taxa)", "def test_3_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(3):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % 
str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))", "def _repair_names_check_unique(names: Iterable[str]) -> Iterable[str]:\n for name in names:\n if names.count(name) > 1:\n raise NameNonUniqueError(f\"Names must be unique: {name}\")\n if name == \"\" or name is numpy.nan:\n raise NameNonUniqueError(f\"Names can't be empty: {name}\")\n if re.search(r\"(?:(?<!_)_{2}\\d+|(?<!_)__)+$\", str(name)):\n raise NameNonUniqueError(\n f\"Names can't be of the form `__` or `_j`: {name}\"\n )\n return names", "def test_8_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(8):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def test_duplicate_entries(self):", "def _raise_if_duplicates(counts: Dict[str, int]) -> None:\n duplicates: List[str] = []\n for nickname, count in counts.items():\n if count > 1:\n duplicates.append(nickname)\n if len(duplicates) > 0:\n # TODO This is not always nickname\n raise ValueError(f'\\'nickname\\' not unique {duplicates}')", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def test_name(self):\n molecule1 = Molecule()\n molecule1.name = None\n\n molecule2 = Molecule()\n molecule2.name = \"\"\n assert molecule1.name == molecule2.name\n\n name = \"benzene\"\n molecule = Molecule()\n molecule.name = name\n assert molecule.name == name", "def test_name(self):\n molecule1 = Molecule()\n molecule1.name = None\n\n molecule2 = Molecule()\n molecule2.name = \"\"\n assert molecule1.name == molecule2.name\n\n name = \"benzene\"\n molecule = Molecule()\n molecule.name = name\n assert molecule.name == name", "def test_name(self):\n\n for name in TEST_NAMES:\n self.colorspace.setName(name)\n self.assertEqual(name, self.colorspace.getName())", "def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())", "def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())", "def check_duplicate_class_names(class_names):\n duplicates = get_duplicates(class_names)\n if duplicates:\n logger.error(f'Only globally unique class names are allowed. 
Found duplicates {duplicates}')\n raise SystemExit(0)", "def test_tree_intersection_name_exists():\n assert tree_intersection", "def test_ids(self):\n state1 = State()\n state2 = State()\n state3 = State()\n self.assertFalse(state1.id == state2.id)\n self.assertFalse(state1.id == state3.id)\n self.assertFalse(state2.id == state3.id)", "def test_random_id(self):\r\n ids = \\\r\n \"R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969\".split(\r\n )\r\n assert random_id(ids, {}) in ids\r\n # just test we got something from the list, don't add stochastic test\r", "def check_name(self, node):\n assert \"name\" in node, \"Package node does not contain attribute 'node'\"\n assert len(node[\"name\"]) >= 1, \"Expecting at least one 'name' value\"\n # TODO: add more thorough checks", "def name_is_unique(self, name):\n unique = True\n for client in self.clients:\n unique = unique and (False if name == client.get_name() else True)\n return unique", "def test_getting_keys(self): \n cons_hash = ConsistentHash(2) \n \n nodes = ['192.168.1.1:20000',\n '192.168.1.1:20001',\n '192.168.1.1:20002',\n '192.168.1.1:20003'] \n\n for node in nodes:\n cons_hash.add(node)\n \n self.assertEquals(len(cons_hash), 8)\n node_counts = defaultdict(int)\n for i in xrange(0,100):\n key = str(uuid.uuid4())\n node = cons_hash.get_node(key)\n \n self.assertTrue(node in nodes)\n node_counts[node] += 1\n\n self.assertTrue(cons_hash._is_consistent())", "def _assert_thread_names(self):\n threads = [t for t in threading.enumerate()]\n thread_names = [t.name for t in threads]\n\n thread_names_set = set(thread_names)\n expected_names = {'MainThread', 'SQLiteExecutor',\n 'OutputManager', 'QueueFeederThread'}\n\n self.assertEqual(thread_names_set, expected_names)", "def test_instance_naming_creation(os_info):\n NEUTRON.list_security_groups = mock.MagicMock(\n return_value=iter([{\"security_groups\": []}]))\n NEUTRON.create_subnet = mock.MagicMock(\n return_value={\"subnet\": SUBNETS}\n )\n\n instance_names = os_info.nodes_names\n for i in range(len(instance_names)):\n assert instance_names[i] == 'test-node-{}'.format(i + 1)", "def test_install_set_multi(self):\n expected = copy.deepcopy(test_xdata)\n for thing in expected.xpath(\"Children[@identical='true']/Thing\"):\n thing.text = \"same\"\n self._install(\n [lxml.etree.Element(\n \"SetMulti\", value=\"same\",\n base='Test/Children[#attribute/identical = \"true\"]',\n sub=\"Thing/#text\")],\n expected)", "def make_unique(self, unnamed_prefix: str = '') -> 'Entity':\n orig_name = self['targetname']\n if orig_name:\n self['targetname'] = '' # Remove ourselves from the .by_target[] set.\n else:\n orig_name = unnamed_prefix\n\n base_name = orig_name.rstrip('0123456789')\n\n if self.map.by_target[base_name]:\n # Check every index in order.\n i = 1\n while True:\n name = base_name + str(i)\n if not self.map.by_target[name]:\n self['targetname'] = name\n break\n i += 1\n else:\n # The base name is free!\n self['targetname'] = base_name\n\n return self", "def test_unique_keys(self):\n registry = ClassRegistry(attr_name='element', unique=True)\n\n # We can register any class like normal...\n # noinspection PyUnusedLocal\n registry.register(Charmander)\n\n # ... 
but if we try to register a second class with the same\n # key, we get an error.\n with self.assertRaises(RegistryKeyError):\n registry.register(Charmeleon)", "def test_should_raise_error_for_duplicate_names(self):\r\n self.edge_spec['label'] = 'updated_at'\r\n with self.assertRaises(ValueError):\r\n self.spec_parser.parse_statement(self.property_spec)\r\n self.spec_parser.parse_statement(self.edge_spec)", "def test_duplicate_named_input_edge(self):\n with self.assertRaises(ValidationError):\n with Graph('g'):\n n1, n2 = Node('a'), Node('b')\n n1 | 'bar' * n2\n n1 * 'foo' | 'bar' * n2", "def test_multiple_char_unique(self):\n self.assertTrue(all_unique_chars_no_set(\"ab\"))\n self.assertTrue(all_unique_chars_no_set(\"ba\"))\n self.assertTrue(all_unique_chars_no_set(\"make\"))\n self.assertTrue(all_unique_chars_no_set(\"thorn\"))\n self.assertTrue(all_unique_chars_no_set(\"malibu\"))\n self.assertTrue(all_unique_chars_no_set(string.ascii_letters))", "def make_unique(name, reserved_names):\n while name in reserved_names:\n name += '_'\n\n return name", "def test_if_nodes_are_in_same_chunk(self, node_ids: Sequence[np.uint64]\n ) -> bool:\n assert len(node_ids) == 2\n return self.get_chunk_id(node_id=node_ids[0]) == \\\n self.get_chunk_id(node_id=node_ids[1])", "def test_duplicate_local_name(self):\n # Setup test\n infilename = os.path.join(_SAMPLE_FILES_DIR, \"reg_good_simple.xml\")\n filename = os.path.join(_TMP_DIR, \"reg_duplicate_local_name.xml\")\n out_source_name = \"physics_types_duplicate_local_name\"\n out_source = os.path.join(_TMP_DIR, out_source_name + '.F90')\n out_meta = os.path.join(_TMP_DIR, out_source_name + '.meta')\n remove_files([out_source, out_meta])\n tree, root = read_xml_file(infilename)\n # Change output filename\n for obj in root:\n oname = obj.get('name')\n if (obj.tag == 'file') and (oname == 'physics_types_simple'):\n obj.set('name', out_source_name)\n new_var = ET.SubElement(obj, \"variable\")\n new_var.set(\"local_name\", \"latitude\")\n new_var.set(\"standard_name\", \"east_wind\")\n new_var.set(\"units\", \"radians\")\n new_var.set(\"type\", \"real\")\n new_var.set(\"kind\", \"kind_phys\")\n dims_elem = ET.SubElement(new_var, \"dimensions\")\n dims_elem.text = 'horizontal_dimension'\n break\n # End if\n # End for\n tree.write(filename)\n # Run test\n with self.assertRaises(ValueError) as verr:\n _ = gen_registry(filename, 'eul', {}, _TMP_DIR, 2,\n _SRC_MOD_DIR, _CAM_ROOT,\n loglevel=logging.ERROR,\n error_on_no_validate=True)\n # End with\n # Check exception message\n emsg = \"duplicate variable local_name, 'latitude', in \"\n emsg += \"physics_types_duplicate_local_name, already defined \"\n emsg += \"with standard_name, 'latitude'\"\n self.assertEqual(emsg, str(verr.exception))\n # Make sure no output files were created\n self.assertFalse(os.path.exists(out_meta))\n self.assertFalse(os.path.exists(out_source))", "def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()", "def test_nodes_exist(graph_no_edges):\n for node in graph_no_edges:\n assert graph_no_edges.has_node(node)", "def get_naked_names(graph: BELGraph) -> Set[str]:\n return set(_naked_names_iter(graph))", "def check_unique_names_for_geometry(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n non_unique_names = []\n\n geo_shapes = pm.ls(type=\"mesh\")\n progress_controller.maximum = len(geo_shapes)\n\n for shape 
in geo_shapes:\n if (\n shape.getParent().isUniquelyNamed() is False\n and shape.getParent().isReferenced() is False\n ):\n non_unique_names.append(shape.getParent())\n progress_controller.increment()\n\n if non_unique_names:\n mc.select(non_unique_names)\n raise PublishError(\n \"Some geometry objects are not <b>Uniquely Named</b><br><br>\"\n \"%s<br><br>Please rename them.\"\n )\n progress_controller.complete()", "def testSynonymDuplicate(self):\n\t\t\t\tone = spinner.Word.objects.get_single('mac', True)\n\t\t\t\ttwo = spinner.Word.objects.get_single('macintosh', True)\n\t\n\t\t\t\tsyn = spinner.Synonym.objects.get_single(one, two, True)\n\t\t\t\t\n\t\t\t\tsyn2 = spinner.Synonym.objects.get_single(two, one, True)\n\n\t\t\t\tassert syn == syn2\n\n\t\t\t\tsyn.delete()\n\t\t\t\tone.delete()\n\t\t\t\ttwo.delete()", "def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True", "def test_is_an_element_name():\n for el in roentgen.elements['name']:\n assert(is_an_element(el))", "def all_unique_set(string):\n return len(string) == len(set(string))", "def test_unique_id(self):\n session1 = _create_test_session()\n session2 = _create_test_session()\n self.assertNotEqual(session1.id, session2.id)", "def is_unique_string(s):\n return len(s) == len(set(s))", "def _check_unique_insesitive(self, cr, uid, ids, context=None):\n for category in self.browse(cr, uid, ids, context=context):\n if len(self.search(cr, uid, [('name','=ilike',category.name)], context=context)) > 1:\n raise osv.except_osv(_('Constraint Error'), _(\"The Name Must Be Unique!\"))\n return True", "def test_synonym(self): \n pass", "def test_name_attribute_assignment(self):\n self.assertNotIn('aldous', self.__dict__)\n self.aldous\n self.assertIn('aldous', self.__dict__)\n self.assertIs(self.__dict__['aldous'], self.aldous)", "def test_id(self):\n node = Node()\n node.id = \"1234\"\n self.assertEqual(node.getId(), node.id)", "def check_root_node_name___fix():\n from stalker import Asset\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n t = v.task\n asset_name = None\n if isinstance(t.parent, Asset):\n asset_name = t.parent.name\n\n root_nodes = auxiliary.get_root_nodes()\n root_node_name = root_nodes[0].name()\n\n if asset_name is not None:\n correct_node_name = asset_name\n correct_node_name = correct_node_name.replace(\" \", \"_\")\n if correct_node_name[0].isdigit():\n correct_node_name = \"_%s\" % correct_node_name\n if correct_node_name[-1].isdigit():\n correct_node_name += \"_grp\"\n else:\n correct_node_name = root_node_name\n if correct_node_name[0].isdigit():\n correct_node_name = \"_%s\" % correct_node_name\n if correct_node_name[-1].isdigit():\n correct_node_name += \"_grp\"\n\n root_nodes[0].rename(correct_node_name)", "def test_create_anonymous_classical_registers(self):\n cr1 = ClassicalRegister(size=3)\n cr2 = ClassicalRegister(size=3)\n self.assertNotEqual(cr1.name, cr2.name)", "def test_node_exists():\n assert Node", "def test_node_exists():\n assert Node", "def __setitem__(self, nodename, node):\n\n for hash_ in self._repl_iterator(nodename):\n if hash_ in self._nodes:\n raise 
ValueError(\"Node name %r is \"\n \"already present\" % nodename)\n self._nodes[hash_] = node\n bisect.insort(self._keys, hash_)", "def __setitem__(self, nodename, node):\n\n for hash_ in self._repl_iterator(nodename):\n if hash_ in self._nodes:\n raise ValueError(\"Node name %r is \"\n \"already present\" % nodename)\n self._nodes[hash_] = node\n bisect.insort(self._keys, hash_)" ]
[ "0.77317005", "0.7141291", "0.70312846", "0.6989006", "0.65257645", "0.65124905", "0.6434736", "0.64302534", "0.6336855", "0.6324279", "0.6158565", "0.6136917", "0.6109544", "0.60242057", "0.5961925", "0.5955324", "0.5897734", "0.58906686", "0.58848387", "0.5879582", "0.58682114", "0.5862924", "0.57981235", "0.57898164", "0.57813835", "0.5780852", "0.57747984", "0.57273054", "0.57234496", "0.57217705", "0.57202154", "0.5719075", "0.5719075", "0.57142466", "0.5704289", "0.56947064", "0.56930333", "0.5685541", "0.56804734", "0.56800306", "0.5677887", "0.5671903", "0.5671701", "0.5668507", "0.56460065", "0.56400126", "0.56281", "0.562806", "0.5625475", "0.56253225", "0.56049573", "0.5603679", "0.5598733", "0.5575745", "0.5570731", "0.5560971", "0.5560971", "0.5555528", "0.5555528", "0.5550396", "0.55382055", "0.55382055", "0.55208033", "0.5509863", "0.54936814", "0.5489508", "0.54815716", "0.5481337", "0.5480385", "0.5468004", "0.546531", "0.54652923", "0.54614556", "0.54598886", "0.5458542", "0.545789", "0.5440427", "0.54295874", "0.54273736", "0.5417524", "0.5415243", "0.5414287", "0.541106", "0.54040545", "0.5403094", "0.53946114", "0.5390756", "0.5378286", "0.5376753", "0.5375569", "0.5372862", "0.53715855", "0.5370676", "0.5364389", "0.5362533", "0.5359298", "0.53569484", "0.53569484", "0.5355685", "0.5355685" ]
0.61442566
11
Test that node names are uniquely set.
def test_non_leaf_module_names(self):
    class Net(torch.nn.Module):
        """
        Model using multiply as functional and module at different depths
        """
        def __init__(self):
            super().__init__()
            self.layer = HierarchicalMultiplyModule()

        def forward(self, x):
            return self.layer(x)

    model = Net()
    dummy_input = torch.randn(10, 1, 3)
    onnx_path = './data/MyModel.onnx'

    torch.onnx.export(model, dummy_input, onnx_path)
    onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)

    onnx_model = onnx.load(onnx_path)
    onnx.checker.check_model(onnx_model)
    self.check_onnx_node_name_uniqueness(onnx_model)

    expected_names = [
        # names compatible with torch 1.9.1 version (should be removed in the future)
        'layer.mul1.mul', 'layer.mul1.Mul_7',
        'layer.mul2.mul', 'layer.mul2.Mul_15',
        'layer.Mul_18',
        # names compatible with torch 1.13.1 version
        '/layer/mul1/Mul', '/layer/mul2/Mul', '/layer/Mul'
    ]
    for node in onnx_model.graph.node:
        assert 'Constant' in node.name or node.name in expected_names

    if os.path.exists(onnx_path):
        os.remove(onnx_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_onnx_node_name_uniqueness(onnx_model):\n onnx_node_names = [node.name for node in onnx_model.graph.node]\n assert len(onnx_node_names) == len(set(onnx_node_names)), f'list size mismatch, check if names are unique'", "def __has_conflicting_node_names(self):\n # check length of sets to determine if overlap exists\n return len({node.get_name() for node in self.get_nodeset()}) != len(self.get_nodeset())", "def test_unique_atom_names(self, molecule):\n # The dataset we load in has atom names, so let's strip them first\n # to ensure that we can fail the uniqueness check\n for atom in molecule.atoms:\n atom.name = \"\"\n assert not (molecule.has_unique_atom_names)\n # Then genreate unique atom names using the built in algorithm\n molecule.generate_unique_atom_names()\n # Check that the molecule has unique atom names\n assert molecule.has_unique_atom_names\n # Check molecule.has_unique_atom_names is working correctly\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n molecule.atoms[1].name = molecule.atoms[0].name # no longer unique\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names", "def test_unique_atom_names(self, molecule):\n # The dataset we load in has atom names, so let's strip them first\n # to ensure that we can fail the uniqueness check\n for atom in molecule.atoms:\n atom.name = \"\"\n assert not (molecule.has_unique_atom_names)\n # Then genreate unique atom names using the built in algorithm\n molecule.generate_unique_atom_names()\n # Check that the molecule has unique atom names\n assert molecule.has_unique_atom_names\n # Check molecule.has_unique_atom_names is working correctly\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n molecule.atoms[1].name = molecule.atoms[0].name # no longer unique\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n assert all(\"x\" in a.name for a in molecule.atoms)", "def checkNamesUniqueness(names):\n topNames, deeperNames = getLevelNames(names)\n## print topNames\n## print deeperNames\n for name in topNames[:-1]:\n if topNames.count(name) > 1:\n raise ValueError(\"\"\"\\Names at every level must be unique!\"\"\")\n if deeperNames:\n checkNamesUniqueness(deeperNames)", "def _ValidateUniqueNames(pools):\n used_names = set()\n for pool in pools:\n name = pool.nodePool\n if name in used_names:\n raise exceptions.InvalidArgumentException(\n '--pools', 'Pool name \"%s\" used more than once.' 
% name)\n used_names.add(name)", "def test_consistent_ids(self) -> None:\n bnode = BNode()\n g0_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Golan Trevize\")),\n (bnode, RDF.type, FOAF.Person),\n }\n bnode = BNode()\n g1_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Janov Pelorat\")),\n (bnode, RDF.type, FOAF.Person),\n }\n\n g0 = Graph()\n g0 += g0_ts\n cg0 = to_canonical_graph(g0)\n cg0_ts = GraphHelper.triple_set(cg0)\n\n g1 = Graph()\n g1 += g1_ts\n cg1 = to_canonical_graph(g1)\n cg1_ts = GraphHelper.triple_set(cg1)\n\n assert cg0_ts.issubset(\n cg1_ts\n ), \"canonical triple set cg0_ts should be a subset of canonical triple set cg1_ts\"", "def test_node_name(self):\n xmlns = {\n \"a\": \"_a\",\n \"g\": \"_g\"\n }\n self.assertEqual(\n utils._node_name(\"a\", \"g\", xmlns),\n (False, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"a@a\", \"g\", xmlns),\n (False, \"a\", \"{_a}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@@a\", \"g\", xmlns),\n (True, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@a@a\", \"g\", xmlns),\n (True, \"a\", \"{_a}a\")\n )\n # something not equal to _\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"1@a@a\", \"g\", xmlns),\n # too many @\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"@@a@a\", \"g\", xmlns),", "def test_duplicate_name_refs(renderer):\n assert renderer.name_ref(User.age) == renderer.name_ref(User.age) == \"#n0\"", "def verify_unique_names(items):\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException(\"Error: Duplicate sequence names found.\", ErrorType.INVALID_SEQUENCE_DATA)", "def check_unique(self):\n pass", "def test_set_unique_node_names(self):\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def check_name_duplication(self, other):\n self_names = set(\n [node.get(\"name\") for node in self.root.findall(\"./*[@name]\")])\n other_names = set(\n [node.get(\"name\") for node in other.root.findall(\"./*[@name]\")])\n if len(set.intersection(self_names, other_names)):\n raise NameDuplicationError()", "def make_unique_node(graph, name):\n if name not in graph:\n return name\n ctr = 1\n while True:\n name_ = name + '_' * ctr\n if name_ not in graph:\n return name_\n ctr += 1", "def _validate_duplicate_names(res_data, name, _id=None):\n if _id:\n for data in res_data:\n if data.get(\"name\") == name and data.get(\"id\") != _id:\n return False\n return True\n else:\n for data in res_data:\n if data.get(\"name\") == name:\n return False\n return True", "def test_unique(self):\n env = pike.Environment()\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n env.add(graph)\n with self.assertRaises(KeyError):\n env.add(graph)", "def testBasic1(self):\n nodes = self.G.nodes()\n assert len(nodes) == len( set(nodes) )", "def _uniqueness_check(self, cls, unique_in = None, **attr):\n 
# under the same datasource, only 1 subsystem, 1 neuropil, 1 tract of the name can exist\n # under the same neuropil, only 1 neuron of the name can exist\n # multiple (collections of) synapses can exist between two neurons\n if cls == 'Species':\n tmp = self.sql_query(\n \"\"\"select from Species where (name = \"{name}\" or \"{name}\" in synonyms) and stage = \"{stage}\" and sex = \"{sex}\" \"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"Species {name} at {stage} stage ({sex}) already exists with rid = {rid}\"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex'], rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Species {name} (as its synonym) at {stage} stage ({sex}) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex'], rid = obj._id, formalname = obj.name))\n elif cls == 'DataSource':\n objs = self.find_objs('DataSource', name=attr['name'], version=attr['version'])\n #if self.exists(cls, name = attr['name'], version = attr['version']):\n if len(objs):\n raise NodeAlreadyExistError(\"\"\"{} Node with attributes {} already exists with rid = {}\"\"\".format(\n cls, ', '.join([\"\"\"{} = {}\"\"\".format(key, value) \\\n for key, value in attr.items()]), objs[0]._id))\n elif cls == 'Neurotransmitter':\n tmp = self.sql_query(\n \"\"\"select from Neurotransmitter where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n name = attr['name']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"Neurotransmitter {name} already exists with rid = {rid}\"\"\".format(\n name = attr['name'], rid = objs[0]._id))\n return objs\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Neurotransmitter {name} (as its synonym) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], rid = obj._id, formalname = obj.name))\n elif cls in ['Subsystem', 'Neuropil', 'Subregion', 'Tract']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'], formalname = obj.name,\n ds = unique_in.name,\n version = unique_in.version, rid 
= obj._id))\n # Alternatively, try:\n # tmp = self.sql_query(\n # \"\"\"select from {cls} where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n # cls = cls, name = attr['name']))\n # ds = tmp.owned_by(cls = 'DataSource').has(rid = datasource)\n # if len(ds):\n # tmp1 = tmp.has(name = attr['name'])\n # if len(tmp1.owned_by(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = datasource.name,\n # version = datasource.version))\n # else:\n # all_synonym_objs = (tmp - tmp1).node_objs\n # for obj in objs:\n # if len(QueryWrapper.from_rids(obj._id).has(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = datasource.name,\n # version = datasource.version))\n\n # Alternatively 2, try: (will be slow when it has a lot of Owns edges)\n # tmp = sql_query(\n # \"\"\"\n # select from (select expand(out('Owns')[@class = \"{cls}\"]) from {rid}) where name = \"{name}\" or \"{name}\" in synonyms\n # \"\"\"\n # )\n # elif cls in ['Subregion']:\n # if not isinstance(unique_in, models.Neuropil):\n # raise TypeError('To check the uniqueness of a {} instance, unique_in must be a Neuropil object'.format(cls))\n # tmp = self.sql_query(\n # \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='ucls' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n # rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n # if len(tmp):\n # objs = tmp.node_objs\n # if attr['name'] in [obj.name for obj in objs]:\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = unique_in.name))\n # else:\n # for obj in objs:\n # if name in obj.synonyms:\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = unique_in.name))\n elif cls in ['Neuron', 'NeuronFragment']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where uname = \"{name}\") let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists with rid = {rid}, under DataSource {ds} version {version}\"\"\".format(\n cls = cls, name = attr['name'], rid = objs[0]._id,\n ds = unique_in.name,\n version = unique_in.version))\n elif cls == 'Circuit':\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\") let $q = 
(select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n elif cls == 'ArborizationData':\n if not isinstance(unique_in, (models.Neuron, models.Synapse)):\n raise TypeError('To check the uniqueness of a ArborizationData instance, unique_in must be a Neuron or a Synapse object')\n tmp = self.sql_query(\n \"\"\"select from (select expand(out(HasData)) from {rid}) where @class = 'ArborizationData' \"\"\".format(rid = unique_in._id))\n if len(tmp):\n raise NodeAlreadyExistError(\"\"\"ArborizationData already exists for {node} {uname} with rid = {rid}. Use NeuroArch.update_{node}_arborization to update the record\"\"\".format(\n node = unique_in.element_type.lower(), rid = tmp.node_objs[0]._id, uname = unique_in.uname))\n else:\n raise TypeError('Model type not understood.')\n return True", "def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s", "def _match_identical_nodes(self):\n\n for job_name_b in self._topo_b_nodes:\n for job_name_a in self._unresolved_a_nodes:\n if self._is_node_identical(job_name_a, job_name_b):\n self._identical_nodes[job_name_b] = job_name_a\n self._unresolved_a_nodes.remove(job_name_a)\n self._unresolved_b_nodes.remove(job_name_b)\n break", "def add_node(self, name):\n if not name in self._main_dictionary:\n self._main_dictionary[name] = set()", "def is_unique_n_set(string: str) -> bool:\n\n return len(set(string)) == len(string)", "def has_node(self, u: Hashable) -> bool:\n return u in self._names", "def test_get_num_unique_name(self):\n\n list1 = self.test_num_unique_name\n list2 = get_num_unique_name(self.test_sorted_tuple, self.test_dict)\n self.assertEqual(list1, list2)", "def test_inconsistent_name(self):\n entries = {'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'givenName': ['Maarten'],\n 'sn': ['Visscher'],\n 'cn': ['Wessel']}\n }\n with self.assertRaises(CloneError):\n clone(entries)", "def test_unique_id_1():\n id1 = tasks.unique_id()\n id2 = tasks.unique_id()\n assert id1 != id2", "def test_5_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(5):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def testNames(self):\n self.assertEquals(\n set(['BLUE', 'GREEN', 'INDIGO', 'ORANGE', 'RED',\n 'VIOLET', 'YELLOW']),\n set(Color.names()))", "def test_unique_id_2():\n ids = []\n ids.append(tasks.add(Task('one')))\n ids.append(tasks.add(Task('two')))\n ids.append(tasks.add(Task('three')))\n\n uid = tasks.unique_id()\n assert uid not in ids", "def test_adding_same_node(self):\n cons_hash = ConsistentHash(2)\n cons_hash.add('192.168.1.1') \n\n threw_value_error = False\n try:\n cons_hash.add('192.168.1.1')\n except exceptions.ValueError:\n 
threw_value_error = True\n self.assertTrue(threw_value_error) \n \n self.assertTrue(cons_hash._is_consistent())", "def testIdUnique(self):\n ids = {}\n # Vary parameters that affect the work or input data,\n # verify each id is unique\n for params in itertools.product(RequestNames, TaskNames, Inputs,\n Masks, Dbses, Acdcs):\n ele = WorkQueueElement(RequestName=params[0], TaskName=params[1],\n Inputs=params[2], Mask=params[3],\n Dbs=params[4], ACDC=params[5]\n )\n self.assertFalse(ele.id in ids)\n ids[ele.id] = None", "def test_2_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(2):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def getSet(unique_name):", "def getSet(unique_name):", "def test_6_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(6):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def check_name_uniqueness(cls, user_id, name):\n data_with_same_name = Data.objects.only('id').filter(user_id=user_id, name = name)\n return len(data_with_same_name) == 0", "def is_unique(x):\n return len(set(x)) == len(x)", "def test_uniqueness(self):\n passwords = tuple(generate_password(8) for i in range(100))\n self.assertEqual(len(passwords), len(set(passwords)))", "def test_equality(self):\n\n for name in TEST_NAMES:\n self.colorspace.setEqualityGroup(name)\n self.assertEqual(name, self.colorspace.getEqualityGroup())", "def nodes_row_similar(all_rows: set[tuple[str, str, str, int, int, int]],\n identifier: tuple[str, str, str, int, int, int]) -> bool:\n for row in all_rows:\n if row[0] == identifier[0] and row[1] == identifier[1]:\n return True\n return False", "def test_unicode_names(self):\n self.assertArrayEqual(self.dset['a'], self.data['a'])\n self.dset['a'] = 42\n data = self.data.copy()\n data['a'] = 42\n self.assertArrayEqual(self.dset['a'], data['a'])", "def test_7_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(7):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def get_node_names(self):\n return set({node.get_name() for node in self.get_nodeset()}) # return the set of names", "def __unpack_node_names(node_names):\n return \\\n set({\n unpacked_node_name\n for node_name in node_names\n for unpacked_node_name in node_name.split(StoerWagner.MERGED_NODE_NAME_DELIMITER)\n }) # create and return the set of unpacked node names", "def test_4_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(4):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def test_good_node():\n node_a = Node({'A':['B','C']})\n assert node_a.name == 'A'\n assert node_a.connections == ['B','C']", "def test_9_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(9):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def check_dataset_duplicate_ids(self, dataset):\n ids = [a['_id'] for a in dataset]\n # Construct list of duplicates\n dupe_ids = [a for n, a in enumerate(ids) \n if a in ids[:n]]\n if 
len(dupe_ids) > 0:\n # Get list of names for the duplicate pandas\n dupe_names = [a['en.name'] for a in dataset \n if a['_id'] in dupe_ids]\n raise IdError(\"ERROR: duplicate ids for en.names: %s\" \n % str(dupe_names))", "def is_unique(w):\n chars = {}\n for c in w:\n if c in chars:\n return False\n chars[c] = True\n return True", "def has_duplicates(tree):\n taxa = [tip.name for tip in tree.tips()]\n if '' in taxa or None in taxa:\n raise ValueError('Empty taxon name(s) found.')\n return len(set(taxa)) < len(taxa)", "def test_3_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(3):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))", "def _repair_names_check_unique(names: Iterable[str]) -> Iterable[str]:\n for name in names:\n if names.count(name) > 1:\n raise NameNonUniqueError(f\"Names must be unique: {name}\")\n if name == \"\" or name is numpy.nan:\n raise NameNonUniqueError(f\"Names can't be empty: {name}\")\n if re.search(r\"(?:(?<!_)_{2}\\d+|(?<!_)__)+$\", str(name)):\n raise NameNonUniqueError(\n f\"Names can't be of the form `__` or `_j`: {name}\"\n )\n return names", "def test_8_node_positional_itegrity(self):\n ring = ConsistentHashRing([])\n for n in range(8):\n ring.add_node((\"192.168.10.%s\" % str(10+n),\"%s\" % str(10+n)))\n self.assertEqual(\n len([n[0] for n in ring.ring]),\n len(set([n[0] for n in ring.ring])))", "def test_duplicate_entries(self):", "def _raise_if_duplicates(counts: Dict[str, int]) -> None:\n duplicates: List[str] = []\n for nickname, count in counts.items():\n if count > 1:\n duplicates.append(nickname)\n if len(duplicates) > 0:\n # TODO This is not always nickname\n raise ValueError(f'\\'nickname\\' not unique {duplicates}')", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def test_name(self):\n molecule1 = Molecule()\n molecule1.name = None\n\n molecule2 = Molecule()\n molecule2.name = \"\"\n assert molecule1.name == molecule2.name\n\n name = \"benzene\"\n molecule = Molecule()\n molecule.name = name\n assert molecule.name == name", "def test_name(self):\n molecule1 = Molecule()\n molecule1.name = None\n\n molecule2 = Molecule()\n molecule2.name = \"\"\n assert molecule1.name == molecule2.name\n\n name = \"benzene\"\n molecule = Molecule()\n molecule.name = name\n assert molecule.name == name", "def test_name(self):\n\n for name in TEST_NAMES:\n self.colorspace.setName(name)\n self.assertEqual(name, self.colorspace.getName())", "def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())", "def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())", "def check_duplicate_class_names(class_names):\n duplicates = get_duplicates(class_names)\n if duplicates:\n logger.error(f'Only globally unique class names are allowed. 
Found duplicates {duplicates}')\n raise SystemExit(0)", "def test_tree_intersection_name_exists():\n assert tree_intersection", "def test_ids(self):\n state1 = State()\n state2 = State()\n state3 = State()\n self.assertFalse(state1.id == state2.id)\n self.assertFalse(state1.id == state3.id)\n self.assertFalse(state2.id == state3.id)", "def test_random_id(self):\r\n ids = \\\r\n \"R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969\".split(\r\n )\r\n assert random_id(ids, {}) in ids\r\n # just test we got something from the list, don't add stochastic test\r", "def check_name(self, node):\n assert \"name\" in node, \"Package node does not contain attribute 'node'\"\n assert len(node[\"name\"]) >= 1, \"Expecting at least one 'name' value\"\n # TODO: add more thorough checks", "def name_is_unique(self, name):\n unique = True\n for client in self.clients:\n unique = unique and (False if name == client.get_name() else True)\n return unique", "def test_getting_keys(self): \n cons_hash = ConsistentHash(2) \n \n nodes = ['192.168.1.1:20000',\n '192.168.1.1:20001',\n '192.168.1.1:20002',\n '192.168.1.1:20003'] \n\n for node in nodes:\n cons_hash.add(node)\n \n self.assertEquals(len(cons_hash), 8)\n node_counts = defaultdict(int)\n for i in xrange(0,100):\n key = str(uuid.uuid4())\n node = cons_hash.get_node(key)\n \n self.assertTrue(node in nodes)\n node_counts[node] += 1\n\n self.assertTrue(cons_hash._is_consistent())", "def _assert_thread_names(self):\n threads = [t for t in threading.enumerate()]\n thread_names = [t.name for t in threads]\n\n thread_names_set = set(thread_names)\n expected_names = {'MainThread', 'SQLiteExecutor',\n 'OutputManager', 'QueueFeederThread'}\n\n self.assertEqual(thread_names_set, expected_names)", "def test_instance_naming_creation(os_info):\n NEUTRON.list_security_groups = mock.MagicMock(\n return_value=iter([{\"security_groups\": []}]))\n NEUTRON.create_subnet = mock.MagicMock(\n return_value={\"subnet\": SUBNETS}\n )\n\n instance_names = os_info.nodes_names\n for i in range(len(instance_names)):\n assert instance_names[i] == 'test-node-{}'.format(i + 1)", "def test_install_set_multi(self):\n expected = copy.deepcopy(test_xdata)\n for thing in expected.xpath(\"Children[@identical='true']/Thing\"):\n thing.text = \"same\"\n self._install(\n [lxml.etree.Element(\n \"SetMulti\", value=\"same\",\n base='Test/Children[#attribute/identical = \"true\"]',\n sub=\"Thing/#text\")],\n expected)", "def make_unique(self, unnamed_prefix: str = '') -> 'Entity':\n orig_name = self['targetname']\n if orig_name:\n self['targetname'] = '' # Remove ourselves from the .by_target[] set.\n else:\n orig_name = unnamed_prefix\n\n base_name = orig_name.rstrip('0123456789')\n\n if self.map.by_target[base_name]:\n # Check every index in order.\n i = 1\n while True:\n name = base_name + str(i)\n if not self.map.by_target[name]:\n self['targetname'] = name\n break\n i += 1\n else:\n # The base name is free!\n self['targetname'] = base_name\n\n return self", "def test_unique_keys(self):\n registry = ClassRegistry(attr_name='element', unique=True)\n\n # We can register any class like normal...\n # noinspection PyUnusedLocal\n registry.register(Charmander)\n\n # ... 
but if we try to register a second class with the same\n # key, we get an error.\n with self.assertRaises(RegistryKeyError):\n registry.register(Charmeleon)", "def test_should_raise_error_for_duplicate_names(self):\r\n self.edge_spec['label'] = 'updated_at'\r\n with self.assertRaises(ValueError):\r\n self.spec_parser.parse_statement(self.property_spec)\r\n self.spec_parser.parse_statement(self.edge_spec)", "def test_duplicate_named_input_edge(self):\n with self.assertRaises(ValidationError):\n with Graph('g'):\n n1, n2 = Node('a'), Node('b')\n n1 | 'bar' * n2\n n1 * 'foo' | 'bar' * n2", "def test_multiple_char_unique(self):\n self.assertTrue(all_unique_chars_no_set(\"ab\"))\n self.assertTrue(all_unique_chars_no_set(\"ba\"))\n self.assertTrue(all_unique_chars_no_set(\"make\"))\n self.assertTrue(all_unique_chars_no_set(\"thorn\"))\n self.assertTrue(all_unique_chars_no_set(\"malibu\"))\n self.assertTrue(all_unique_chars_no_set(string.ascii_letters))", "def make_unique(name, reserved_names):\n while name in reserved_names:\n name += '_'\n\n return name", "def test_if_nodes_are_in_same_chunk(self, node_ids: Sequence[np.uint64]\n ) -> bool:\n assert len(node_ids) == 2\n return self.get_chunk_id(node_id=node_ids[0]) == \\\n self.get_chunk_id(node_id=node_ids[1])", "def test_duplicate_local_name(self):\n # Setup test\n infilename = os.path.join(_SAMPLE_FILES_DIR, \"reg_good_simple.xml\")\n filename = os.path.join(_TMP_DIR, \"reg_duplicate_local_name.xml\")\n out_source_name = \"physics_types_duplicate_local_name\"\n out_source = os.path.join(_TMP_DIR, out_source_name + '.F90')\n out_meta = os.path.join(_TMP_DIR, out_source_name + '.meta')\n remove_files([out_source, out_meta])\n tree, root = read_xml_file(infilename)\n # Change output filename\n for obj in root:\n oname = obj.get('name')\n if (obj.tag == 'file') and (oname == 'physics_types_simple'):\n obj.set('name', out_source_name)\n new_var = ET.SubElement(obj, \"variable\")\n new_var.set(\"local_name\", \"latitude\")\n new_var.set(\"standard_name\", \"east_wind\")\n new_var.set(\"units\", \"radians\")\n new_var.set(\"type\", \"real\")\n new_var.set(\"kind\", \"kind_phys\")\n dims_elem = ET.SubElement(new_var, \"dimensions\")\n dims_elem.text = 'horizontal_dimension'\n break\n # End if\n # End for\n tree.write(filename)\n # Run test\n with self.assertRaises(ValueError) as verr:\n _ = gen_registry(filename, 'eul', {}, _TMP_DIR, 2,\n _SRC_MOD_DIR, _CAM_ROOT,\n loglevel=logging.ERROR,\n error_on_no_validate=True)\n # End with\n # Check exception message\n emsg = \"duplicate variable local_name, 'latitude', in \"\n emsg += \"physics_types_duplicate_local_name, already defined \"\n emsg += \"with standard_name, 'latitude'\"\n self.assertEqual(emsg, str(verr.exception))\n # Make sure no output files were created\n self.assertFalse(os.path.exists(out_meta))\n self.assertFalse(os.path.exists(out_source))", "def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()", "def test_nodes_exist(graph_no_edges):\n for node in graph_no_edges:\n assert graph_no_edges.has_node(node)", "def get_naked_names(graph: BELGraph) -> Set[str]:\n return set(_naked_names_iter(graph))", "def check_unique_names_for_geometry(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n non_unique_names = []\n\n geo_shapes = pm.ls(type=\"mesh\")\n progress_controller.maximum = len(geo_shapes)\n\n for shape 
in geo_shapes:\n if (\n shape.getParent().isUniquelyNamed() is False\n and shape.getParent().isReferenced() is False\n ):\n non_unique_names.append(shape.getParent())\n progress_controller.increment()\n\n if non_unique_names:\n mc.select(non_unique_names)\n raise PublishError(\n \"Some geometry objects are not <b>Uniquely Named</b><br><br>\"\n \"%s<br><br>Please rename them.\"\n )\n progress_controller.complete()", "def testSynonymDuplicate(self):\n\t\t\t\tone = spinner.Word.objects.get_single('mac', True)\n\t\t\t\ttwo = spinner.Word.objects.get_single('macintosh', True)\n\t\n\t\t\t\tsyn = spinner.Synonym.objects.get_single(one, two, True)\n\t\t\t\t\n\t\t\t\tsyn2 = spinner.Synonym.objects.get_single(two, one, True)\n\n\t\t\t\tassert syn == syn2\n\n\t\t\t\tsyn.delete()\n\t\t\t\tone.delete()\n\t\t\t\ttwo.delete()", "def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True", "def test_is_an_element_name():\n for el in roentgen.elements['name']:\n assert(is_an_element(el))", "def all_unique_set(string):\n return len(string) == len(set(string))", "def test_unique_id(self):\n session1 = _create_test_session()\n session2 = _create_test_session()\n self.assertNotEqual(session1.id, session2.id)", "def is_unique_string(s):\n return len(s) == len(set(s))", "def _check_unique_insesitive(self, cr, uid, ids, context=None):\n for category in self.browse(cr, uid, ids, context=context):\n if len(self.search(cr, uid, [('name','=ilike',category.name)], context=context)) > 1:\n raise osv.except_osv(_('Constraint Error'), _(\"The Name Must Be Unique!\"))\n return True", "def test_synonym(self): \n pass", "def test_name_attribute_assignment(self):\n self.assertNotIn('aldous', self.__dict__)\n self.aldous\n self.assertIn('aldous', self.__dict__)\n self.assertIs(self.__dict__['aldous'], self.aldous)", "def test_id(self):\n node = Node()\n node.id = \"1234\"\n self.assertEqual(node.getId(), node.id)", "def check_root_node_name___fix():\n from stalker import Asset\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n t = v.task\n asset_name = None\n if isinstance(t.parent, Asset):\n asset_name = t.parent.name\n\n root_nodes = auxiliary.get_root_nodes()\n root_node_name = root_nodes[0].name()\n\n if asset_name is not None:\n correct_node_name = asset_name\n correct_node_name = correct_node_name.replace(\" \", \"_\")\n if correct_node_name[0].isdigit():\n correct_node_name = \"_%s\" % correct_node_name\n if correct_node_name[-1].isdigit():\n correct_node_name += \"_grp\"\n else:\n correct_node_name = root_node_name\n if correct_node_name[0].isdigit():\n correct_node_name = \"_%s\" % correct_node_name\n if correct_node_name[-1].isdigit():\n correct_node_name += \"_grp\"\n\n root_nodes[0].rename(correct_node_name)", "def test_create_anonymous_classical_registers(self):\n cr1 = ClassicalRegister(size=3)\n cr2 = ClassicalRegister(size=3)\n self.assertNotEqual(cr1.name, cr2.name)", "def test_node_exists():\n assert Node", "def test_node_exists():\n assert Node", "def __setitem__(self, nodename, node):\n\n for hash_ in self._repl_iterator(nodename):\n if hash_ in self._nodes:\n raise 
ValueError(\"Node name %r is \"\n \"already present\" % nodename)\n self._nodes[hash_] = node\n bisect.insort(self._keys, hash_)", "def __setitem__(self, nodename, node):\n\n for hash_ in self._repl_iterator(nodename):\n if hash_ in self._nodes:\n raise ValueError(\"Node name %r is \"\n \"already present\" % nodename)\n self._nodes[hash_] = node\n bisect.insort(self._keys, hash_)" ]
[ "0.77317005", "0.7141291", "0.70312846", "0.6989006", "0.65257645", "0.65124905", "0.6434736", "0.64302534", "0.6336855", "0.6324279", "0.6158565", "0.61442566", "0.6136917", "0.6109544", "0.60242057", "0.5961925", "0.5955324", "0.5897734", "0.58906686", "0.58848387", "0.5879582", "0.58682114", "0.5862924", "0.57981235", "0.57898164", "0.57813835", "0.5780852", "0.57747984", "0.57273054", "0.57234496", "0.57217705", "0.57202154", "0.5719075", "0.5719075", "0.57142466", "0.5704289", "0.56947064", "0.56930333", "0.5685541", "0.56804734", "0.56800306", "0.5677887", "0.5671903", "0.5671701", "0.5668507", "0.56460065", "0.56400126", "0.56281", "0.562806", "0.5625475", "0.56253225", "0.56049573", "0.5603679", "0.5598733", "0.5575745", "0.5570731", "0.5560971", "0.5560971", "0.5555528", "0.5555528", "0.5550396", "0.55382055", "0.55382055", "0.55208033", "0.5509863", "0.54936814", "0.5489508", "0.54815716", "0.5481337", "0.5480385", "0.5468004", "0.546531", "0.54652923", "0.54614556", "0.54598886", "0.5458542", "0.545789", "0.5440427", "0.54295874", "0.54273736", "0.5417524", "0.5415243", "0.5414287", "0.541106", "0.54040545", "0.5403094", "0.53946114", "0.5390756", "0.5378286", "0.5376753", "0.5375569", "0.5372862", "0.53715855", "0.5370676", "0.5364389", "0.5362533", "0.5359298", "0.53569484", "0.53569484", "0.5355685", "0.5355685" ]
0.0
-1
Test the adversarial case when the first input is fed to the last node in the onnx subgraph
def test_model_with_input_last_onnx_node(self):
    roi_model = RoiModel(height=7, width=7, scale=0.25)
    x = torch.rand(1, 1, 6, 6)
    rois = torch.tensor([
        [0, -2.0, -2.0, 22.0, 22.0],
    ])
    dummy_input = (x, rois)
    onnx_utils.OnnxSaver.set_node_names('./data/roi.onnx', roi_model, dummy_input,
                                        is_conditional=False, module_marker_map={},
                                        onnx_export_args=(onnx_utils.OnnxExportApiArgs(opset_version=11)))
    onnx_model = onnx.load('./data/roi.onnx')
    end_nodes = [n.name for n in onnx_model.graph.node if 'end' in n.name]
    assert len(end_nodes) == 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_degree_relative_to_subgraph(self, dim):\r\n g = nx.disjoint_union(nx.complete_graph(dim), nx.complete_graph(dim + 1))\r\n g.add_edge(dim, dim - 1)\r\n subgraph = list(range(dim + 1))\r\n assert clique.shrink(subgraph, g) == list(range(dim))", "def are_pad_on_graph(self, subgraph) -> bool:\n self.visit(subgraph)\n return self.on_graph", "def test_add_02():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_tree_splay() -> None:\n t = generate_graph_resources(5)\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_2\", \"ds_2\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_3\", \"ds_3\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_4\", \"ds_4\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_5\", \"ds_5\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).identity = \"email\"\n traversal = Traversal(DatasetGraph(*t), {\"email\": \"X\"})\n\n assert incoming_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\")) == {\n Edge(\n FieldAddress(\"__ROOT__\", \"__ROOT__\", \"email\"),\n FieldAddress(\"dr_1\", \"ds_1\", \"f1\"),\n )\n }\n assert outgoing_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_3\", \"ds_3\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_4\", \"ds_4\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_5\", \"ds_5\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\")),\n }\n\n assert outgoing_edges(traversal, CollectionAddress(\"dr_5\", \"ds_5\")) == set()\n assert incoming_edges(traversal, CollectionAddress(\"dr_2\", \"ds_2\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_3\", \"ds_3\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_3\", \"ds_3\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_4\", \"ds_4\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_4\", \"ds_4\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_5\", \"ds_5\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_5\", \"ds_5\", \"f1\"))\n }\n traversal_map, terminators = traversal.traversal_map()\n assert traversal_map == {\n \"__ROOT__:__ROOT__\": {\"from\": {}, \"to\": {\"dr_1:ds_1\": {\"email -> f1\"}}},\n \"dr_1:ds_1\": {\n \"from\": {\"__ROOT__:__ROOT__\": {\"email -> f1\"}},\n \"to\": {\n \"dr_2:ds_2\": {\"f1 -> f1\"},\n \"dr_3:ds_3\": {\"f1 -> f1\"},\n \"dr_4:ds_4\": {\"f1 -> f1\"},\n \"dr_5:ds_5\": {\"f1 -> f1\"},\n },\n },\n \"dr_2:ds_2\": {\"from\": {\"dr_1:ds_1\": 
{\"f1 -> f1\"}}, \"to\": {}},\n \"dr_3:ds_3\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n \"dr_4:ds_4\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n \"dr_5:ds_5\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n }\n\n assert set(terminators) == {\n CollectionAddress(\"dr_2\", \"ds_2\"),\n CollectionAddress(\"dr_3\", \"ds_3\"),\n CollectionAddress(\"dr_4\", \"ds_4\"),\n CollectionAddress(\"dr_5\", \"ds_5\"),\n }", "def are_concatenate_on_graph(self, subgraph) -> bool:\n self.visit(subgraph)\n return self.on_graph", "def test_tree_linear() -> None:\n t = generate_graph_resources(5)\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_2\", \"ds_2\", \"f1\"), None)\n )\n field(t, (\"dr_2\", \"ds_2\", \"f1\")).references.append(\n (FieldAddress(\"dr_3\", \"ds_3\", \"f1\"), None)\n )\n field(t, (\"dr_3\", \"ds_3\", \"f1\")).references.append(\n (FieldAddress(\"dr_4\", \"ds_4\", \"f1\"), None)\n )\n field(t, (\"dr_4\", \"ds_4\", \"f1\")).references.append(\n (FieldAddress(\"dr_5\", \"ds_5\", \"f1\"), None)\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).identity = \"email\"\n traversal = Traversal(DatasetGraph(*t), {\"email\": \"X\"})\n\n assert set(incoming_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\"))) == {\n Edge(\n FieldAddress(\"__ROOT__\", \"__ROOT__\", \"email\"),\n FieldAddress(\"dr_1\", \"ds_1\", \"f1\"),\n )\n }\n assert outgoing_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\"))\n }\n assert outgoing_edges(traversal, CollectionAddress(\"dr_5\", \"ds_5\")) == set()\n assert set(incoming_edges(traversal, CollectionAddress(\"dr_2\", \"ds_2\"))) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\"))\n }\n traversal_map, terminators = traversal.traversal_map()\n assert traversal_map == {\n \"__ROOT__:__ROOT__\": {\"from\": {}, \"to\": {\"dr_1:ds_1\": {\"email -> f1\"}}},\n \"dr_1:ds_1\": {\n \"from\": {\"__ROOT__:__ROOT__\": {\"email -> f1\"}},\n \"to\": {\"dr_2:ds_2\": {\"f1 -> f1\"}},\n },\n \"dr_2:ds_2\": {\n \"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}},\n \"to\": {\"dr_3:ds_3\": {\"f1 -> f1\"}},\n },\n \"dr_3:ds_3\": {\n \"from\": {\"dr_2:ds_2\": {\"f1 -> f1\"}},\n \"to\": {\"dr_4:ds_4\": {\"f1 -> f1\"}},\n },\n \"dr_4:ds_4\": {\n \"from\": {\"dr_3:ds_3\": {\"f1 -> f1\"}},\n \"to\": {\"dr_5:ds_5\": {\"f1 -> f1\"}},\n },\n \"dr_5:ds_5\": {\"from\": {\"dr_4:ds_4\": {\"f1 -> f1\"}}, \"to\": {}},\n }\n\n assert terminators == [CollectionAddress(\"dr_5\", \"ds_5\")]", "def sub_graph_merging(self):", "def test_add_00():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [\n info(\"A\", TensorProto.FLOAT, a_shape),\n info(\"B\", TensorProto.FLOAT, b_shape),\n ]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n b = np.random.rand(*b_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a, \"B\": b}, outputs).run()", "def test_add_01():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (1, 1, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", 
\"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_add_03():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (3, 4)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_adjacent_nodes(graph_with_edges):\n assert graph_with_edges.adjacent('A', 'B')", "def run_adding_edges(self):\n indices = np.where(self.X==0)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)", "def test_same_node_is_reachable(self):\n # G is an arbitrary tournament on ten nodes.\n G = DiGraph(sorted(p) for p in combinations(range(10), 2))\n assert_true(all(is_reachable(G, v, v) for v in G))", "def test_input_valid_subgraph(self, dim):\r\n with pytest.raises(ValueError, match=\"Input is not a valid subgraph\"):\r\n clique.swap([0, dim], nx.empty_graph(dim))", "def creates_cycle(connections, test):\n node_in, node_out = test\n\n if node_in == node_out:\n # Self-loop\n return True\n\n visited = {node_out}\n while True:\n num_added = 0\n for a, b in connections:\n if a in visited and b not in visited:\n if b == node_in:\n return True\n\n visited.add(b)\n num_added += 1\n\n if num_added == 0:\n return False", "def test_dont_inplace_output_consumers(op_tester):\n d0 = np.asarray([1.0, 1.0]).astype(np.float32)\n\n def get_init_builder():\n def init_builder(builder):\n i0 = builder.addInputTensor(d0)\n subgraph_builder = builder.createSubgraphBuilder()\n subgraph_builder.addInputTensorFromParentGraph(i0)\n i1 = subgraph_builder.aiGraphcore.scale([i0], 2.0, \"hoop1\")\n i2 = subgraph_builder.aiGraphcore.scale([i1], 2.0, \"hoop2\")\n i3 = subgraph_builder.aiGraphcore.scale([i2], 2.0, \"hoop3\")\n i4 = subgraph_builder.aiGraphcore.scale([i3], 2.0, \"hoop4\")\n i5 = subgraph_builder.aiGraphcore.scale([i4], 2.0, \"hoop5\")\n i6 = subgraph_builder.aiGraphcore.scale([i5], 2.0, \"hoop5\")\n subgraph_builder.addOutputTensor(i3) # 8\n subgraph_builder.addOutputTensor(i6) # 64\n outs = builder.aiGraphcore.call([i0], 2, subgraph_builder)\n summation = builder.aiOnnx.add([outs[0], outs[1]])\n builder.addOutputTensor(summation)\n return [summation]\n\n return init_builder\n\n def reference(_): # ref_data is an unused argument\n return [np.array([72.0, 72.0]).astype(np.float32)]\n\n op_tester.setPatterns([\"InPlace\"], enableRuntimeAsserts=False)\n op_tester.run(get_init_builder(), reference, \"infer\")", "def next_node_dfs(search_state, last_node_is_ok):\n log_T, initial_state, min_score, max_depth, maxtraversals, node, node_idx, it, order, score, sub_info = search_state\n min_score = 
float(min_score) # make sure numba knows this is a float (otherwise, sometimes, it doesn't (bug in numba))\n n_states = log_T.shape[0]\n if it == maxtraversals:\n assert False, \"Number of traversals exceeded\"\n while True:\n # next node ##\n # try adding a value at the end\n for next_idx, next_state in enumerate(order[node[-1]]):\n if last_node_is_ok and min_score <= score + log_T[node[-1], next_state] and len(node) < max_depth \\\n and syntax_check(np.array(node + [next_state]), sub_info, partial=True):\n node.append(next_state)\n node_idx.append(next_idx)\n break\n # adding a value at the end failed, so we are a leave\n else:\n for p in xrange(len(node) - 1, -1, -1):\n if node_idx[p] != n_states - 1: # find where within the node to increase (and discard all others after)\n old_idx = node_idx[p]\n del node_idx[p:]\n del node[p:]\n node_idx.append(old_idx + 1)\n prev_state = node[p - 1] if p > 0 else initial_state\n node.append(order[prev_state, node_idx[p]])\n break\n else:\n search_state = log_T, initial_state, min_score, max_depth, maxtraversals, list(node), list(node_idx), it, order, score, sub_info\n return [-1], score, search_state # end of the generator, can't increase even the root\n last_node_is_ok = True # We can now make progress again, regardless of whether we could at the beginning\n it += 1\n # score and return current node if adequate\n score = log_T[initial_state, node[0]]\n for p in xrange(1, len(node)):\n score += log_T[node[p - 1], node[p]]\n if min_score <= score and syntax_check(np.array(node), sub_info, partial=False):\n search_state = log_T, initial_state, min_score, max_depth, maxtraversals, list(node), list(node_idx), it, order, score, sub_info\n return list(node), score, search_state # the invocation to list here is to make a copy, don't remove!", "def test_circular_reference() -> None:\n\n t = generate_graph_resources(3)\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_2\", \"ds_2\", \"f1\"), \"to\")\n )\n field(t, (\"dr_2\", \"ds_2\", \"f2\")).references.append(\n (FieldAddress(\"dr_3\", \"ds_3\", \"f2\"), \"to\")\n )\n field(t, (\"dr_3\", \"ds_3\", \"f3\")).references.append(\n (FieldAddress(\"dr_1\", \"ds_1\", \"f3\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f2\")).identity = \"email\"\n traversal = Traversal(DatasetGraph(*t), {\"email\": \"X\"})\n traversal_map, terminators = traversal.traversal_map()\n assert traversal_map == {\n \"__ROOT__:__ROOT__\": {\"from\": {}, \"to\": {\"dr_1:ds_1\": {\"email -> f2\"}}},\n \"dr_1:ds_1\": {\n \"from\": {\"__ROOT__:__ROOT__\": {\"email -> f2\"}, \"dr_3:ds_3\": {\"f3 -> f3\"}},\n \"to\": {\"dr_2:ds_2\": {\"f1 -> f1\"}},\n },\n \"dr_2:ds_2\": {\n \"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}},\n \"to\": {\"dr_3:ds_3\": {\"f2 -> f2\"}},\n },\n \"dr_3:ds_3\": {\n \"from\": {\"dr_2:ds_2\": {\"f2 -> f2\"}},\n \"to\": {\"dr_1:ds_1\": {\"f3 -> f3\"}},\n },\n }\n\n assert terminators == [CollectionAddress(\"dr_1\", \"ds_1\")]\n # this visits (root -> 1,1 -> 2,2 -> 3,3 -> 1,1) and then terminates.\n assert generate_traversal_order(traversal) == {\n CollectionAddress(\"__ROOT__\", \"__ROOT__\"): [0],\n CollectionAddress(\"dr_1\", \"ds_1\"): [1, 4],\n CollectionAddress(\"dr_2\", \"ds_2\"): [2],\n CollectionAddress(\"dr_3\", \"ds_3\"): [3],\n }", "def test_adjacent_none(graph_with_edges):\n assert graph_with_edges.adjacent('B', 'A') is False", "def test_adjacency(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n handler = self.new_handler()\n state = 
handler.get_gamestate()\n ret = check_adjacency_list(handler.root, state['adjacency_list'])\n if ret:\n successes += 1\n else:\n failures += 1\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to correctly generate adjacency list! ' +\n f'{failures}/{iterations} failures! {BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated adjacency list generation in {successes} trees.{BColors.ENDC}\")", "def test_walk_graph(self):\n def assertDependencyWalk(target, results, postorder=False):\n targets = []\n self.build_graph.walk_transitive_dependency_graph([target.address],\n lambda x: targets.append(x),\n postorder=postorder)\n self.assertEquals(results, targets)\n\n def assertDependeeWalk(target, results, postorder=False):\n targets = []\n self.build_graph.walk_transitive_dependee_graph([target.address],\n lambda x: targets.append(x),\n postorder=postorder)\n self.assertEquals(results, targets)\n\n a = self.make_target('a')\n b = self.make_target('b', dependencies=[a])\n c = self.make_target('c', dependencies=[b])\n d = self.make_target('d', dependencies=[c, a])\n e = self.make_target('e', dependencies=[d])\n\n assertDependencyWalk(a, [a])\n assertDependencyWalk(b, [b, a])\n assertDependencyWalk(c, [c, b, a])\n assertDependencyWalk(d, [d, c, b, a])\n assertDependencyWalk(e, [e, d, c, b, a])\n\n assertDependeeWalk(a, [a, b, c, d, e])\n assertDependeeWalk(b, [b, c, d, e])\n assertDependeeWalk(c, [c, d, e])\n assertDependeeWalk(d, [d, e])\n assertDependeeWalk(e, [e])\n\n assertDependencyWalk(a, [a], postorder=True)\n assertDependencyWalk(b, [a, b], postorder=True)\n assertDependencyWalk(c, [a, b, c], postorder=True)\n assertDependencyWalk(d, [a, b, c, d], postorder=True)\n assertDependencyWalk(e, [a, b, c, d, e], postorder=True)\n\n assertDependeeWalk(a, [e, d, c, b, a], postorder=True)\n assertDependeeWalk(b, [e, d, c, b], postorder=True)\n assertDependeeWalk(c, [e, d, c], postorder=True)\n assertDependeeWalk(d, [e, d], postorder=True)\n assertDependeeWalk(e, [e], postorder=True)\n\n #Try a case where postorder traversal is not identical to reversed preorder traversal\n c = self.make_target('c1', dependencies=[])\n d = self.make_target('d1', dependencies=[c])\n b = self.make_target('b1', dependencies=[c, d])\n e = self.make_target('e1', dependencies=[b])\n a = self.make_target('a1', dependencies=[b, e])\n\n assertDependencyWalk(a, [a, b, c, d, e])\n assertDependencyWalk(a, [c, d, b, e, a], postorder=True)", "def test_multi_source_explicit(self):\n with Graph('g') as graph:\n graph.source | Node('a') | graph.sink\n graph.source * 'out2' | Node('b') | 'in2' * graph.sink", "def backprop(node, result):\n while node:\n node.addOutcome(result)\n node = node.getParent()", "def test_local_useless_inc_subtensor_increment_zeros():\n y = matrix(\"y\")\n\n s = at.zeros((2, 2))[:, :]\n o_shape = inc_subtensor(s, specify_shape(y, s.shape))\n\n mode = get_default_mode().including(\"local_useless_inc_subtensor\")\n f_shape = function([y], o_shape, mode=mode)\n\n topo = f_shape.maker.fgraph.toposort()\n assert not any(isinstance(n.op, IncSubtensor) for n in topo)", "def test_cycle(self):\n g = Graph(3)\n g.add_edge(0, 1)\n g.add_edge(0, 2)\n # g.add_edge(0, 0)\n assert g.contains_cycle() is False\n g.add_edge(1, 2)\n assert g.contains_cycle() is True", "def test_case2(self):\n\n graph = BipartiteGraph()\n\n graph.addEdge(\"supervisor1\",\"student1\")\n graph.addEdge(\"supervisor2\",\"student4\")\n graph.addEdge(\"supervisor3\",\"student3\")\n\n val1 = 
graph.getSupervisorDegree(\"supervisor1\")\n\n graph.addEdge(\"supervisor1\",\"student2\")\n\n curr = graph.getSupervisorDegree(\"supervisor1\")\n val2 = graph.getSupervisors(\"student2\")\n expected2 = [\"supervisor1\"]\n\n self.assertEqual((curr-1,expected2),(val1,val2))", "def test_stable_ordering(self):\n with Graph('g') as graph:\n a = ParrotNode(['a'])\n p = a | pike.merge()\n b = ParrotNode(['b'])\n graph.source | b | p\n # Make sure that b runs before a\n if graph.nodes.index(b) > graph.nodes.index(a):\n graph.nodes.remove(b)\n graph.nodes.insert(graph.nodes.index(a), b)\n ret = graph.run()\n self.assertEqual(list(ret['default']), ['a', 'b'])", "def test_outgoing_edge_traversals(self):\r\n e1 = TestEdge.create(self.v1, self.v2, numbers=12)\r\n e2 = TestEdge.create(self.v1, self.v3, numbers=13)\r\n e3 = OtherTestEdge.create(self.v2, self.v3, numbers=14)\r\n\r\n results = self.v2.outE()\r\n assert len(results) == 1\r\n assert e3 in results\r\n\r\n results = self.v2.outE(types=[TestEdge])\r\n assert len(results) == 0", "def test_1():\n for _ in range(100):\n G = NetworkTopo()\n flows = MulticastFlows(G, 10, 40)\n\n spt = ShortestPathTree(G, flows)\n\n for T in spt.multicast_trees:\n assert len(nx.cycle_basis(T)) == 0", "def test_input_not_subgraph(self, dim):\r\n with pytest.raises(ValueError, match=\"Input is not a valid subgraph\"):\r\n clique.grow([dim + 1], nx.empty_graph(dim))", "def _adjoint(op):\n if isinstance(op, list):\n adjoint_op = []\n for item in op:\n if isinstance(item, list):\n assert len(item) == 2\n adjoint_op.append([item[0].dag(), item[1]])\n else:\n adjoint_op.append(item.dag())\n return adjoint_op\n else:\n return op.dag()", "def back_prop(nodes_in_path, playout_result):\n for temp_node in nodes_in_path:\n temp_node.visited += 1\n if str(playout_result) == str(temp_node.side):\n temp_node.winning += 1", "def test_add_to_graph():\n node_list = []\n node_list.append(Node({'A':['B','C']}))\n node_list.append(Node({'B':['C','D']}))\n node_list.append(Node({'C':['D']}))\n node_list.append(Node({'D':['C']}))\n g = Graph()\n for node in node_list:\n g.add(node)\n assert len(g.nodes) == len(node_list)", "def addInLink(source, target):\n if inlinkGraph.has_key(source):\n # if target not in inlinkGraph[source]:# uncomment to remove repetitives\n inlinkGraph[source].append(target)\n inlinkGraphDegree[source] = inlinkGraphDegree[source] + 1\n else:\n inlinkGraph[source].append(target)\n inlinkGraphDegree[source] = 1", "def test_initial_nodes_to_merge(self):\r\n t = mergeorder(['A', 'B', 'C', 'D', 'E'], 'foo')\r\n exp = set([t.Children[0], t.Children[1].Children[1]])\r\n obs = initial_nodes_to_merge(t)\r\n self.assertEqual(obs, exp)", "def next_leaf(node):\n return len(node[1][0][1]) == 0", "def test_induction_missing(self):\n node1 = protein(namespace=n(), name=n())\n node2 = protein(namespace=n(), name=n())\n node3 = protein(namespace=n(), name=n())\n node4 = protein(namespace=n(), name=n())\n graph = BELGraph()\n graph.add_qualified_edge(node1, node2, relation=n(), citation=n(), evidence=n())\n graph.add_qualified_edge(node1, node4, relation=n(), citation=n(), evidence=n())\n\n res = get_subgraph_by_induction(graph, [node1.as_tuple()])\n self.assertIsNotNone(res)\n self.assertIsInstance(res, BELGraph)\n self.assertEqual(1, res.number_of_nodes())\n self.assertEqual(0, res.number_of_edges())\n\n res = get_subgraph_by_induction(graph, [node1.as_tuple(), node2.as_tuple()])\n self.assertIsNotNone(res)\n self.assertIsInstance(res, BELGraph)\n self.assertEqual(2, 
res.number_of_nodes())\n self.assertEqual(1, res.number_of_edges())\n\n res = get_subgraph_by_induction(graph, [node3.as_tuple()])\n self.assertIsNone(res, msg='Should return none since node3 is not in graph')\n\n self.assertEqual(3, graph.number_of_nodes(), msg='Original graph nodes should not change')\n self.assertEqual(2, graph.number_of_edges(), msg='Original graph edges should not change')", "def test_1(self):\r\n r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)\r\n o = MyOp.make_node(r1, r1)\r\n o2 = MyOp.make_node(o.outputs[0], r5)\r\n all = general_toposort(o2.outputs, prenode)\r\n assert all == [r5, r1, o, o.outputs[0], o2, o2.outputs[0]]", "def test_duplicate_named_input_edge(self):\n with self.assertRaises(ValidationError):\n with Graph('g'):\n n1, n2 = Node('a'), Node('b')\n n1 | 'bar' * n2\n n1 * 'foo' | 'bar' * n2", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 3)\n self.small_tree.add_edge(4, 3)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(0, 1) # deg(0) = 1\n\n self.deterministic_graph.add_edge(1, 2) # deg(1) = 2\n\n self.deterministic_graph.add_edge(2, 3)\n self.deterministic_graph.add_edge(2, 4) # deg(2) = 3\n\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(3, 6) # deg(3) = 4\n\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(4, 6)\n self.deterministic_graph.add_edge(4, 7) # deg(4) = 5\n\n self.deterministic_graph.add_edge(5, 6)\n self.deterministic_graph.add_edge(5, 7)\n self.deterministic_graph.add_edge(5, 8)\n self.deterministic_graph.add_edge(5, 9) # deg(5) = 6\n\n self.deterministic_graph.add_edge(6, 7)\n self.deterministic_graph.add_edge(6, 8)\n self.deterministic_graph.add_edge(6, 9) # deg(6) = 6\n\n self.deterministic_graph.add_edge(7, 8)\n self.deterministic_graph.add_edge(7, 9) # deg(7) = 5\n\n self.deterministic_graph.add_edge(8, 9) # deg(8) = 4", "def test_multiple_label_traversals(self):\r\n TestEdge.create(self.v1, self.v2)\r\n OtherTestEdge.create(self.v1, self.v3)\r\n YetAnotherTestEdge.create(self.v1, self.v4)\r\n\r\n assert len(self.v1.outV()) == 3\r\n\r\n assert len(self.v1.outV(TestEdge)) == 1\r\n assert len(self.v1.outV(OtherTestEdge)) == 1\r\n assert len(self.v1.outV(YetAnotherTestEdge)) == 1\r\n\r\n out = self.v1.outV(TestEdge, OtherTestEdge)\r\n assert len(out) == 2\r\n assert self.v2.vid in [v.vid for v in out]\r\n assert self.v3.vid in [v.vid for v in out]\r\n\r\n out = self.v1.outV(OtherTestEdge, YetAnotherTestEdge)\r\n assert len(out) == 2\r\n assert self.v3.vid in [v.vid for v in out]\r\n assert self.v4.vid in [v.vid for v in out]", "def test_incoming_edge_traversals(self):\r\n e1 = TestEdge.create(self.v1, self.v2, numbers=12)\r\n e2 = TestEdge.create(self.v1, self.v3, numbers=13)\r\n e3 = OtherTestEdge.create(self.v2, self.v3, numbers=14)\r\n\r\n results = self.v2.inE()\r\n assert len(results) == 1\r\n assert e1 in results\r\n\r\n results = self.v2.inE(types=[OtherTestEdge])\r\n assert len(results) == 0", "def all_nodes_dfs(log_T, initial_state, min_score, sub_info, max_depth=1000000000000000000, maxtraversals=1000000000000000000):\n # default argument for sub_info: empty_sub_info = 
(np.array([], dtype=int), np.array([], dtype=int), 1000000000000000000)\n min_score = float(min_score) # make sure numba knows this is a float (otherwise, sometimes, it doesn't (bug in numba))\n order = np.zeros(log_T.shape, np.int64)\n for i in xrange(order.shape[1]):\n order[i] = (-log_T[i]).argsort()\n n_states = log_T.shape[0]\n node = [order[initial_state, 0]] # most likely first node\n node_idx = [0]\n lengths_dfs = [-1.0]\n nodes_dfs = [[-1, ]]\n for it in xrange(maxtraversals):\n # score and return current node if adequate\n score = log_T[initial_state, node[0]]\n for p in xrange(1, len(node)):\n score += log_T[node[p - 1], node[p]]\n if min_score <= score and syntax_check(np.array(node), sub_info, partial=False):\n lengths_dfs.append(-score)\n nodes_dfs.append(list(node))\n # next node ##\n # try adding a value at the end\n for next_idx, next_state in enumerate(order[node[-1]]):\n if min_score <= score + log_T[node[-1], next_state] and len(node) < max_depth \\\n and syntax_check(np.array(node + [next_state]), sub_info, partial=True):\n node.append(next_state)\n node_idx.append(next_idx)\n break\n # adding a value at the end failed, so we are a leave\n else:\n for p in xrange(len(node) - 1, -1, -1):\n if node_idx[p] != n_states - 1: # find where within the node to increase (and discard all others after)\n old_idx = node_idx[p]\n del node_idx[p:]\n del node[p:]\n node_idx.append(old_idx + 1)\n prev_state = node[p - 1] if p > 0 else initial_state\n node.append(order[prev_state, node_idx[p]])\n break\n else:\n break # end of the generator, can't increase even the root\n else:\n assert False, \"Number of traversals exceeded\"\n\n return lengths_dfs[1:], nodes_dfs[1:]", "def test_self_loops(self):\n G = DiGraph()\n G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)])\n G.add_edge(0, 0)\n assert_false(is_tournament(G))", "def test_restore_with_subgraph(self):\n subgraph = self._subgraph()\n task = self._remote_task()\n subgraph['id'] = 15\n task['parameters']['containing_subgraph'] = 15\n\n graph = self._restore_graph([subgraph, task])\n assert len(graph.tasks) == 2\n subgraphs = [op for op in graph.tasks if op.is_subgraph]\n remote_tasks = [op for op in graph.tasks if not op.is_subgraph]\n\n assert len(subgraphs) == 1\n assert len(remote_tasks) == 1\n\n assert len(subgraphs[0].tasks) == 1\n assert remote_tasks[0].containing_subgraph is subgraphs[0]", "def test_restore_multiple_in_subgraph(self):\n subgraph = self._subgraph()\n subgraph['id'] = 15\n task1 = self._remote_task()\n task1['id'] = 1\n task2 = self._remote_task()\n task2['id'] = 2\n task1['parameters']['containing_subgraph'] = 15\n task2['parameters']['containing_subgraph'] = 15\n\n graph = self._restore_graph([subgraph, task1, task2])\n assert len(graph.tasks) == 3\n subgraphs = [op for op in graph.tasks if op.is_subgraph]\n remote_tasks = [op for op in graph.tasks if not op.is_subgraph]\n\n # those are all references to the same subgraph, the subgraph was\n # NOT restored multiple times\n assert remote_tasks[0].containing_subgraph \\\n is remote_tasks[1].containing_subgraph \\\n is subgraphs[0]\n\n assert len(subgraphs[0].tasks) == 2", "def visit_next(self, edges):\r\n next_up = self.next_node()\r\n if next_up is not None:\r\n self.visit(next_up, edges[next_up])\r\n return True\r\n return False", "def test_local_efficiency_complete_graph(self):\n for n in range(3, 10):\n G = nx.complete_graph(n)\n assert_equal(nx.local_efficiency(G), 1)", "def multi_edge():\n from networkx.readwrite import json_graph\n import 
networkx as nx\n import autonetkit\n # returns a house graph\n data = {'directed': False,\n 'graph': [],\n 'links': [{'_ports': {'r4': 2, 'r5': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 1},\n {'_ports': {'r2': 3, 'r4': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r2': 4, 'r4': 3},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r3': 3, 'r5': 2},\n 'raw_interfaces': {},\n 'source': 1,\n 'target': 4},\n {'_ports': {'r1': 1, 'r2': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 3, 'r2': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 2, 'r3': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 4, 'r3': 4},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 5, 'r3': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r2': 2, 'r3': 2},\n 'raw_interfaces': {},\n 'source': 3,\n 'target': 4}],\n 'multigraph': True,\n 'nodes': [{'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r4 to r5', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth2'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r4',\n 'label': 'r4',\n 'x': 675,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r5 to r4', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r5 to r3', 'id': 'eth1'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r5',\n 'label': 'r5',\n 'x': 675,\n 'y': 500},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r1',\n 'label': 'r1',\n 'x': 350,\n 'y': 400},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r2 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r2',\n 'label': 'r2',\n 'x': 500,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r3 to r2', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r3 to r5', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r3',\n 'label': 'r3',\n 'x': 500,\n 'y': 500}]}\n graph = json_graph.node_link_graph(data)\n anm = autonetkit.anm.NetworkModel()\n g_in = anm.add_overlay(\"input\")\n g_in._replace_graph(nx.MultiGraph(graph))\n # TODO: check if should build overlays here rather than clone in?\n g_phy = anm[\"phy\"]\n g_phy._replace_graph(graph)\n 
return anm", "def test_control_outputs(self):\n g0_graph = tf.Graph()\n with g0_graph.as_default():\n a0_tensor = tf.constant(1, name=\"a0\")\n b0_tensor = tf.constant(2, name=\"b0\")\n x0_tensor = tf.constant(3, name=\"x0\")\n with tf.control_dependencies([x0_tensor.op]):\n tf.add(a0_tensor, b0_tensor, name=\"c0\")\n\n g0 = gde.Graph(g0_graph)\n x0_node = g0[\"x0\"]\n c0_node = g0[\"c0\"]\n control_outputs = gde.util.ControlOutputs(g0).get_all()\n self.assertEqual(len(control_outputs), 1)\n self.assertEqual(len(control_outputs[x0_node]), 1)\n self.assertIs(list(control_outputs[x0_node])[0], c0_node)", "def test_outgoing_vertex_traversal(self):\r\n e1 = TestEdge.create(self.v1, self.v2, numbers=12)\r\n e2 = TestEdge.create(self.v1, self.v3, numbers=13)\r\n e3 = TestEdge.create(self.v2, self.v3, numbers=14)\r\n\r\n results = self.v1.outV(TestEdge)\r\n assert len(results) == 2\r\n assert self.v2 in results\r\n assert self.v3 in results\r\n\r\n results = self.v1.outV(TestEdge, types=[OtherTestModel])\r\n assert len(results) == 1\r\n assert self.v3 in results", "def test_add_node(num_mutations):\n net = WeightAgnosticNetwork(10, 2, 0.5)\n for _ in range(num_mutations):\n net.mutate()\n\n num_connections_pre, num_neurons_pre, num_layers_pre = get_network_stats(net)\n net.add_node()\n assert net.get_num_connections() == num_connections_pre + 1\n assert net.num_neurons == num_neurons_pre + 1\n assert len(net.neurons_in_layer) == num_layers_pre or len(\n net.neurons_in_layer) == num_layers_pre + 1", "def test_graph_directed():\n topo = complete_topology(5)\n assert isinstance(topo.get_graph(), networkx.DiGraph)\n # even if original graph is undirected\n topo = Topology(u'noname', networkx.star_graph(8))\n assert topo.get_graph().is_directed()", "def test_adjoint():\n dev = DummyDevice(wires=2)\n np.random.seed(42)\n\n shapes = qml.CVNeuralNetLayers.shape(n_layers=1, n_wires=2)\n weights = [np.random.random(shape) for shape in shapes]\n\n @qml.qnode(dev)\n def circuit():\n qml.CVNeuralNetLayers(*weights, wires=[0, 1])\n qml.adjoint(qml.CVNeuralNetLayers)(*weights, wires=[0, 1])\n return qml.expval(qml.QuadX(0))\n\n assert qml.math.allclose(circuit(), 0)", "def test_insert_node_singleton_content_2():\n first = 0\n second = first\n chain = N.Node(second)\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton chain\"", "def every_other(s):\n\n ###############\n # My Solution #\n ###############\n\n if s == Link.empty or s.rest == Link.empty:\n return\n else:\n s.rest = s.rest.rest\n every_other(s.rest)", "def test_advinc_subtensor1():\r\n for shp in [(3, 3), (3, 3, 3)]:\r\n shared = cuda.shared_constructor\r\n xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1\r\n yval = numpy.empty((2,) + shp[1:], dtype='float32')\r\n yval[:] = 10\r\n x = shared(xval, name='x')\r\n y = T.tensor(dtype='float32',\r\n broadcastable=(False,) * len(shp),\r\n name='y')\r\n expr = T.advanced_inc_subtensor1(x, y, [0, 2])\r\n f = theano.function([y], expr, mode=mode_with_gpu)\r\n assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)\r\n for node in f.maker.fgraph.toposort()]) == 1\r\n rval = f(yval)\r\n rep = xval.copy()\r\n rep[[0, 2]] += yval\r\n assert numpy.allclose(rval, rep)", "def test_prune_dnnl_subgraph(run_module):\n\n def get_graph():\n x1 = 
relay.var(\"x1\", shape=(1, 32, 56, 56))\n x2 = relay.var(\"x2\", shape=(1, 32, 56, 56))\n bias = relay.var(\"bias\", shape=(32,))\n weight = relay.var(\"weight\", shape=(32, 32, 3, 3))\n y = relay.nn.conv2d(\n x1,\n weight,\n channels=32,\n kernel_size=(3, 3),\n padding=(1, 1),\n )\n y = relay.nn.bias_add(y, bias)\n y = relay.nn.relu(y)\n y = relay.nn.global_max_pool2d(y)\n y = relay.add(y, x2)\n dic = {\n \"x1\": (1, 32, 56, 56),\n \"x2\": (1, 32, 56, 56),\n \"weight\": (32, 32, 3, 3),\n \"bias\": (32,),\n }\n param_lst = [\"weight\", \"bias\"]\n out = tvm.IRModule.from_expr(y)\n return out, dic, param_lst\n\n run_and_verify_func(get_graph(), subgraph_num=1, run_module=run_module, test_bf16=False)", "def test_restore_with_finished_subgraph(self):\n subgraph = self._subgraph()\n task = self._remote_task()\n subgraph['id'] = 15\n task['parameters']['containing_subgraph'] = 15\n\n subgraph['state'] = tasks.TASK_SUCCEEDED\n\n graph = self._restore_graph([subgraph, task])\n assert len(graph.tasks) == 2\n subgraphs = [op for op in graph.tasks if op.is_subgraph]\n remote_tasks = [op for op in graph.tasks if not op.is_subgraph]\n\n assert len(subgraphs) == 1\n assert len(remote_tasks) == 1\n\n assert len(subgraphs[0].tasks) == 1\n assert remote_tasks[0].containing_subgraph is subgraphs[0]", "def seq_in_link(link, sub_link):\r\n # first_link = []\r\n # while link != Link.empty:\r\n # first_link.append(link.first)\r\n # link = link.rest\r\n #\r\n # while sub_link != Link.empty:\r\n # if sub_link.first in first_link:\r\n # index = first_link.index(sub_link.first)\r\n # first_link = first_link[index:]\r\n # sub_link = sub_link.rest\r\n # else:\r\n # return False\r\n # return True\r\n# this method is too complicated!\r\n while link != Link.empty and sub_link != Link.empty:\r\n if sub_link.first == link.first:\r\n sub_link = sub_link.rest\r\n link = link.rest\r\n\r\n if sub_link == Link.empty:\r\n return True\r\n else:\r\n return False", "def test_insert_node_multiple_content_1():\n first = 0\n second = 1\n third = 3\n chain = N.Node(second, N.Node(third))\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at start)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at start)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at start)\"", "def is_next(first: Node, second: Node) -> bool:\n dests = first.out_port(0).get_destinations()\n if node_has_one_consumer(first):\n return second.id == dests[0].node.id\n elif first.soft_get('maybe_part_of_sequence', False):\n return len(dests) == 2 and second.id in [d.node.id for d in dests]\n return False", "def test_add_znode(self):\n z = self.test_start_empty()\n self.test_start_one_value(z)", "def is_valid_adjacency_matrix(connections, num_intermediate, num_input, num_output):\n\n num_emitting = num_intermediate + num_input\n num_receiving = num_intermediate + num_output\n\n if connections.size(0) < num_receiving:\n return False\n if connections.size(1) < num_emitting:\n return False\n\n embedded_intermediate_size = connections.size(0) - num_output\n #check that dimensions of the connectivity tensor are consistent with single fixed intermediate size\n if embedded_intermediate_size < 0 or embedded_intermediate_size != connections.size(1) - num_input:\n return False\n\n 
# check left-justified\n if connections[num_receiving:, :].sum().item() > 0:\n return False\n if connections[:, num_emitting:].sum().item() > 0:\n return False\n # check that vertices only receive input from ancestors\n for i in range(num_receiving):\n if connections[i, i+ num_input:].sum().item() > 0:\n return False\n return True", "def is_dominated(g,n1,n2): # g: graph; n1,n2: node addresses\n if 'head' in g.nodes[n1]:\n head = g.nodes[n1]['head']\n if head==n2:\n return True\n if is_dominated(g,head,n2):\n return True\n return False", "def test_insert_node_singleton_content_1():\n first = 0\n second = 1\n chain = N.Node(second)\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton chain\"", "def test_propagate(self):\n # Get network components\n data = array([[0], [1]])\n cdata = LabeledCData(data, labels=array([0, 1]))\n encoder = BinaryEncoding(cdata)\n unitary = ProductAnsatz(1)\n measure = Measurement(1, [0])\n qnn = Network([encoder, unitary, measure], \"1q-qvm\")\n\n # Propagate the zeroth data point\n out = qnn.propagate(0, shots=10)\n\n print(out)", "def test_cycle_present_true(test_linkedlist):\n test_linkedlist.insert_end('A')\n test_linkedlist.insert_end('B')\n test_linkedlist.insert_end('C')\n # create a cycle - connection from 3rd to 2nd element\n test_linkedlist.head.next.next.next = test_linkedlist.head.next\n # assert cycle_present() function returns True\n assert test_linkedlist.cycle_present()", "def test_advinc_subtensor1():\r\n for shp in [(3, 3), (3, 3, 3)]:\r\n shared = gpuarray_shared_constructor\r\n xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1\r\n yval = numpy.empty((2,) + shp[1:], dtype='float32')\r\n yval[:] = 10\r\n x = shared(xval, name='x')\r\n y = tensor.tensor(dtype='float32',\r\n broadcastable=(False,) * len(shp),\r\n name='y')\r\n expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])\r\n f = theano.function([y], expr, mode=mode_with_gpu)\r\n assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1)\r\n for node in f.maker.fgraph.toposort()]) == 1\r\n rval = f(yval)\r\n rep = xval.copy()\r\n rep[[0, 2]] += yval\r\n assert numpy.allclose(rval, rep)", "def test_removes_empty_subgraph(self):\n ctx = MockWorkflowContext()\n g = TaskDependencyGraph(ctx)\n\n # sg1 is just empty, no tasks inside it\n sg1 = g.subgraph(ctx)\n # sg2 contains only a NOPTask\n sg2 = g.subgraph(ctx)\n sg2.add_task(tasks.NOPLocalWorkflowTask(ctx))\n\n # sg3 contains sg4, which is empty behcause it only contains a NOPTask\n sg3 = g.subgraph(ctx)\n sg4 = g.subgraph(ctx)\n sg4.add_task(tasks.NOPLocalWorkflowTask(ctx))\n sg3.add_task(sg4)\n\n # sg5 is a subgraph that contains a real task! 
it is not removed\n sg5 = g.subgraph(ctx)\n real_task = tasks.WorkflowTask(ctx)\n sg5.add_task(real_task)\n\n assert set(g.tasks) > {sg1, sg2, sg3, sg4, sg5, real_task}\n g.optimize()\n assert set(g.tasks) == {sg5, real_task}", "def addOutLink(source, target):\n if outlinkGraph.has_key(source):\n # if target not in outlinkGraph[source]: # uncomment to remove repetitives\n outlinkGraph[source].append(target)\n outlinkGraphDegree[source] = outlinkGraphDegree[source] + 1\n else:\n outlinkGraph[source].append(target)\n outlinkGraphDegree[source] = 1", "def local_inplace_incsubtensor1(node):\r\n if isinstance(node.op, AdvancedIncSubtensor1) and not node.op.inplace:\r\n new_op = node.op.__class__(\r\n inplace=True, set_instead_of_inc=node.op.set_instead_of_inc)\r\n new_node = new_op(*node.inputs)\r\n return [new_node]\r\n return False", "def test_graph_adds_and_lists_nodes(graph_no_edges):\n listy = ['BB', 82, 99, 'AA']\n for node in listy:\n assert node in graph_no_edges.nodes()", "def __isub__(self, other):\n\t\t#print(\"isub\")\n\t\t# merge other branch\n\t\tself.graph.update(other.graph)\n\t\tself.bottoms.update(other.bottoms)\n\t\tself.output_shape.update(other.output_shape)\n\t\tlayer_name = \"sub_{}\".format(len(self.graph))\n\t\tself.graph[layer_name] = layer_name\n\t\tself.bottoms[layer_name] = [self.cur_id, other.cur_id]\n\t\tself.output_shape[layer_name] = self.cur_tensor.size()\n\t\tself.cur_id = layer_name\n\t\t# save memory\n\t\tdel other\n\t\treturn self", "def test_insert_node_multiple_content_2():\n first = 0\n second = 1\n third = 3\n chain = N.Node(first, N.Node(third))\n node = N.Node(second)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at mid)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at middle)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at middle)\"", "def test_incoming_vertex_traversal(self):\r\n e1 = TestEdge.create(self.v1, self.v2, numbers=12)\r\n e2 = TestEdge.create(self.v1, self.v3, numbers=13)\r\n e3 = TestEdge.create(self.v2, self.v3, numbers=14)\r\n\r\n results = self.v2.inV(TestEdge)\r\n assert len(results) == 1\r\n assert self.v1 in results\r\n\r\n results = self.v2.inV(TestEdge, types=[OtherTestModel])\r\n assert len(results) == 0", "def substitute_edge_by_network(half_edge_for_sub, network):\n\n assert half_edge_for_sub.opposite is not None\n assert half_edge_for_sub.next is not None\n assert half_edge_for_sub.opposite.next is not None\n\n # Plug it in.\n half_edge_for_sub.insert_all_before(network.zero_pole.prior)\n half_edge_for_sub.opposite.insert_all_after(network.inf_pole.next)\n\n # Remove the link edge of the network if it is not real.\n if not network.is_linked:\n network.zero_pole.remove()\n network.inf_pole.remove()\n\n # Remove the substituted half-edge.\n half_edge_for_sub.remove()\n half_edge_for_sub.opposite.remove()\n\n\n # # Extract the opposite half edge from the tree connected graph.\n # half_edge_for_sub_opp = half_edge_for_sub.opposite\n #\n # # Get the needed edges for manipulation from the network.\n # net_root_half_edge = network.zero_pole\n # net_root_half_edge_next = net_root_half_edge.next\n # net_root_half_edge_prior = net_root_half_edge.prior\n # net_root_half_edge_opp_next = net_root_half_edge.opposite.next\n # net_root_half_edge_opp_prior 
= net_root_half_edge.opposite.prior\n #\n # # Identify the zero pole with the the half_edge_for_sub vertex.\n # half_edge_for_sub.opposite = net_root_half_edge_next.opposite\n # net_root_half_edge_next.opposite.opposite = half_edge_for_sub\n # if net_root_half_edge_next is not net_root_half_edge_prior:\n # # Switch the pointers so that the network_root_edge and its next are\n # # not included\n # half_edge_for_sub_next = half_edge_for_sub.next\n #\n # half_edge_for_sub.next = net_root_half_edge_next.next\n # net_root_half_edge_next.next.prior = half_edge_for_sub\n #\n # half_edge_for_sub_next.prior = net_root_half_edge_prior\n # net_root_half_edge_prior.next = half_edge_for_sub_next\n #\n # # Update the node numbers in the _second network zero-pole half edges\n # half_edge_walker = half_edge_for_sub.next\n # while half_edge_walker != half_edge_for_sub:\n # half_edge_walker.node_nr = half_edge_for_sub.node_nr\n # half_edge_walker = half_edge_walker.next\n #\n # # Identify the inf pole with the the half_edge_for_sub_opp vertex.\n # half_edge_for_sub_opp.opposite = net_root_half_edge_opp_next.opposite\n # net_root_half_edge_opp_next.opposite.opposite = half_edge_for_sub_opp\n # if net_root_half_edge_opp_next is not net_root_half_edge_opp_prior:\n # # Switch the pointers so that the network_root_edge_opp and its next\n # # are not included\n # half_edge_for_sub_opp_next = half_edge_for_sub_opp.next\n #\n # half_edge_for_sub_opp.next = net_root_half_edge_opp_next.next\n # net_root_half_edge_opp_next.next.prior = half_edge_for_sub_opp\n #\n # half_edge_for_sub_opp_next.prior = net_root_half_edge_opp_prior\n # net_root_half_edge_opp_prior.next = half_edge_for_sub_opp_next\n #\n # half_edge_walker = half_edge_for_sub_opp.next\n # while half_edge_walker != half_edge_for_sub_opp:\n # half_edge_walker.node_nr = half_edge_for_sub_opp.node_nr\n # half_edge_walker = half_edge_walker.next", "def depth_first(vertex, the_rest, connected_component):\n for b in tuple(the_rest):\n if b in the_rest and is_edge(vertex,b):\n connected_component.append(b)\n the_rest.remove(b)\n depth_first(b, the_rest, connected_component)\n\n if the_rest:\n return False\n else: \n return True", "def test_insert_node_multiple_structure_2():\n chain = N.Node(1, N.Node(3))\n node = N.Node(2)\n\n result = A8.insert_node(node, chain)\n\n assert result is not None, \"insert_node returned empty chain given a node and chain length 2 (insert between)\"\n assert result.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert between)\"\n assert result.next.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert between)\"\n assert result.next.next.next is None, \"insert_node returned badly formed chain given a node and chain length 2 (insert between)\"", "def test_activation(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents\n cfg = Config()\n cfg.genome.activation_default = 'a'\n cfg.genome.activation_options = {'a': 1, 'b': 2}\n gene1, gene2 = get_output_node_gene(0, cfg.genome)\n \n # Ratio of 0, so always inherits from second parent\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0)\n self.assertEqual(gene3.activation, D_TANH)", "def test_postorder_traversal(depth_one_tree):\n testlist = []\n depth_one_tree.post_order(lambda x: testlist.append(x))\n assert str(testlist) == str([1, 2, 3, 4, 0])", "def in_order_helper(self, node, 
alist=[], verbose=False):\n if node:\n in_order_helper(node.left, alist)\n if verbose:\n print(node.data)\n alist.append(node)\n in_order_helper(node.right, alist)", "def test_add(self):\n tensor1 = Tensor([2, 4, 6, 8], requires_grad=True)\n tensor2 = Tensor([5, 8, 1, 2], requires_grad=True)\n\n tensor3 = tensor1 + tensor2\n tensor3.backward(Tensor([-1., -2., -3., -4.]))\n\n assert tensor3.data.tolist() == [7, 12, 7, 10]\n assert tensor1.grad.data.tolist() == [-1, -2, -3, -4]\n assert tensor2.grad.data.tolist() == [-1, -2, -3, -4]", "def backpropagate(final_variable_with_deriv):\n\n queue = [final_variable_with_deriv]\n visited = set()\n while queue:\n cur = queue.pop(0)\n var = cur.variable\n visited.add(var.name)\n if is_leaf(var):\n cur.variable._add_deriv(cur.deriv)\n else:\n for prev in var.history.chain_rule(cur.deriv):\n if prev.variable.name not in visited:\n queue.append(prev)", "def test_post_order_0_2(bst_all_to_left):\n assert tuple(bst_all_to_left.post_order()) == (1, 3, 2, 5, 4)", "def test_insert_node_multiple_content_3():\n first = 0\n second = 1\n third = 3\n chain = N.Node(first, N.Node(second))\n node = N.Node(third)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at end)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at end)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at end)\"", "def test_insert_node_multiple_structure_1():\n chain = N.Node(1, N.Node(3))\n node = N.Node(0)\n\n result = A8.insert_node(node, chain)\n\n assert result is not None, \"insert_node returned empty chain given a node and chain length 2 (insert at start)\"\n assert result.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert at start)\"\n assert result.next.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert at start)\"\n assert result.next.next.next is None, \"insert_node returned badly formed chain given a node and chain length 2 (insert at start)\"", "def is_dependency_subgraph(graph, subgraph_candidate,\n node_attrib='label', edge_attrib='label'):\n if len(subgraph_candidate) > 1:\n if nx.is_weakly_connected(subgraph_candidate):\n if includes_all_subgraph_rules(graph, subgraph_candidate,\n node_attrib=node_attrib,\n edge_attrib=edge_attrib):\n return True\n return False", "def test_Nin_1out(self):\r\n gval0 = theano.tensor.scalar()\r\n gval1 = theano.tensor.scalar()\r\n\r\n class O(gof.op.Op):\r\n def make_node(self):\r\n inputs = [theano.tensor.scalar(), theano.tensor.scalar()]\r\n outputs = [theano.tensor.matrix()]\r\n return gof.Apply(self, inputs, outputs)\r\n\r\n def grad(self, inp, grads):\r\n x0, x1 = inp\r\n gz, = grads\r\n return (gval0, gval1)\r\n a1 = O().make_node()\r\n g = grad_sources_inputs([(a1.outputs[0], one)], None)\r\n self.assertTrue(g[a1.inputs[0]] is gval0)\r\n self.assertTrue(g[a1.inputs[1]] is gval1)", "def test_variant_traversals() -> None:\n customers = Collection(\n name=\"customer\",\n fields=[\n Field(name=\"id\"),\n Field(name=\"email\", identity=\"email\"),\n Field(\n name=\"user_id\",\n references=[(FieldAddress(\"mysql\", \"user\", \"id\"), \"to\")],\n ),\n ],\n )\n users = Collection(\n name=\"user\",\n fields=[Field(name=\"id\"), Field(name=\"ssn\", identity=\"ssn\")],\n )\n\n graph = 
DatasetGraph(\n Dataset(\n name=\"mysql\",\n collections=[customers, users],\n connection_key=\"mock_connection_config_key\",\n )\n )\n\n traversal_map1, terminators1 = Traversal(\n graph,\n {\"email\": \"X\"},\n ).traversal_map()\n\n assert traversal_map1 == {\n \"__ROOT__:__ROOT__\": {\"from\": {}, \"to\": {\"mysql:customer\": {\"email -> email\"}}},\n \"mysql:customer\": {\n \"from\": {\"__ROOT__:__ROOT__\": {\"email -> email\"}},\n \"to\": {\"mysql:user\": {\"user_id -> id\"}},\n },\n \"mysql:user\": {\"from\": {\"mysql:customer\": {\"user_id -> id\"}}, \"to\": {}},\n }\n assert terminators1 == [CollectionAddress(\"mysql\", \"user\")]\n\n traversal_map2, terminators2 = Traversal(\n graph,\n {\"email\": \"X\", \"ssn\": \"Y\"},\n ).traversal_map()\n assert traversal_map2 == {\n \"__ROOT__:__ROOT__\": {\n \"from\": {},\n \"to\": {\"mysql:user\": {\"ssn -> ssn\"}, \"mysql:customer\": {\"email -> email\"}},\n },\n \"mysql:user\": {\n \"from\": {\n \"__ROOT__:__ROOT__\": {\"ssn -> ssn\"},\n \"mysql:customer\": {\"user_id -> id\"},\n },\n \"to\": {},\n },\n \"mysql:customer\": {\n \"from\": {\"__ROOT__:__ROOT__\": {\"email -> email\"}},\n \"to\": {\"mysql:user\": {\"user_id -> id\"}},\n },\n }\n assert terminators2 == [CollectionAddress(\"mysql\", \"user\")]\n\n with pytest.raises(TraversalError):\n Traversal(\n graph,\n {\"ssn\": \"Y\"},\n ).traversal_map()", "def test_node_error_if_nonpresent(graph_no_edges):\n with pytest.raises(ValueError):\n graph_no_edges.adjacent('Raccoon', 'Rocket')", "def test_propagate_backward_first_hidden(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 1, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n for node in nn.layers[3].nodes:\n node.weights = [1.0, 1.0]\n\n # Walk forward\n nn.propagate_forward([2, 3], test=True)\n\n # Walk backward\n nn.propagate_backward([0])\n\n test_weight = nn.layers[1].nodes[0].weights[0]\n self.assertEqual(round(test_weight, 6), 0.999983)", "def check_adjacency(layer_a, layer_b, topology):\n adjacency = None\n for node in topology.graph.node.keys():\n if layer_a in node:\n # print topology.graph.edge[node].keys()\n for edge in topology.graph.edge[node].keys():\n if layer_b in edge:\n # print topology.graph.edge[node][edge]\n # print \"Node \",layerA,\" and \",layerB,\" share an edge!\"\n adjacency = True\n if adjacency is True:\n return True\n else:\n return False", "def test_dfs():\r\n assert DFS(valid_graph, sorted(list(valid_graph.get_graph().nodes))[0]) == \\\r\n list(nx.dfs_preorder_nodes(valid_graph.get_graph(), sorted(list(valid_graph.get_graph().nodes))[0]))", "def test_reachable_pair(self):\n G = DiGraph([(0, 1), (1, 2), (2, 0)])\n assert_true(is_reachable(G, 0, 2))", "def test_full_graph(self):\r\n x, y, z = tensor.vectors('x', 'y', 'z')\r\n t = x * y\r\n self.check([\r\n (x * 2, x * 2, (({}, True), )),\r\n (x * 2, y * 2, (({}, False), ({y: x}, True), )),\r\n (x * 2, y * 2, (({}, False), ({x: y}, True), )),\r\n (x * 2, y * 3, (({}, False), ({y: x}, False), )),\r\n (t * 2, z * 2, (({}, False), ({t: z}, True), )),\r\n (t * 2, z * 2, (({}, False), ({z: t}, True), )),\r\n (x * (y * z), (x * y) * z, (({}, False), )),\r\n ])", "def fL():\n for n in b.allNodes():\n n.autoplace()", "def transition(self):\n for node in self.net.nodes():\n if node not in self.evidence:\n self.update_node(node)", "def test_insert_node_singleton_content_3():\n first = 0\n second = 1\n chain = N.Node(first)\n node = N.Node(second)\n\n 
result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton chain\"", "def test_add_outgoing_connection():\n\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n i = Intersection(center, radius, speed_limit)\n i2 = Intersection(center, radius, speed_limit)\n i2.add_connection(10.0, 20, 2, 2, 40, 'test2')\n\n start = Coordinates(1,1)\n end = Coordinates(7, 9)\n len = 15\n out_ln = 2\n in_ln = 1\n ang = 3 * math.pi / 2\n\n road = Road(start, end, len, out_ln, in_ln, ang, 20, 'Test')\n\n l = i.get_connections()\n\n assert not l\n\n i.add_outgoing_connection(road)\n\n assert l\n assert l[0].get_length() == 15\n\n l2 = i2.get_connections()\n\n assert l2\n\n i2.add_outgoing_connection(road)\n\n assert l2\n assert l2[1].get_length() == 15", "def test_bst_empty_post_order(bst_empty):\n check_list = []\n bst_empty.post_order_trav(lambda x: check_list.append(x.val))\n assert check_list == []" ]
[ "0.61481655", "0.5825503", "0.575738", "0.5744691", "0.5741752", "0.5737737", "0.57052994", "0.57017195", "0.5676081", "0.56566423", "0.5643218", "0.5618495", "0.5523244", "0.5501655", "0.5474851", "0.54511756", "0.5438007", "0.54232174", "0.5415555", "0.53765196", "0.53371644", "0.5301939", "0.52813953", "0.52788365", "0.52620214", "0.5251265", "0.5250705", "0.5233283", "0.52233946", "0.52207065", "0.52053654", "0.52021235", "0.51950544", "0.5187478", "0.51854694", "0.51690423", "0.5165326", "0.5154277", "0.5151257", "0.5150149", "0.5146293", "0.5136638", "0.513237", "0.51237464", "0.5101746", "0.5093778", "0.5084224", "0.5080339", "0.5071793", "0.5061573", "0.5045897", "0.50390804", "0.50384206", "0.50359535", "0.50333774", "0.5025576", "0.50222915", "0.50200164", "0.50078565", "0.5004275", "0.50040156", "0.50037515", "0.5001868", "0.50000083", "0.49914822", "0.4991406", "0.4991113", "0.49836168", "0.49822497", "0.49748394", "0.4973674", "0.49682292", "0.4963527", "0.49538374", "0.4947095", "0.4940799", "0.49386886", "0.4931825", "0.49312517", "0.49277344", "0.49266782", "0.49243686", "0.49233443", "0.49189362", "0.4918766", "0.4913267", "0.4912718", "0.49097303", "0.4909697", "0.4908935", "0.49077755", "0.49074", "0.49055701", "0.4902039", "0.48938325", "0.4893328", "0.48911402", "0.48902526", "0.48891708", "0.48871893", "0.48850897" ]
0.0
-1
test dictionary input and output layers
def test_export_dict_input_output(self): class Net(torch.nn.Module): """ Model using multiply as functional and module at different depths """ def __init__(self): super().__init__() self.layer = InputOutputDictModel() def forward(self, x): return self.layer(x) model = Net() # Add an empty dictionary as the last element to not treat as named arguments. # see torch.onnx.export() API for more details. dummy_input = ( {'a': torch.randn(1, 10, 10, 10), 'b': torch.randn(1, 10, 10, 10), 'c': torch.randn(1, 10, 10, 10) }, {} ) onnx_path = './data/MyModel.onnx' torch.onnx.export(model, dummy_input, onnx_path) onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input) onnx_model = onnx.load(onnx_path) onnx.checker.check_model(onnx_model) self.check_onnx_node_name_uniqueness(onnx_model) for node in onnx_model.graph.node: print(node.name) assert node.name.startswith('layer')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kwargs_input_dict_output(self):\n\n class KwargModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.mul = aimet_torch.elementwise_ops.Multiply()\n\n def forward(self, a, b, c):\n ab = a * b\n bc = b * c\n ca = self.mul(c, a)\n\n return {'ab': ab, 'bc': bc, 'ca': ca}\n\n class Net(torch.nn.Module):\n \"\"\"\n Model using multiply as functional and module at different depths\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer = KwargModel()\n\n def forward(self, x):\n return self.layer(**x)\n\n model = Net()\n\n # Add an empty dictionary as the last element to not treat as named arguments.\n # see torch.onnx.export() API for more details.\n dummy_input = (\n {'a': torch.randn(1, 10, 10, 10),\n 'b': torch.randn(1, 10, 10, 10),\n 'c': torch.randn(1, 10, 10, 10)\n }, {}\n )\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n for node in onnx_model.graph.node:\n assert node.name.startswith('layer') or node.name.startswith('/layer')\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def test_convert(self):\n for test in self.test_dict_data:\n self.assertEqual(dottedDict(test[0]).data, test[1])", "def test_get_cases_for_dict(self):\n pass", "def test_extract_pytorch_statedict(self):\n\t\t\n\t\t\n\t\tmodel = models.resnet18().state_dict()\n\t\tmodel_name = \"resnet18\"\n \n\t\tlayer_names = model.keys()\n\t\texpected_layer_names = [name for name in layer_names if 'weight' in name or 'bias' in name]\n\t\texpected_num_files = len(expected_layer_names)\t\n\t\t\n\t\t# there are 18 real layers with weights\n\t\tlayer_weightfiles = [name for name in layer_names if 'weight' in name and 'bn' not in name and 'downsample' not in name ]\t\n\t\texpected_num_weightfiles = 18\n\t\tactual_num_weightfiles = (len(layer_weightfiles))\n\t\tself.assertEqual(expected_num_weightfiles,actual_num_weightfiles)\n\n\n\t\twith TemporaryDirectory(dir=TEST_TMP_DIR, prefix=\"ww_\") as model_dir:\n\t\t\tprint(f\"using {model_dir} as model_dir\")\n\t\t\tself.assertTrue(model_dir.startswith(TEST_TMP_DIR))\n\t\t\t\n\t\t\twith TemporaryDirectory(dir=TEST_TMP_DIR, prefix=\"ww_\") as weights_dir:\n\t\t\t\tprint(f\"using {weights_dir} as weights_dir\")\n\t\t\t\tself.assertTrue(weights_dir.startswith(TEST_TMP_DIR))\n\t\t\t\n\t\t\t\tstate_dict_filename = os.path.join(model_dir, \"pys.bin\")\n\t\t\t\ttorch.save(model, state_dict_filename)\n\t\t\t\t\n\t\t\t\tww.WeightWatcher.extract_pytorch_statedict_(weights_dir, model_name, state_dict_filename, format=MODEL_FILE_FORMATS.PYTORCH)\n\t\t\t\n\t\t\t\tweightfiles = [f for f in listdir(weights_dir) if isfile(join(weights_dir, f))]\t\n\t\t\t\tactual_num_files = len(weightfiles)\n\t\t\t\tself.assertEqual(expected_num_files,actual_num_files)\t\t\t\t\n\t\t\t\t\n\t\t\t\t# test that we can read the files ?\t\n\t\t\t\tfor filename in weightfiles:\n\t\t\t\t\tW = np.load(os.path.join(weights_dir,filename))\n\t\t\t\t\tself.assertIsNotNone(W)\n\t\t\t\n\t\t\t\t\t\t\n\t\tself.assertFalse(os.path.isdir(model_dir))\n\t\tself.assertFalse(os.path.isdir(weights_dir))\n\t\t\n\t\treturn", "def test_layer_ok(self):\n self.assertTrue(self.vector)", "def test_layer_instantiation(self):\n model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4)\n\n # Assert the number of elements of the weights.\n tile_weights, 
tile_biases = model.analog_tile.get_weights()\n\n self.assertEqual(tile_weights.numel(), 2*3*4)\n if model.use_bias:\n self.assertEqual(tile_biases.numel(), 3)", "def test_map_basics(self):\n self.assertDigitizerMapBasics(self.map, self.dgroup)", "def test_layer_instantiation(self):\n model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4)\n\n # Assert the number of elements of the weights.\n tile_weights, tile_biases = model.analog_tile.get_weights()\n\n self.assertEqual(tile_weights.numel(), 2*3*4*4)\n if model.use_bias:\n self.assertEqual(tile_biases.numel(), 3)", "def test_layer_instantiation(self):\n model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4)\n\n # Assert the number of elements of the weights.\n tile_weights, tile_biases = model.analog_tile.get_weights()\n\n self.assertEqual(tile_weights.numel(), 2*3*4*4*4)\n if model.use_bias:\n self.assertEqual(tile_biases.numel(), 3)", "def __call__(self, input_dict):\n gt_labels_3d = input_dict[\"gt_labels_3d\"]\n gt_bboxes_mask = np.array(\n [n in self.labels for n in gt_labels_3d], dtype=np.bool_\n )\n\n input_dict[\"gt_bboxes_3d\"] = input_dict[\"gt_bboxes_3d\"][gt_bboxes_mask]\n input_dict[\"gt_labels_3d\"] = input_dict[\"gt_labels_3d\"][gt_bboxes_mask]\n\n if \"gt_tokens\" in input_dict:\n input_dict[\"gt_tokens\"] = input_dict[\"gt_tokens\"][gt_bboxes_mask]\n\n return input_dict", "def run(layers):", "def evaluate_mapped_inputs(self,**kwargs):\n print(\"DEBUG\")\n pprint(kwargs)\n return {}", "def test_comparing(self):\n for test in self.test_dict_data:\n self.assertEqual(dottedDict(test[0]), test[1])", "def test_input_dict(self):\n self.app.app.preprocess_request()\n\n input_dict = {'foo': 'bar'}\n\n resp = self.r(input_dict)\n\n self.assertIsInstance(\n resp, werkzeug.wrappers.Response\n )\n\n self.assertIn(\n 'foo:bar',\n resp.data.decode()\n )", "def test_predictor():", "def test_dictionary(self):\n self.assertIsInstance(self.test1json, dict)", "def test_op_apply_types(self) -> None:\n\n op_add_1 = OpLambda(func=lambda x: x + 1, func_reverse=lambda x: x - 1)\n op_mul_2 = OpLambda(func=lambda x: x * 2, func_reverse=lambda x: x // 2)\n op_mul_4 = OpLambda(func=lambda x: x * 4, func_reverse=lambda x: x // 4)\n\n sample_dict = NDict({})\n sample_dict[\"data.val.img_for_testing\"] = 3\n sample_dict[\"data.test.img_for_testing\"] = 3\n sample_dict[\"data.test.seg_for_testing\"] = 3\n sample_dict[\"data.test.bbox_for_testing\"] = 3\n sample_dict[\"data.test.meta\"] = 3\n\n types_dict = {\n DataTypeForTesting.IMAGE_FOR_TESTING: (op_add_1, dict()),\n DataTypeForTesting.SEG_FOR_TESTING: (op_mul_2, dict()),\n DataTypeForTesting.BBOX_FOR_TESTING: (op_mul_4, dict()),\n }\n\n op_apply_type = OpApplyTypesImaging(types_dict)\n\n sample_dict = op_apply_type(sample_dict, \"_.test_apply_type\")\n self.assertEqual(sample_dict[\"data.val.img_for_testing\"], 4)\n self.assertEqual(sample_dict[\"data.test.img_for_testing\"], 4)\n self.assertEqual(sample_dict[\"data.test.seg_for_testing\"], 6)\n self.assertEqual(sample_dict[\"data.test.bbox_for_testing\"], 12)\n self.assertEqual(sample_dict[\"data.test.meta\"], 3)\n\n sample_dict[\"model.a_seg_for_testing\"] = 3\n op_apply_type.reverse(\n sample_dict,\n key_to_follow=\"data.val.img_for_testing\",\n key_to_reverse=\"model.a_seg_for_testing\",\n op_id=\"_.test_apply_type\",\n )\n self.assertEqual(sample_dict[\"data.val.img_for_testing\"], 4)\n self.assertEqual(sample_dict[\"model.a_seg_for_testing\"], 2)", "def get_outputs(self, input_dict: Dict) -> Dict[str, np.ndarray]:\n 
activation_values = self.session.run(self.activation_names, input_dict)\n return dict(zip(self.sanitized_activation_names, activation_values))", "def test_output_dim_user_input():\n inputs_that_should_fail = [-1, \"aa\", [\"dd\"], [2], 0, 2.5, {2}]\n for input_value in inputs_that_should_fail:\n with pytest.raises(AssertionError):\n RNN(input_dim=3, layers_info=[2, input_value], hidden_activations=\"relu\", output_activation=\"relu\")\n with pytest.raises(AssertionError):\n RNN(input_dim=6, layers_info=input_value, hidden_activations=\"relu\", output_activation=\"relu\")", "def __test_input_fn(self):\n ## Test labels\n labels = self.labels_test\n ## Recast spectra into dictionary for estimator\n features = {'flux': self.spectra_test}\n ## Convert labels to integers\n ilabels = [self.label_index_lookup[l] for l in labels]\n return features, ilabels", "def test_ops_shape(self):\n confs = {'C_in': 3, 'C_out': 8, 'stride': 1, 'affine': True}\n\n for name, layer in OPS.items():\n net = layer(**confs)\n x = torch.rand((16, confs['C_in'], 32, 32))\n y = net(x)\n self.assertEqual(list(y.shape), [16, confs['C_out'], 32, 32])", "def train(self, tdict):\n pass", "def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right 
value.\r\n kx_list = find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')", "def test_check_xyz_dict(self):\n xyz1 = converter.check_xyz_dict(self.xyz1['str'])\n self.assertEqual(xyz1, self.xyz1['dict'])\n\n xyz2 = {'symbols': ('C', 'H', 'H', 'H', 'H'),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n xyz2 = converter.check_xyz_dict(xyz2)\n expected_xyz2 = {'symbols': ('C', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n self.assertEqual(xyz2, expected_xyz2)\n\n xyz3 = 3.0\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz3)\n\n xyz4 = {'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz4)\n\n xyz5 = {'symbols': ('C', 'H', 'H', 'H', 'H', 'S', 'S', 'S'),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz5)\n\n # test a zmat input\n zmat6 = {'symbols': ('N', 'N', 'H', 'H'),\n 'coords': ((None, None, None), ('R_1_0', None, None), ('R_2_1', 'A_2_1_0', None),\n ('R_3_2', 'A_3_2_0', 'D_3_2_0_1')),\n 'vars': {'R_1_0': 1.2451214479859707, 'R_2_1': 1.8953164901754294, 'A_2_1_0': 30.18165946689929,\n 'R_3_2': 2.785552137148173, 'A_3_2_0': 24.405141545817347,\n 'D_3_2_0_1': 3.6222548091772e-06}, 'map': {0: 0, 1: 1, 2: 2, 3: 3}}\n xyz6 = converter.check_xyz_dict(zmat6)\n expected_xyz6 = {'symbols': ('N', 'N', 'H', 'H'),\n 'isotopes': (14, 14, 1, 1),\n 'coords': ((-2.4426534384901547e-09, -4.375090750708016e-09, -0.622560729110669),\n (-2.4426534384901547e-09, -4.375090750708016e-09, 0.6225607188753017),\n (-2.4426534384901547e-09, 0.9528575945413793, -1.015818661524137),\n (7.032081834243086e-08, -0.9528574729632926, 1.015818803737915))}\n\n self.assertEqual(xyz6, expected_xyz6)", "def test_all_layer_types(self):\n\n\t\tdetails = self.watcher.describe()\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\t\t\n\t\n\t\tconv2DLayers = details[details.layer_type==str(LAYER_TYPE.CONV2D)]\n\t\tconv2DCount = len(conv2DLayers)\n\t\tself.assertEqual(conv2DCount, 8, \"8 conv2D layers, but {} found\".format(denseCount))", "def test_predict():\n args = get_layer('predict', 'manual', 'temporal', False, False, window=2, step_size=3)\n run_layer(*args)", "def test_dict_keywords(self):\n output, _err = self.executor.docker.run('lego:1', env=dict(SPECIAL='emett',\n SONG='awesome')).batch()\n self.assertEqual(output, 'everything')", "def test_extract_safetensors_statedict(self):\n\n\n\t\tmodel = models.resnet18().state_dict()\n\t\tmodel_name = \"resnet18\"\n \n\t\tlayer_names = 
model.keys()\n\t\texpected_layer_names = [name for name in layer_names if 'weight' in name or 'bias' in name]\n\t\texpected_num_files = len(expected_layer_names)\t\n\t\tprint(f\"we expect {expected_num_files} files\")\n\t\t\n\t\t# there are 18 real layers with weights\n\t\tlayer_weightfiles = [name for name in layer_names if 'weight' in name and 'bn' not in name and 'downsample' not in name ]\t\n\t\texpected_num_weightfiles = 18\n\t\tactual_num_weightfiles = (len(layer_weightfiles))\n\t\tself.assertEqual(expected_num_weightfiles,actual_num_weightfiles)\n\n\n\t\twith TemporaryDirectory(dir=TEST_TMP_DIR, prefix=\"ww_\") as model_dir:\n\t\t\tprint(f\"using {model_dir} as model_dir\")\n\t\t\tself.assertTrue(model_dir.startswith(TEST_TMP_DIR))\n\t\t\t\n\t\t\twith TemporaryDirectory(dir=TEST_TMP_DIR, prefix=\"ww_\") as weights_dir:\n\t\t\t\tprint(f\"using {weights_dir} as weights_dir\")\n\t\t\t\tself.assertTrue(weights_dir.startswith(TEST_TMP_DIR))\n\t\t\t\n\t\t\t\tstate_dict_filename = os.path.join(model_dir, \"pys.safetensors\")\n\t\t\t\tsafe_save(model, state_dict_filename)\n\t\t\t\t\n\t\t\t\t# if save is false, we get no weightfiles\n\t\t\t\tconfig = ww.WeightWatcher.extract_pytorch_statedict_(weights_dir, model_name, state_dict_filename, format=MODEL_FILE_FORMATS.SAFETENSORS, save=False)\n\t\t\t\tweightfiles = [f for f in listdir(weights_dir) if isfile(join(weights_dir, f))]\t\n\t\t\t\tactual_num_files = len(weightfiles)\n\t\t\t\tself.assertEqual(0,actual_num_files)\t\n\t\t\t\t\n\t\t\t\tprint(len(config.keys()))\n\t\t\t\t\n\t\t\t\t# is save is true, safetensors are extracted\n\t\t\t\tww.WeightWatcher.extract_pytorch_statedict_(weights_dir, model_name, state_dict_filename, format=MODEL_FILE_FORMATS.SAFETENSORS, save=True)\n\t\t\t\tweightfiles = [f for f in listdir(weights_dir) if isfile(join(weights_dir, f))]\t\n\t\t\t\tprint(weightfiles)\n\t\t\t\tactual_num_files = len(weightfiles)\n\t\t\t\tself.assertEqual(expected_num_files,actual_num_files)\t\n\t\t\t\tprint(f\"checked {actual_num_files} weightfiles\")\t\t\t\n\t\t\t\t\n\t\t\t\t# test that we can read the files ?\t\n\t\t\t\tfor filename in weightfiles:\n\t\t\t\t\tW = np.load(os.path.join(weights_dir,filename))\n\t\t\t\t\tself.assertIsNotNone(W)\n\t\t\t\n\t\t\t\t\t\t\n\t\tself.assertFalse(os.path.isdir(model_dir))\n\t\tself.assertFalse(os.path.isdir(weights_dir))\n\t\t\n\t\treturn", "def test_dictionary_io(self):\n dict_val = {'blake':31, 'something_else':'that'}\n v1 = DictionaryTestVertex.create(test_id=5, map_val=dict_val)\n v2 = DictionaryTestVertex.get(v1.vid)\n\n assert v2.map_val == dict_val", "def test_dictionary_io(self):\r\n dict_val = {'blake':31, 'something_else':'that'}\r\n v1 = DictionaryTestVertex.create(test_id=5, map_val=dict_val)\r\n v2 = DictionaryTestVertex.get(v1.vid)\r\n\r\n assert v2.map_val == dict_val", "def test_output_named_tuple_vs_dictionary_1():\n assert isclose(average_age, average_age_d), \"Average age cannot be different for Named Tuple and Dictionary list\"", "def test_output_head_layers():\n for output_dim in [[[\"linear\", 3],[\"linear\", 9]], [[\"linear\", 4], [\"linear\", 20]], [[\"linear\", 1], [\"linear\", 1]]]:\n nn_instance = RNN(input_dim=5, layers_info=[[\"gru\", 20], [\"lstm\", 8], output_dim],\n hidden_activations=\"relu\", output_activation=[\"softmax\", None])\n assert nn_instance.output_layers[0].out_features == output_dim[0][1]\n assert nn_instance.output_layers[0].in_features == 8\n assert nn_instance.output_layers[1].out_features == output_dim[1][1]\n assert 
nn_instance.output_layers[1].in_features == 8", "def example_param_fn(layer_type, in_shape=None, **other_params):\n layer_type = layer_type.lower()\n d={}\n if layer_type == 'batchnormalization':\n d['momentum'] = 0.99\n d['epsilon'] = 1e-3\n elif layer_type == 'dense':\n # Le 'gain=2.0' ici (et pour la conv2D ci-dessous) suppose l'utilisation d'un Relu comme activation,\n # Voir l'article 'Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification'\n # Equation 12.\n d['kernel_initializer'] = initializers.Orthogonal(gain=2.0, seed=None)\n d['kernel_regularizer'] = None\n elif layer_type == 'conv2d':\n d['type'] = 'Conv2D'\n d['kernel_size'] = (3,3) # Kernel size of the spatial convolution (used only if no 'kernel_size' is specified)\n d['padding'] = 'same'\n d['kernel_initializer'] = initializers.Orthogonal(gain=2.0, seed=None)\n d['kernel_regularizer'] = None\n elif layer_type == 'averagepooling2d':\n d['padding'] = 'same'\n elif layer_type == 'shortcut':\n d['use_projection'] = True # Option (B) of paper 'Deep Residual Learning for Image Recognition' Pg 4\n elif layer_type == 'cardinality':\n # Number of convolutions in the grouped convolutions. For each 3x3 convolution,\n # the number of filters is filters // cardinality\n # Only used for the ResNeXt\n return 4\n return d", "def extract_feat(self, batch_inputs: Tensor) -> dict:\n pass", "def test_with_dict(self, seed):\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim = Categorical(\"yolo\", OrderedDict(zip(categories, probs)))\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert samples[0] == 2\n assert dim._probs == probs\n\n assert categories == dim.categories\n\n assert 2 in dim\n assert 0 not in dim\n\n assert dim.name == \"yolo\"\n assert dim.type == \"categorical\"\n assert dim.shape == ()", "def test_conformer_to_xyz_dict(self):\n xyz_dict = converter.rmg_conformer_to_xyz(self.conformer_12)\n self.assertTrue(almost_equal_coords_lists(xyz_dict, self.xyz_dict_12))\n self.assertEqual(xyz_dict['isotopes'], self.xyz_dict_12['isotopes'])", "def test_default(config, expected):\n if isinstance(expected, dict):\n formatted_config = parse_default(config, MODEL)\n try:\n assert expected == formatted_config\n except AssertionError:\n for k, d in formatted_config[\"model\"][\"layers\"].items():\n for opt in [\"user_vals\"]:\n try:\n assert (\n d[\"options\"][opt]\n is expected[\"model\"][\"layers\"][k][\"options\"][opt]\n ), f\"layer {k} does not have matching {opt}\"\n except AssertionError:\n for i, a in enumerate(d[\"options\"][opt]):\n b = expected[\"model\"][\"layers\"][k][\"options\"][opt][i]\n try:\n assert (\n a is b\n ), f\"layer {k} does not have matching {opt} for {a} != {b}\"\n except AssertionError:\n if issubclass(\n type(b), tf.keras.regularizers.Regularizer\n ):\n # TODO: implement more in depth check\n assert issubclass(\n type(a), tf.keras.regularizers.Regularizer\n )\n elif issubclass(\n type(b), tf.keras.initializers.Initializer\n ):\n # TODO: implement more in depth check\n assert issubclass(\n type(a), tf.keras.initializers.Initializer\n )\n else:\n assert (\n a == b\n ), f\"{opt} in layer {k} does not match: {a} != {b}\"\n for opt in [\"func\", \"func_args\", \"func_defaults\"]:\n assert (\n d[\"layer_base\"][opt]\n == expected[\"model\"][\"layers\"][k][\"layer_base\"][opt]\n ), f\"layer {k} does not have matching {opt}\"\n for opt in [\"layer_in_name\"]:\n # print(d[opt])\n assert (\n d[opt] == expected[\"model\"][\"layers\"][k][opt]\n ), 
f\"layer {k} does not have matching {opt}\"\n\n elif isinstance(expected, ValueError):\n with pytest.raises(ValueError):\n formatted_config = parse_default(config, MODEL)\n elif isinstance(expected, TypeError):\n with pytest.raises(TypeError):\n formatted_config = parse_default(config, MODEL)", "def test_dict(self, testdata: TestData) -> None:\n for data in testdata['observation_type']:\n observation_type = ObservationType.from_dict(data)\n assert data == observation_type.to_dict()", "def test_init() -> None:\n neural_net = NeuralNetwork()\n assert neural_net.model.get_layer('output_layer').output_shape, (None, 4)", "def test_model_layer_types_ww2x(self):\n \n\t\tdetails = self.watcher.describe(pool=False, min_evals=1)\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\n \t\t\t\n\t\n\t\tconv2DLayers = details[details.layer_type==str(LAYER_TYPE.CONV2D)]\n\t\tconv2DCount = len(conv2DLayers)\n\t\tself.assertEqual(conv2DCount, 8*9, \"8*9 conv2D layers, but {} found\".format(denseCount))", "def test_output_layers_created_correctly():\n layers = [[\"gru\", 25], [\"lstm\", 23], [\"linear\", 5], [\"linear\", 10]]\n\n rnn = RNN(input_dim=5, layers_info=layers, hidden_activations=\"relu\", output_activation=\"relu\")\n\n assert rnn.output_layers[0].in_features == 5\n assert rnn.output_layers[0].out_features == 10\n\n layers = [[\"gru\", 25], [\"lstm\", 23], [\"lstm\", 10]]\n\n rnn = RNN(input_dim=5, layers_info=layers, hidden_activations=\"relu\",\n output_activation=\"relu\")\n\n assert rnn.output_layers[0].input_size == 23\n assert rnn.output_layers[0].hidden_size == 10\n\n layers = [[\"gru\", 25], [\"lstm\", 23], [[\"lstm\", 10], [\"linear\", 15]]]\n rnn = RNN(input_dim=5, layers_info=layers, hidden_activations=\"relu\",\n output_activation=[\"relu\", \"softmax\"])\n\n assert rnn.output_layers[0].input_size == 23\n assert rnn.output_layers[0].hidden_size == 10\n\n assert rnn.output_layers[1].in_features == 23\n assert rnn.output_layers[1].out_features == 15", "def check_metadata(layer_name, neuron_indices, ideal_activation,\n multiply_by_input):\n\n error_checking.assert_is_string(layer_name)\n error_checking.assert_is_integer_numpy_array(neuron_indices)\n error_checking.assert_is_geq_numpy_array(neuron_indices, 0)\n error_checking.assert_is_numpy_array(neuron_indices, num_dimensions=1)\n error_checking.assert_is_not_nan(ideal_activation)\n error_checking.assert_is_boolean(multiply_by_input)", "def setUp(self):\n\t\t#import inspect\n\n\t\tprint(\"\\n-------------------------------------\\nIn Test_PyStateDictLayers:\", self._testMethodName)\n\t\tww.weightwatcher.torch = torch\n\t\tself.model = models.resnet18().state_dict()\n\t\t#for cls in inspect.getmro(type(self.model)):\n\t\t#\tprint(str(cls))\n \n\t\tfor key in self.model.keys():\n\t\t\tif key.endswith('.weight'):\n\t\t\t\tlayer_name = key[:-len('.weight')]\n\t\t\t\tself.fc_layer_name = layer_name", "def test_light_dict(self):\n light = self._create_example_light()\n\n dictionary = light.as_dict()\n assert isinstance(dictionary, dict)\n assert dictionary == {\"warning\": False, \"off\": True}", "def layer_compare(accuracy_layer_numbers):\n width = np.linspace(3, 12, 10, dtype=int)\n layer = np.linspace(1, 9, 9, dtype=int)\n accurs = np.ones([10, 9], dtype=float)\n\n for m in range(10):\n for n in range(9):\n accurs[m][n] = accuracy_layer_numbers[(width[m], layer[n])]\n\n linecolor = ['gold', 
'red', 'green', 'springgreen', 'black', 'blue', 'cyan', 'blueviolet', 'magenta', 'navy']\n\n layer_show_3D(layer, width, accurs,\n title='不同数目卷积层和卷积核宽度的分类准确率',\n path='E:/JackokiePapers/figures/chapter_5/fig_5_1.png')\n\n layer_hot_map(layer, width, accurs,\n colormap=cm.rainbow,\n path='E:/JackokiePapers/figures/chapter_5/fig_5_2.png')\n\n layer_show_2D(layer, width, accurs, [0, 72],\n linecolor=linecolor,\n mean_show=True,\n path='E:/JackokiePapers/figures/chapter_5/fig_5_4.png')", "def test_sum_dict_values(self, mocker):\n\n mocked = mocker.patch.object(\n LeafNodeScaledConformalPredictor, \"_sum_dict_values\"\n )\n\n dummy_confo_model = DummyLeafNodeScaledConformalPredictor()\n\n # set leaf_node_counts attribute so np.apply_along_axis can run\n dummy_confo_model.leaf_node_counts = {\"a\": 1}\n\n leaf_node_predictions_value = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n\n dummy_confo_model._count_leaf_node_visits_from_calibration(\n leaf_node_predictions_value\n )\n\n assert (\n mocked.call_count == leaf_node_predictions_value.shape[0]\n ), \"incorrect number of calls to _sum_dict_values\"\n\n for call_no in range(leaf_node_predictions_value.shape[0]):\n\n call_args = mocked.call_args_list[call_no]\n call_pos_args = call_args[0]\n call_kwargs = call_args[1]\n\n assert call_kwargs == {\n \"counts\": dummy_confo_model.leaf_node_counts\n }, f\"keyword args in _sum_dict_values call {call_no} incorrect\"\n\n assert (\n len(call_pos_args) == 1\n ), f\"number of positional args in _sum_dict_values call {call_no} incorrect\"\n\n np.testing.assert_array_equal(\n call_pos_args[0], leaf_node_predictions_value[call_no, :]\n )", "def check_for_dict(check):", "def test_ww_layer_attributes(self):\n\t\t\n\t\tww_layer = self._get_resnet_fc_layer()\n\t\t\t\t\t\n\t\texpected_type = \"<class 'weightwatcher.weightwatcher.WWLayer'>\"\n\t\tactual_type = str(type(ww_layer))\n\t\tself.assertEqual(expected_type, actual_type)\n\t\t\n\t\t# RESET FOR WW_FLATFILES vs PYSTATEDICT vs ...\n\t\texpected_name = self.fc_layer_name \n\t\tactual_name = ww_layer.name\n\t\tself.assertEqual(expected_name, actual_name)\n\t\t\n\t\tframework_layer = ww_layer.framework_layer\n\t\tself.assertTrue(framework_layer is not None)\n\t\t\n\t\t# RESET FOR WW_FLATFILES vs PYSTATEDICT vs ...\n\t\texpected_type = self.fc_layer_type \n\t\tactual_type = str(type(framework_layer))\n\t\tself.assertEqual(expected_type, actual_type)\n\t\n\t\tself.assertEqual(ww_layer.name, framework_layer.name)\n\t\t\n\t\t\n\t\thas_weights, weights, has_biases, biases = ww_layer.get_weights_and_biases()\n\t\tself.assertTrue(has_weights)\n\t\tself.assertTrue(has_biases)\n\t\tself.assertTrue(weights is not None)\n\t\tself.assertTrue(biases is not None)\n\t\t\n\t\texpected_W_shape = (1000, 512)\n\t\texpected_B_shape = (1000,)\n\t\tactual_W_shape = weights.shape\n\t\tactual_B_shape = biases.shape\n\t\t\n\t\tself.assertEqual(expected_W_shape, actual_W_shape)\n\t\tself.assertEqual(expected_B_shape, actual_B_shape)\n\t\t\n\t\treturn", "def test_pipeline(inputs, prefix, **config):\n if config['expert_model'] == 'adapnet':\n # Now we get the network output of the Adapnet expert.\n outputs = adapnet(inputs, prefix, config['num_units'], config['num_classes'])\n elif config['expert_model'] == 'fcn':\n outputs = fcn(inputs, prefix, config['num_units'], config['num_classes'],\n trainable=False, batchnorm=False)\n else:\n raise UserWarning('ERROR: Expert Model %s not found' % config['expert_model'])\n outputs['prob'] = tf.nn.softmax(outputs['score'])\n outputs['classification'] 
= tf.argmax(outputs['prob'], 3)\n return outputs", "def dict_to_tf_example(label_map_dict):\n filename = label_map_dict[0]\n img_path = os.path.join(FLAGS.image_data_dir, filename)\n\n try:\n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n except:\n logging.warning('Image Not Found %s', img_path)\n return None\n\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n (witdh, height) = image.size\n\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n sentence_txt = label_map_dict[1]\n\n\n sentences = []\n f = open('dictionary.json', 'r')\n dictionary = f.read()\n dictionary = json.loads(dictionary)\n for index, _ in enumerate(sentence_txt):\n sentence = []\n for sen in sentence_txt[index].split(' '):\n try:\n sentence.append(dictionary[sen])\n except KeyError:\n sentence.append(dictionary['UNK'])\n sentences.append(sentence)\n\n feature_dict = {\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(witdh),\n 'image/filename': dataset_util.bytes_feature(filename.encode('utf8')),\n 'image/score_0': dataset_util.int64_list_feature(sentences[0]),\n 'image/score_1': dataset_util.int64_list_feature(sentences[1]),\n 'image/score_2': dataset_util.int64_list_feature(sentences[2]),\n 'image/score_3': dataset_util.int64_list_feature(sentences[3]),\n 'image/score_4': dataset_util.int64_list_feature(sentences[4]),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8'))\n }\n\n example = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n return example", "def test_init_ops(self):\n tf.reset_default_graph()\n tf.set_random_seed(42)\n self.imagenet_inception_v3.set_up()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_param = [\n np.prod(v.get_shape().as_list())\n for v in tf.trainable_variables()\n ]\n # Check if number of parameters per \"layer\" is equal to what we expect\n # We will write them in the following form:\n # - Conv layer: [input_filter*output_filter*kernel[0]*kernel[1]]\n # - Batch norm: [input, input] (for beta and gamma)\n # - Fully connected: [input*output]\n # - Bias: [dim]\n self.assertEqual(\n num_param, [\n 3 * 32 * 3 * 3, 32, 32, 32 * 32 * 3 * 3, 32, 32,\n 32 * 64 * 3 * 3, 64, 64, 64 * 80 * 1 * 1, 80, 80,\n 80 * 192 * 3 * 3, 192, 192, 192 * 64 * 1 * 1, 64, 64,\n 192 * 32 * 1 * 1, 32, 32, 192 * 48 * 1 * 1, 48, 48,\n 48 * 64 * 5 * 5, 64, 64, 192 * 64 * 1 * 1,\n 64, 64, 64 * 96 * 3 * 3, 96, 96, 96 * 96 * 3 * 3, 96, 96,\n (64 + 32 + 64 + 96) * 64 * 1 * 1, 64, 64,\n (64 + 32 + 64 + 96) * 64 * 1 * 1, 64, 64,\n (64 + 32 + 64 + 96) * 48 * 1 * 1, 48, 48, 48 * 64 * 5 * 5,\n 64, 64, (64 + 32 + 64 + 96) * 64 * 1 * 1, 64, 64,\n 64 * 96 * 3 * 3, 96, 96, 96 * 96 * 3 * 3, 96, 96,\n (64 + 64 + 64 + 96) * 64 * 1 * 1, 64, 64,\n (64 + 64 + 64 + 96) * 64 * 1 * 1, 64, 64,\n (64 + 64 + 64 + 96) * 48 * 1 * 1, 48, 48, 48 * 64 * 5 * 5,\n 64, 64, (64 + 64 + 64 + 96) * 64 * 1 * 1, 64, 64,\n 64 * 96 * 3 * 3, 96, 96, 96 * 96 * 3 * 3, 96, 96,\n (64 + 64 + 64 + 96) * 384 * 3 * 3, 384, 384,\n (64 + 64 + 64 + 96) * 64 * 1 * 1, 64, 64, 64 * 96 * 3 * 3,\n 96, 96, 96 * 96 * 3 * 3, 96, 96,\n ((64 + 64 + 64 + 96) + 384 + 96) * 192 * 1 * 1, 192, 192,\n ((64 + 64 + 64 + 96) + 384 + 96) * 192 * 1 * 1, 192, 192,\n ((64 + 64 + 64 + 96) + 384 + 96) * 128 * 1 * 1, 128, 128,\n 
128 * 128 * 1 * 7, 128, 128, 128 * 192 * 7 * 1, 192, 192,\n ((64 + 64 + 64 + 96) + 384 + 96) * 128 * 1 * 1, 128, 128,\n 128 * 128 * 7 * 1, 128, 128, 128 * 128 * 1 * 7, 128, 128,\n 128 * 128 * 7 * 1, 128, 128, 128 * 192 * 1 * 7, 192, 192,\n (192 + 192 + 192 + 192) * 192 * 1 * 1, 192, 192,\n (192 + 192 + 192 + 192) * 192 * 1 * 1, 192, 192,\n (192 + 192 + 192 + 192) * 160 * 1 * 1, 160, 160,\n 160 * 160 * 1 * 7, 160, 160, 160 * 192 * 7 * 1, 192, 192,\n (192 + 192 + 192 + 192) * 160 * 1 * 1, 160, 160,\n 160 * 160 * 7 * 1, 160, 160, 160 * 160 * 1 * 7, 160, 160,\n 160 * 160 * 7 * 1, 160, 160, 160 * 192 * 1 * 7, 192, 192,\n (192 + 192 + 192 + 192) * 192 * 1 * 1, 192, 192,\n (192 + 192 + 192 + 192) * 192 * 1 * 1, 192, 192,\n (192 + 192 + 192 + 192) * 160 * 1 * 1, 160, 160,\n 160 * 160 * 1 * 7, 160, 160, 160 * 192 * 7 * 1, 192, 192,\n (192 + 192 + 192 + 192) * 160 * 1 * 1, 160, 160,\n 160 * 160 * 7 * 1, 160, 160, 160 * 160 * 1 * 7, 160, 160,\n 160 * 160 * 7 * 1, 160, 160, 160 * 192 * 1 * 7, 192, 192,\n (192 + 192 + 192 + 192) * 192 * 1 * 1, 192, 192,\n (192 + 192 + 192 + 192) * 192 * 1 * 1, 192, 192,\n (192 + 192 + 192 + 192) * 192 * 1 * 1, 192, 192,\n 192 * 192 * 1 * 7, 192, 192, 192 * 192 * 7 * 1, 192, 192,\n (192 + 192 + 192 + 192) * 192 * 1 * 1, 192, 192,\n 192 * 192 * 7 * 1, 192, 192, 192 * 192 * 1 * 7, 192, 192,\n 192 * 192 * 7 * 1, 192, 192, 192 * 192 * 1 * 7, 192, 192,\n (192 + 192 + 192 + 192) * 128 * 1 * 1, 128, 128,\n 128 * 768 * 5 * 5, 768, 768, 768 * 1001, 1001,\n (192 + 192 + 192 + 192) * 192 * 1 * 1, 192, 192,\n 192 * 192 * 1 * 7, 192, 192, 192 * 192 * 7 * 1,\n 192, 192, 192 * 192 * 3 * 3, 192, 192,\n (192 + 192 + 192 + 192) * 192 * 1 * 1, 192, 192,\n 192 * 320 * 3 * 3, 320, 320,\n (4 * 192 + 192 + 320) * 320 * 1 * 1, 320, 320,\n (4 * 192 + 192 + 320) * 384 * 1 * 1, 384, 384,\n 384 * 384 * 1 * 3, 384, 384, 384 * 384 * 3 * 1, 384, 384,\n (4 * 192 + 192 + 320) * 448 * 1 * 1, 448, 448,\n 448 * 384 * 3 * 3, 384, 384,\n 384 * 384 * 1 * 3, 384, 384, 384 * 384 * 3 * 1, 384, 384,\n (4 * 192 + 192 + 320) * 192, 192, 192,\n (320 + 384 * 2 + 384 * 2 + 192) * 320 * 1 * 1, 320, 320,\n (320 + 384 * 2 + 384 * 2 + 192) * 384 * 1 * 1, 384, 384,\n 384 * 384 * 1 * 3, 384, 384, 384 * 384 * 3 * 1, 384, 384,\n (320 + 384 * 2 + 384 * 2 + 192) * 448 * 1 * 1, 448, 448,\n 448 * 384 * 3 * 3, 384, 384, 384 * 384 * 1 * 3, 384,\n 384, 384 * 384 * 3 * 1, 384, 384,\n (320 + 384 * 2 + 384 * 2 + 192) * 192, 192, 192,\n 2048 * 1001, 1001\n ])\n for init_op in [\n self.imagenet_inception_v3.train_init_op,\n self.imagenet_inception_v3.test_init_op,\n self.imagenet_inception_v3.train_eval_init_op\n ]:\n sess.run(init_op)\n losses_, regularizer_, accuracy_ = sess.run([\n self.imagenet_inception_v3.losses,\n self.imagenet_inception_v3.regularizer,\n self.imagenet_inception_v3.accuracy\n ])\n self.assertEqual(losses_.shape, (self.batch_size, ))\n self.assertIsInstance(regularizer_, np.float32)\n self.assertIsInstance(accuracy_, np.float32)", "def check_weights(task, out_dict):\n f = open(\"correct_weights_\" + str(task.pid) + \".txt\", \"r\")\n correct_dump = json.load(f)\n\n missing_weights = 0\n missing_layers = 0\n diff_weights = 0\n sum_diff = 0\n missing_arr = []\n diff_layers = []\n\n for layer in correct_dump['tensors']:\n if (layer in out_dict['tensors']):\n print (layer)\n \n correct_arr = correct_dump['tensors'][layer]\n recovered_arr = out_dict['tensors'][layer]\n\n diff_pos = []\n \n if (len(recovered_arr) != len(correct_arr)):\n print \"Shapes Different\"\n else:\n for i in range(len(correct_arr)):\n if 
(recovered_arr[i] != correct_arr[i]):\n diff_pos.append(i)\n\n if (len(diff_pos) == len(correct_arr)):\n print \"No Valid Tensors\"\n else:\n print(\"{} weights different\".format(len(diff_pos)))\n print (diff_pos)\n sum_diff += len(diff_pos)\n if len(diff_pos) > 0:\n diff_layers.append(layer)\n print\n\n else:\n missing_layers += 1\n missing_weights += len(correct_dump['tensors'][layer])\n missing_arr.append(layer)\n\n print (\"Correct model_name: {}\".format(correct_dump['model_name']))\n print(\"Received model_name: {}\".format(out_dict['model_name']))\n print (\"Correct num_elements: {}\".format(correct_dump['num_elements']))\n print (\"Received num_elements: {}\\n\".format(out_dict['num_elements']))\n print (len(diff_layers))\n print (diff_layers)\n print (sum_diff)\n print (\"{} layers not found\".format(missing_layers))\n print (missing_arr)\n print (\"{} out of {} found weights are different\".format(sum_diff, correct_dump['num_elements'] - missing_weights))", "def get_features(inp_layer, pad=0):\n # Note: tweaked to use average pooling instead of maxpooling\n net = OrderedDict()\n net['conv1_1'] = ConvLayer(inp_layer, 64, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')\n net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')\n net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')\n net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n #net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')\n #net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n #net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n #net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n #net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=pad, flip_filters=False, nonlinearity=rectify)\n #net['pool5'] = PoolLayer(net['conv5_4'], 2, mode='average_exc_pad')\n\n nb_params = len(nn.layers.get_all_params(net['conv4_4']))\n\n values = pickle.load(open('weights/vgg19_normalized.pkl', 'rb'), encoding='latin1')['param values']\n nn.layers.set_all_param_values(net['conv4_4'], values[:nb_params])\n\n return net", "def test_save_load_state_dict_train(self):\n model = self.get_model()\n\n # Perform an update in order to modify tile weights and biases.\n loss_func = mse_loss\n if isinstance(model, 
AnalogConv2d):\n input_x = Tensor(rand(2, 2, 3, 3))*0.2\n input_y = Tensor(rand(2, 3, 4, 4))*0.2\n else:\n input_x = Tensor(rand(2, model.in_features))*0.2\n input_y = Tensor(rand(2, model.out_features))*0.2\n\n if self.USE_CUDA:\n input_x = input_x.cuda()\n input_y = input_y.cuda()\n\n self.train_model(model, loss_func, input_x, input_y)\n\n # Keep track of the current weights and biases for comparing.\n (model_weights, model_biases,\n tile_weights, tile_biases) = self.get_model_and_tile_weights(model)\n\n # now the tile weights should be out of sync\n assert_raises(AssertionError, assert_array_almost_equal, model_weights, tile_weights)\n assert_raises(AssertionError, assert_array_almost_equal, model_biases, tile_biases)\n\n # Save the model to a file.\n file = TemporaryFile()\n save(model.state_dict(), file)\n\n # Create a new model and load its state dict.\n file.seek(0)\n new_model = self.get_model()\n new_model.load_state_dict(load(file))\n file.close()\n\n # Compare the new model weights and biases. they should now be in sync\n (new_model_weights, new_model_biases,\n new_tile_weights, new_tile_biases) = self.get_model_and_tile_weights(new_model)\n\n assert_array_almost_equal(tile_weights, new_model_weights)\n assert_array_almost_equal(tile_biases, new_model_biases)\n assert_array_almost_equal(tile_weights, new_tile_weights)\n assert_array_almost_equal(tile_biases, new_tile_biases)", "def handle_input(data: dict):", "def forgiving_state_restore1(net, loaded_dict):\n net_state_dict = net.state_dict()\n new_loaded_dict = {}\n count_all,count_same1,count_same2 = 0, 0,0\n for k in net_state_dict:\n count_all += 1\n if k.split('.')[0] == 'resnet_features':\n if k[16:] in loaded_dict and net_state_dict[k].size() == loaded_dict[k[16:]].size():\n new_loaded_dict[k] = loaded_dict[k[16:]]\n count_same1 += 1\n elif k[16:] in loaded_dict and net_state_dict[k].size() != loaded_dict[k[16:]].size():\n count_same2 += 1\n else:\n print(\"跳过{0}的参数加载\".format(k))\n print('总参数{}个,相同参数{}个,大小不同{}个'.format(count_all,count_same1,count_same2))\n net_state_dict.update(new_loaded_dict)\n net.load_state_dict(net_state_dict)\n return net", "def visualize_saliency_of_output(FLAGS, model, input_images=[]):\n if len(input_images) == 0:\n # use predefined images\n img_dir='/esat/opal/kkelchte/docker_home/pilot_data/visualization_images'\n input_images=sorted([img_dir+'/'+f for f in os.listdir(img_dir)])\n\n print(\"[tools.py]: extracting saliency maps of {0} in {1}\".format([os.path.basename(i) for i in input_images], os.path.dirname(input_images[0])))\n \n \n inputs = load_images(input_images, model.input_size[1:])\n\n print 'shape: ',inputs.shape\n\n if 'nfc' in FLAGS.network:\n inputs = np.concatenate([inputs]*FLAGS.n_frames, axis=-1)\n \n # extract deconvolution\n import tf_cnnvis\n\n # layers = ['c']\n # layers=['MobilenetV1_1/control/Conv2d_1c_1x1/Conv2D']\n # layers=['MobilenetV1_1/control/Conv2d_1c_1x1/Conv2D','MobilenetV1_1/AvgPool_1a/AvgPool']\n\n # layers = [str(i.name) for i in model.sess.graph.get_operations() if 'outputs' in i.name and not 'activations' in i.name and not 'gradients' in i.name]\n layers = [model.endpoints['eval']['outputs'].name[:-2]] #cut out :0 in the end to change name from tensor to operation name\n # layers = ['outputs']\n \n # results = tf_cnnvis.activation_visualization(sess_graph_path = model.sess, \n # value_feed_dict = {model.inputs : inputs}, \n # layers=layers)\n results = tf_cnnvis.deconv_visualization(sess_graph_path = model.sess, \n value_feed_dict = {model.inputs : 
inputs}, \n layers=layers)\n\n # Normalize deconvolution within 0:1 range\n num_rows=0\n clean_results={} \n # Loop over layers\n for k in results.keys():\n clean_results[k]=[]\n # Loop over channels\n for c in range(len(results[k])):\n num_rows+=1\n clean_results[k].append(np.zeros((results[k][c].shape[0:3])))\n # Loop over images\n for i in range(results[k][c].shape[0]):\n clean_results[k][c][i]=deprocess_image(results[k][c][i],one_channel=True)\n if num_rows > 6:\n print(\"[tools.py]: There are too many columns to create a proper image.\")\n return\n\n # create one combined image with each input image on each column\n fig, axes = plt.subplots(num_rows+1,min(len(input_images),5),figsize=(23, 4*(2*len(results.keys())+1)))\n # fig, axes = plt.subplots(num_columns+1,min(len(input_images),5),figsize=(23, 4*(2*len(results.keys())+1)))\n # add original images in first row\n for i in range(axes.shape[1]):\n axes[0, i].set_title(os.path.basename(input_images[i]).split('.')[0])\n axes[0, i].imshow(matplotlibprove(inputs[i]), cmap='inferno')\n axes[0, i].axis('off')\n \n # experts=np.asarray([[k]*(FLAGS.action_quantity if FLAGS.discrete else 1) for v in sorted(model.factor_offsets.values()) for k in model.factor_offsets.keys() if model.factor_offsets[k]==v]).flatten()\n\n # add deconvolutions over the columns\n row_index = 1\n for k in results.keys(): # go over layers\n for c in range(len(results[k])): # add each channel in 2 new column\n for i in range(axes.shape[1]): # fill row going over input images\n # axes[row_index, i].set_title(k.split('/')[1]+'/'+k.split('/')[2]+'_'+str(c))\n axes[row_index, i].set_title(k+'_'+str(c))\n # axes[row_index, i].set_title(experts[c])\n \n axes[row_index, i].imshow(np.concatenate((inputs[i],np.expand_dims(clean_results[k][c][i],axis=2)), axis=2))\n axes[row_index, i].axis('off')\n # row_index+=2\n row_index+=1\n # plt.show()\n plt.savefig(FLAGS.summary_dir+FLAGS.log_tag+'/saliency_maps.jpg',bbox_inches='tight')", "def test_dict_to_dict(self):\n @converters.wrap\n def inner_test(param: dict):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, {'foo': 1, 'bar': ['bat', 2]})\n inner_test(param={'foo': 1, 'bar': ['bat', 2]})", "def _expected_inputs():", "def _interactive_input_tensor_to_features_dict(feature_map, hparams):\n inputs = tf.convert_to_tensor(feature_map[\"inputs\"])\n input_is_image = False if len(inputs.get_shape()) < 3 else True\n\n x = inputs\n if input_is_image:\n x = tf.image.resize_images(x, [299, 299])\n x = tf.reshape(x, [1, 299, 299, -1])\n x = tf.to_int32(x)\n else:\n # Remove the batch dimension.\n num_samples = x[0]\n length = x[2]\n x = tf.slice(x, [3], tf.to_int32([length]))\n x = tf.reshape(x, [1, -1, 1, 1])\n # Transform into a batch of size num_samples to get that many random\n # decodes.\n x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))\n\n p_hparams = hparams.problem_hparams\n input_space_id = tf.constant(p_hparams.input_space_id)\n target_space_id = tf.constant(p_hparams.target_space_id)\n\n features = {}\n features[\"input_space_id\"] = input_space_id\n features[\"target_space_id\"] = target_space_id\n features[\"decode_length\"] = (\n IMAGE_DECODE_LENGTH if input_is_image else inputs[1])\n features[\"inputs\"] = x\n # Save inputs to \"partial_targets\" when prepending inputs to targets. 
Also\n # keep \"inputs\" as some models crash if they don't exist.\n if getattr(hparams, \"prepend_mode\", \"none\") != \"none\":\n shape = tf.shape(x)\n partial_targets = tf.reshape(x, [shape[0], shape[1]])\n partial_targets = tf.pad(partial_targets, [[0, 0], [0, 1]])\n features[\"partial_targets\"] = partial_targets\n return features", "def act_on_dict(output_names=None, input_names=None, mode='add'):\n def wrapper(func):\n assert mode in ACTING_MODES, f'mode has to be one of {ACTING_MODES}'\n # use names of return variables of func if keys to save returned values is not specified\n if output_names is None:\n provides = extract_return(func)\n else:\n provides = output_names\n\n # use argument names in case keys to get input values is not specified\n if input_names is None:\n args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations = inspect.getfullargspec(func)\n requires = (args if defaults is None else args[:len(args) - len(defaults)]) + \\\n (kwonlyargs if kwonlydefaults is None else kwonlyargs[:len(kwonlyargs) - len(kwonlydefaults)])\n uses = args + kwonlyargs\n else:\n args = input_names\n varkw = None\n kwonlyargs = []\n\n requires = args\n uses = args\n\n # define function to act on dictionary\n def inner(dictionary):\n # check that all required arguments are present\n for arg in inner.requires:\n assert arg in dictionary, \\\n f\"key '{arg}' whose value is required by function '{func.__name__}' is missing\"\n\n # apply function\n if input_names is not None:\n returns = func(*(dictionary[arg] for arg in args))\n elif varkw is not None:\n returns = func(**dictionary)\n else:\n returns = func(\n **{arg: dictionary[arg] for arg in args if arg in dictionary},\n **{kwonlyarg: dictionary[kwonlyarg] for kwonlyarg in kwonlyargs if kwonlyarg in dictionary})\n\n # add to input or construct new dict based on mode\n if mode == 'add':\n result = dictionary\n else:\n result = {}\n for name, value in zip(provides, returns):\n result[name] = value\n\n return result\n\n # add attributes to function specifying which keys are required, used, provided\n inner.requires = requires\n inner.uses = uses\n inner.provides = provides\n\n return inner\n\n if callable(output_names):\n func = output_names\n output_names = None\n return wrapper(func)\n else:\n return wrapper", "def test_layer_API(self):\n\n # Exceptions\n exclude = ['get_topN', 'get_bins',\n 'get_geotransform',\n 'get_nodata_value',\n 'get_attribute_names',\n 'get_resolution',\n 'get_geometry_type',\n 'get_geometry_name',\n 'to_vector_points',\n 'to_vector_layer']\n\n V = Vector() # Empty vector instance\n R = Raster() # Empty raster instance\n\n assert same_API(V, R, exclude=exclude)\n\n for filename in [os.path.join(TESTDATA,\n 'test_buildings.shp'),\n os.path.join(HAZDATA,\n 'Lembang_Earthquake_Scenario.asc')]:\n\n L = read_layer(filename)\n\n assert same_API(L, V, exclude=exclude)\n assert same_API(L, R, exclude=exclude)", "def _add_layer(self, layer_dict, layer_name, input_layers, merge_mode=None, share_params_with=None):\n util.colorprint(layer_name, 'teal')\n \n layer_dict = dict(layer_dict)\n util.colorprint(layer_dict, 'red')\n \n if share_params_with is not None:\n print \"Warning: ignoring share_params_with\"\n \n layer_options = layer_dict[\"options\"]\n layer=None\n if layer_dict[\"type\"]==\"conv2d\":\n #TODO: remove below\n nb_filter, nb_row, nb_col = 3,3,3\n layer = keras.layers.convolutional.Convolution2D(nb_filter, nb_row, nb_col, **layer_options)\n elif layer_dict[\"type\"]==\"dense\":\n dim = 
layer_dict[\"output_dim\"]\n # del layer_options[\"output_dim\"]\n layer = keras.layers.core.Dense(dim, **layer_options) \n else:\n print \"Ursol Major\"\n RaiseError()\n # TODO: one of the layers is a string\n if isinstance(input_layers, list):\n #this means that there is input from a loop to this layer\n self.model.add_node(layer, name=layer_name, inputs=input_layers, merge_mode=merge_mode)\n else:\n self.model.add_node(layer, name=layer_name, input=input_layers)\n\n return layer_name", "def test_user_hidden_layers_input_acceptances():\n inputs_that_should_work = [[[\"linear\", 33]], [[\"linear\", 12]], [[\"gru\", 2]], [[\"lstm\", 2]], [[\"lstm\", 1]],\n [[\"gru\", 330]], [[\"gru\", 33], [\"linear\", 2]] ]\n for input in inputs_that_should_work:\n assert RNN(input_dim=1, layers_info=input, hidden_activations=\"relu\",\n output_activation=\"relu\")", "def test_dict(self):\n s1 = Square(4)\n s1_dict = s1.to_dictionary()\n s1_correct = {\"id\":1, \"size\":4, \"x\":0, \"y\":0}\n self.assertEqual(s1_dict, s1_correct)\n\n s2 = Square(9)\n s2_new = {\"id\":9, \"size\":4, \"x\":3, \"y\":4}\n s2.update(**s2_new)\n self.assertEqual(s2.to_dictionary(), s2_new)", "def test_fitstoEIT(eit_map):\n assert isinstance(eit_map, EITMap)", "def check_metadata(\n activation_layer_name, vector_output_layer_name, output_neuron_indices,\n ideal_activation):\n\n error_checking.assert_is_string(activation_layer_name)\n error_checking.assert_is_string(vector_output_layer_name)\n\n error_checking.assert_is_integer_numpy_array(output_neuron_indices)\n error_checking.assert_is_geq_numpy_array(output_neuron_indices, 0)\n error_checking.assert_is_numpy_array(\n output_neuron_indices, exact_dimensions=numpy.array([2], dtype=int)\n )\n\n error_checking.assert_is_not_nan(ideal_activation)", "def test_layer_ids(self):\n\t\t\n\t\tprint(type(self.model))\n\t\tdetails = self.watcher.describe()\n\t\tprint(details)\n\t\t\n\t\tlayer_ids = details.layer_id.to_numpy()\n\t\tself.assertEqual(layer_ids[0], self.first_layer)\n\t\tself.assertEqual(layer_ids[1], self.second_layer)\n\t\tself.assertEqual(layer_ids[-3], self.fc1_layer)\n\t\tself.assertEqual(layer_ids[-2], self.fc2_layer)\n\t\tself.assertEqual(layer_ids[-1], self.fc3_layer)", "def test(module, annots_dict, inverse_annots_dict, mode='standard'):\n represented_functions = unions([annots_dict[gene] for gene in module])\n d = {}\n num_genes = len(annots_dict)\n num_drawn = len(module)\n for function in represented_functions:\n num_labeled_total = len(inverse_annots_dict[function])\n num_labeled_in_module = sum(\n [function in annots_dict[gene] for gene in module])\n d[function] = hypergeom.sf(num_labeled_in_module - 1, num_genes,\n num_labeled_total, num_drawn)\n if mode.startswith('c'):\n d[function] /= hypergeom.sf(0, num_genes, num_labeled_total, \n num_drawn)\n return d", "def _interactive_input_tensor_to_features_dict(feature_map, hparams):\n inputs = tf.convert_to_tensor(feature_map[\"inputs\"])\n\n x = inputs\n # Remove the batch dimension.\n num_samples = x[0]\n length = x[2]\n x = tf.slice(x, [3], tf.to_int32([length]))\n x = tf.reshape(x, [1, -1, 1, 1])\n # Transform into a batch of size num_samples to get that many random\n # decodes.\n x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))\n\n p_hparams = hparams.problem_hparams\n input_space_id = tf.constant(p_hparams.input_space_id)\n target_space_id = tf.constant(p_hparams.target_space_id)\n\n features = {}\n features[\"input_space_id\"] = input_space_id\n features[\"target_space_id\"] = target_space_id\n 
features[\"decode_length\"] = inputs[1]\n features[\"inputs\"] = x\n return features", "def test_predict_prep():\n args = get_layer('predict', 'manual', 'temporal', False, True, window=2, step_size=3)\n run_layer(*args)", "def test_apply_scalar_map(self):\n super(TestObjDict, self).test_apply_scalar_map(_as_obj=True)", "def test_simple_merge(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(6)(x1)\n x4 = merge([x2, x3], mode=\"concat\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )", "def test_Stock_output_named_tuple_vs_dictionary_7():\n assert Stock_tuple[0][6] == Stock_list_dict[0][\"weight\"], \"weight is not getting stored properly\"", "def test_digi_scenarios(self):\n # -- data group has no digitizer devices ----\n _map = self.map\n self.assertBasics(_map)\n self.assertEqual(_map, {})\n\n # -- data group has all mappable devices ----\n self.f.add_module(\"SIS 3301\", {})\n self.f.add_module(\"SIS crate\", {})\n _map = self.map\n self.assertBasics(_map)\n\n # check all controls were mapped\n self.assertEqual(len(_map), 2)\n self.assertIn(\"SIS 3301\", _map)\n self.assertIn(\"SIS crate\", _map)\n\n # the data group has mappable and unknown digitizers ----\n self.f.remove_all_modules()\n self.f.add_module(\"SIS 3301\", {})\n self.f[\"Raw data + config\"].create_group(\"Not known\")\n _map = self.map\n self.assertBasics(_map)\n\n # check correct diagnostics were mapped\n self.assertEqual(len(_map), 1)\n self.assertIn(\"SIS 3301\", _map)\n self.assertNotIn(\"Not known\", _map)\n\n # delete unknown group\n del self.f[\"Raw data + config/Not known\"]\n\n # the data group has a dataset ----\n self.f.remove_all_modules()\n self.f.add_module(\"SIS crate\", {})\n data = np.empty((2, 100), dtype=np.float32)\n self.f[\"Raw data + config\"].create_dataset(\"A dataset\", data=data)\n _map = self.map\n self.assertBasics(_map)\n\n # check correct diagnostics were mapped\n self.assertEqual(len(_map), 1)\n self.assertIn(\"SIS crate\", _map)\n self.assertNotIn(\"A dataset\", _map)\n\n # delete dataset\n del self.f[\"Raw data + config/A dataset\"]\n\n # the data group has a mappable digitizer but ----\n # mapping fails ----\n self.f.remove_all_modules()\n self.f.add_module(\"SIS 3301\", {})\n self.f.add_module(\"SIS crate\", {})\n\n # remove a dataset from 'SIS 3301'\n # - this will cause mapping of 'Waveform' to fail\n #\n sis_group = self.f[\"Raw data + config/SIS 3301\"]\n for name in sis_group:\n if isinstance(sis_group[name], h5py.Dataset):\n del sis_group[name]\n\n # check map\n _map = self.map\n self.assertBasics(_map)\n\n # check correct controls were mapped\n self.assertEqual(len(_map), 1)\n self.assertIn(\"SIS crate\", _map)\n self.assertNotIn(\"SIS 3301\", _map)", "def get_data(layer_or_layers, seed=4576546):\n all_layers = get_all_layers(layer_or_layers)\n # initialize 
layer-to-output mapping from all input layers\n # with zeros\n all_outputs = dict((layer, np.zeros(layer.shape, dtype=np.float32))\n for layer in all_layers\n if isinstance(layer, InputLayer))\n \n rng = RandomState(seed)\n n_trials = all_layers[0].shape[0]\n y = np.round(rng.rand(n_trials)).astype(np.int32)\n # update layer-to-output mapping by propagating the inputs\n for layer in all_layers:\n if layer not in all_outputs:\n try:\n try:\n layer_inputs = [all_outputs[input_layer]\n for input_layer in layer.input_layers]\n except AttributeError:\n layer_inputs = all_outputs[layer.input_layer]\n except KeyError:\n # one of the input_layer attributes must have been `None`\n raise ValueError(\"get_output() was called without giving an \"\n \"input expression for the free-floating \"\n \"layer %r. Please call it with a dictionary \"\n \"mapping this layer to an input expression.\"\n % layer)\n outputs = layer.transform(topo=layer_inputs, y=y)\n all_outputs[layer] = outputs\n # return the output(s) of the requested layer(s) only\n try:\n return [all_outputs[layer].astype(np.float32) \n for layer in layer_or_layers], y\n except TypeError:\n return all_outputs[layer_or_layers].astype(np.float32), y", "def test2():\n\n # Internal Feature Layers\n feature_queries = []\n feature_layers = db(db.gis_layer_feature.resource == \"office\").select()\n for layer in feature_layers:\n if layer.role_required and not auth.s3_has_role(layer.role_required):\n continue\n _layer = gis.get_feature_layer(layer.module,\n layer.resource,\n layer.name,\n layer.popup_label,\n config=config,\n marker_id=layer.marker_id,\n active=layer.visible,\n polygons=layer.polygons,\n opacity=layer.opacity)\n if _layer:\n # Add a URL for downloading the GeoJSON\n # @ToDO: add to gis.get_feature_layer\n _layer[\"url\"] = \"%s.geojson\" % URL(r=request, c=layer.module, f=layer.resource)\n marker = db(db.gis_marker.id == _layer[\"marker\"]).select(db.gis_marker.image,\n db.gis_marker.height,\n db.gis_marker.width,\n limitby=(0, 1)).first()\n _layer[\"marker\"] = marker\n feature_queries.append(_layer)\n\n return dict(feature_queries=feature_queries)", "def _visualization_by_layer_name(graph, value_feed_dict, input_tensor, layer_name, method, path_logdir, path_outdir):\r\n start = -time.time()\r\n is_success = True\r\n\r\n sess = tf.get_default_session()\r\n if not(graph is sess.graph):\r\n print('Error, the graph input is not the graph of the current session!!')\r\n # try:\r\n parsed_tensors = parse_tensors_dict(graph, layer_name, value_feed_dict)\r\n if parsed_tensors == None:\r\n return is_success\r\n\r\n op_tensor, x, X_in, feed_dict = parsed_tensors\r\n\r\n is_deep_dream = True\r\n #is_valid_sess = True\r\n with graph.as_default():\r\n # computing reconstruction\r\n X = X_in\r\n if input_tensor != None:\r\n X = get_tensor(graph = graph, name = input_tensor.name)\r\n # original_images = sess.run(X, feed_dict = feed_dict)\r\n\r\n results = None\r\n if method == \"act\":\r\n # compute activations\r\n results = _activation(graph, sess, op_tensor, feed_dict)\r\n elif method == \"deconv\":\r\n # deconvolution\r\n results = _deconvolution(graph, sess, op_tensor, X, feed_dict)\r\n elif method == \"deepdream\":\r\n # deepdream\r\n is_success = _deepdream(graph, sess, op_tensor, X, feed_dict, layer_name, path_outdir, path_logdir)\r\n is_deep_dream = False\r\n\r\n # except:\r\n # is_success = False\r\n # print(\"No Layer with layer name = %s\" % (layer_name))\r\n # return is_success\r\n\r\n if is_deep_dream:\r\n is_success = 
write_results(results, layer_name, path_outdir, path_logdir, method = method)\r\n\r\n start += time.time()\r\n print(\"Reconstruction Completed for %s layer. Time taken = %f s\" % (layer_name, start))\r\n\r\n return is_success", "def test_output_named_tuple_vs_dictionary_3():\n assert isclose(mean_current_location[0],\n mean_current_location_d[0]), \"Mean location cannot be different for Named Tuple and Dictionary list\"", "def test_output_named_tuple_vs_dictionary_6():\n assert delta2 > delta1, \"Dictionary cannot be faster than named tuple\"", "def test_num_layers(self):\n\t\tdetails = self.watcher.describe()\n\t\tprint(\"Testing Keras on VGG16\")\n\t\tprint(details)\n\t\tself.assertEqual(len(details), 16)", "def __call__(\n self, \n image: ndarray, \n adversarial_image: ndarray\n ) -> Dict[str, Union[float, int]]:\n ...", "def call(self, inputs, training=False):\r\n # BASE outputs\r\n x = self.base.indexable_call(inputs, end=\"block4_conv3\")\r\n out4_3 = self.l2_norm(x)\r\n out7 = self.base.indexable_call(x, start=\"block4_pool\")\r\n\r\n # EXTRA outputs\r\n out8_2 = self.extra_layers.indexable_call(out7, end=\"conv8_2\")\r\n out9_2 = self.extra_layers.indexable_call(out8_2, start=\"conv9_1\", end=\"conv9_2\")\r\n out10_2 = self.extra_layers.indexable_call(out9_2, start=\"conv10_1\", end=\"conv10_2\")\r\n out11_2 = self.extra_layers.indexable_call(out10_2, start=\"conv11_1\")\r\n\r\n # DETECTOR predictions \r\n feature_maps = self.detector(inputs=[\r\n out4_3, out7, out8_2, out9_2, out10_2, out11_2])\r\n \r\n return feature_maps", "def initialize(self,inputDict):\n pass", "def _build_representation_layer(self,\n input_question_word,\n input_question_word_mask,\n input_question_subword,\n input_question_subword_mask,\n input_question_char,\n input_question_char_mask,\n input_context_word,\n input_context_word_mask,\n input_context_subword,\n input_context_subword_mask,\n input_context_char,\n input_context_char_mask): \n word_vocab_size = self.hyperparams.data_word_vocab_size\n word_embed_dim = self.hyperparams.model_representation_word_embed_dim\n word_dropout = self.hyperparams.model_representation_word_dropout if self.mode == \"train\" else 0.0\n word_embed_pretrained = self.hyperparams.model_representation_word_embed_pretrained\n word_feat_trainable = self.hyperparams.model_representation_word_feat_trainable\n word_feat_enable = self.hyperparams.model_representation_word_feat_enable\n subword_vocab_size = self.hyperparams.data_subword_vocab_size\n subword_embed_dim = self.hyperparams.model_representation_subword_embed_dim\n subword_unit_dim = self.hyperparams.model_representation_subword_unit_dim\n subword_feat_trainable = self.hyperparams.model_representation_subword_feat_trainable\n subword_window_size = self.hyperparams.model_representation_subword_window_size\n subword_hidden_activation = self.hyperparams.model_representation_subword_hidden_activation\n subword_dropout = self.hyperparams.model_representation_subword_dropout if self.mode == \"train\" else 0.0\n subword_pooling_type = self.hyperparams.model_representation_subword_pooling_type\n subword_feat_enable = self.hyperparams.model_representation_subword_feat_enable\n char_vocab_size = self.hyperparams.data_char_vocab_size\n char_embed_dim = self.hyperparams.model_representation_char_embed_dim\n char_unit_dim = self.hyperparams.model_representation_char_unit_dim\n char_feat_trainable = self.hyperparams.model_representation_char_feat_trainable\n char_window_size = self.hyperparams.model_representation_char_window_size\n 
char_hidden_activation = self.hyperparams.model_representation_char_hidden_activation\n char_dropout = self.hyperparams.model_representation_char_dropout if self.mode == \"train\" else 0.0\n char_pooling_type = self.hyperparams.model_representation_char_pooling_type\n char_feat_enable = self.hyperparams.model_representation_char_feat_enable\n fusion_type = self.hyperparams.model_representation_fusion_type\n fusion_num_layer = self.hyperparams.model_representation_fusion_num_layer\n fusion_unit_dim = self.hyperparams.model_representation_fusion_unit_dim\n fusion_hidden_activation = self.hyperparams.model_representation_fusion_hidden_activation\n fusion_dropout = self.hyperparams.model_representation_fusion_dropout if self.mode == \"train\" else 0.0\n fusion_trainable = self.hyperparams.model_representation_fusion_trainable\n \n with tf.variable_scope(\"representation\", reuse=tf.AUTO_REUSE):\n input_question_feat_list = []\n input_question_feat_mask_list = []\n input_context_feat_list = []\n input_context_feat_mask_list = []\n \n if word_feat_enable == True:\n self.logger.log_print(\"# build word-level representation layer\")\n word_feat_layer = WordFeat(vocab_size=word_vocab_size, embed_dim=word_embed_dim,\n dropout=word_dropout, pretrained=word_embed_pretrained, embedding=self.word_embedding,\n num_gpus=self.num_gpus, default_gpu_id=self.default_gpu_id, regularizer=self.regularizer,\n random_seed=self.random_seed, trainable=word_feat_trainable)\n \n (input_question_word_feat,\n input_question_word_feat_mask) = word_feat_layer(input_question_word, input_question_word_mask)\n (input_context_word_feat,\n input_context_word_feat_mask) = word_feat_layer(input_context_word, input_context_word_mask)\n \n input_question_feat_list.append(input_question_word_feat)\n input_question_feat_mask_list.append(input_question_word_feat_mask)\n input_context_feat_list.append(input_context_word_feat)\n input_context_feat_mask_list.append(input_context_word_feat_mask)\n \n word_unit_dim = word_embed_dim\n self.word_embedding_placeholder = word_feat_layer.get_embedding_placeholder()\n else:\n word_unit_dim = 0\n self.word_embedding_placeholder = None\n \n if subword_feat_enable == True:\n self.logger.log_print(\"# build subword-level representation layer\")\n subword_feat_layer = SubwordFeat(vocab_size=subword_vocab_size, embed_dim=subword_embed_dim,\n unit_dim=subword_unit_dim, window_size=subword_window_size, hidden_activation=subword_hidden_activation,\n pooling_type=subword_pooling_type, dropout=subword_dropout, num_gpus=self.num_gpus,\n default_gpu_id=self.default_gpu_id, regularizer=self.regularizer,\n random_seed=self.random_seed, trainable=subword_feat_trainable)\n \n (input_question_subword_feat,\n input_question_subword_feat_mask) = subword_feat_layer(input_question_subword, input_question_subword_mask)\n (input_context_subword_feat,\n input_context_subword_feat_mask) = subword_feat_layer(input_context_subword, input_context_subword_mask)\n \n input_question_feat_list.append(input_question_subword_feat)\n input_question_feat_mask_list.append(input_question_subword_feat_mask)\n input_context_feat_list.append(input_context_subword_feat)\n input_context_feat_mask_list.append(input_context_subword_feat_mask)\n else:\n subword_unit_dim = 0\n \n if char_feat_enable == True:\n self.logger.log_print(\"# build char-level representation layer\")\n char_feat_layer = CharFeat(vocab_size=char_vocab_size, embed_dim=char_embed_dim,\n unit_dim=char_unit_dim, window_size=char_window_size, 
hidden_activation=char_hidden_activation,\n pooling_type=char_pooling_type, dropout=char_dropout, num_gpus=self.num_gpus,\n default_gpu_id=self.default_gpu_id, regularizer=self.regularizer,\n random_seed=self.random_seed, trainable=char_feat_trainable)\n \n (input_question_char_feat,\n input_question_char_feat_mask) = char_feat_layer(input_question_char, input_question_char_mask)\n (input_context_char_feat,\n input_context_char_feat_mask) = char_feat_layer(input_context_char, input_context_char_mask)\n \n input_question_feat_list.append(input_question_char_feat)\n input_question_feat_mask_list.append(input_question_char_feat_mask)\n input_context_feat_list.append(input_context_char_feat)\n input_context_feat_mask_list.append(input_context_char_feat_mask)\n else:\n char_unit_dim = 0\n \n feat_unit_dim = word_unit_dim + subword_unit_dim + char_unit_dim\n \n feat_fusion_layer = self._create_fusion_layer(feat_unit_dim, fusion_unit_dim,\n fusion_type, fusion_num_layer, fusion_hidden_activation, fusion_dropout,\n self.num_gpus, self.default_gpu_id, self.regularizer, self.random_seed, fusion_trainable)\n input_question_feat, input_question_feat_mask = self._build_fusion_result(input_question_feat_list,\n input_question_feat_mask_list, feat_fusion_layer)\n input_context_feat, input_context_feat_mask = self._build_fusion_result(input_context_feat_list,\n input_context_feat_mask_list, feat_fusion_layer)\n \n return input_question_feat, input_question_feat_mask, input_context_feat, input_context_feat_mask", "def test_filter_layer_ids(self):\n\t\t\n\t\tdetails = self.watcher.describe(layers=[])\n\t\tprint(details)\n\t\t\n\t\tdetails = self.watcher.describe(layers=self.fc_layers)\n\t\tprint(details)\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\n\t\t\t\n\t\tnonDenseLayers = details[details.layer_type!=str(LAYER_TYPE.DENSE)]\n\t\tnonDenseCount = len(nonDenseLayers)\n\t\tself.assertEqual(nonDenseCount, 0, \"Filter has No dense layers: {} found\".format(nonDenseCount))", "def get_attribute(layername='np', sublayer='sig_vv_aft'):\n layer1 = {'gm': 'Global_Projection', 'np': 'North_Polar_Projection', 'radar': 'Sigma0_Data', 'flag': 'flag',\n 'cell_tb_v_aft': 'North_Polar_Projection'}\n # all_tb = [u'cell_tb_h_aft', u'cell_tb_qual_flag_h_aft', u'cell_tb_qual_flag_v_aft',\n # u'cell_tb_v_aft', u'site_loc', u'tb_cell_lat', u'tb_cell_lon']\n # tbh_aft = ['cell_tb_h_aft', 'tb_cell_lat', 'tb_cell_lon', 'cell_tb_qual_flag_h_aft']\n # tbv_aft = ['cell_tb_v_aft', 'tb_cell_lat', 'tb_cell_lon', 'cell_tb_qual_flag_v_aft']\n # tbv_af = ['cell_tb_v_aft', 'cell_lat', 'cell_lon', 'cell_tb_qual_flag_v_aft', 'cell_tb_error_h_aft',\n # 'cell_tb_time_utc_aft', 'cell_boresight_incidence_aft']\n # tbv_fo = ['cell_tb_v_fore', 'cell_lat', 'cell_lon', 'cell_tb_qual_flag_v_fore', 'cell_tb_error_h_fore',\n # 'cell_tb_time_utc_fore', 'cell_boresight_incidence_fore']\n # l_sig = ['cell_lat', 'cell_lon', 'cell_sigma0_qual_flag_vv', 'cell_sigma0_vv_aft']\n # h_sig = ['cell_lat', 'cell_lon', 'cell_sigma0_qual_flag_vv', 'cell_sigma0_vv_aft']\n att_dict = \\\n {'tbh_aft': ['cell_tb_h_aft', 'cell_lat', 'cell_lon', 'cell_tb_qual_flag_h_aft'],\n 'tbv_aft': ['cell_tb_v_aft', 'cell_lat', 'cell_lon', 'cell_tb_qual_flag_v_aft'],\n 'cell_tb_v_aft': ['cell_tb_v_aft', 'cell_lat', 'cell_lon', 'cell_tb_qual_flag_v_aft', 'cell_tb_error_v_aft',\n 'cell_tb_time_utc_aft', 
'cell_boresight_incidence_aft'],\n 'cell_tb_h_aft': ['cell_tb_h_aft', 'cell_lat', 'cell_lon', 'cell_tb_qual_flag_h_aft', 'cell_tb_error_h_aft',\n 'cell_tb_time_utc_aft', 'cell_boresight_incidence_aft'],\n 'cell_tb_v_fore': ['cell_tb_v_fore', 'cell_lat', 'cell_lon', 'cell_tb_qual_flag_v_fore', 'cell_tb_error_h_fore',\n 'cell_tb_time_utc_fore', 'cell_boresight_incidence_fore'],\n 'sig_vv_aft': ['cell_lat', 'cell_lon', 'cell_sigma0_qual_flag_vv', 'cell_sigma0_vv_aft'],\n 'sig_hh_aft': ['cell_lat', 'cell_lon', 'cell_sigma0_qual_flag_vv', 'cell_sigma0_vv_aft'],\n\n 'smap_tb': ['cell_tb_v_aft', 'cell_tb_qual_flag_v_aft', 'cell_tb_error_v_aft',\n 'cell_tb_h_aft', 'cell_tb_qual_flag_h_aft', 'cell_tb_error_h_aft',\n 'cell_boresight_incidence_aft', 'cell_tb_time_seconds_aft',\n 'cell_tb_v_fore', 'cell_tb_qual_flag_v_fore', 'cell_tb_error_v_fore',\n 'cell_tb_h_fore', 'cell_tb_qual_flag_h_fore', 'cell_tb_error_h_fore',\n 'cell_boresight_incidence_fore', 'cell_tb_time_seconds_fore'],\n 'smap_tb_lonlat': ['cell_lon', 'cell_lat',\n 'cell_tb_v_aft', 'cell_tb_qual_flag_v_aft', 'cell_tb_error_v_aft',\n 'cell_tb_h_aft', 'cell_tb_qual_flag_h_aft', 'cell_tb_error_h_aft',\n 'cell_boresight_incidence_aft', 'cell_tb_time_seconds_aft',\n 'cell_tb_v_fore', 'cell_tb_qual_flag_v_fore', 'cell_tb_error_v_fore',\n 'cell_tb_h_fore', 'cell_tb_qual_flag_h_fore', 'cell_tb_error_h_fore',\n 'cell_boresight_incidence_fore', 'cell_tb_time_seconds_fore'],\n 'smap_ta_lonlat_colrow': ['cell_lon', 'cell_lat',\n 'cell_tb_v_aft', 'cell_tb_qual_flag_v_aft', 'cell_tb_error_v_aft',\n 'cell_tb_h_aft', 'cell_tb_qual_flag_h_aft', 'cell_tb_error_h_aft',\n 'cell_boresight_incidence_aft', 'cell_tb_time_seconds_aft',\n 'cell_tb_v_fore', 'cell_tb_qual_flag_v_fore', 'cell_tb_error_v_fore',\n 'cell_tb_h_fore', 'cell_tb_qual_flag_h_fore', 'cell_tb_error_h_fore',\n 'cell_boresight_incidence_fore', 'cell_tb_time_seconds_fore', 'cell_row', 'cell_column']}\n att_read = [layer1[layername], att_dict[sublayer]]\n\n\n # att_dict = {'sig_vv_aft': att_sig_vv_aft, 'sig_hh_aft': att_sig_hh_aft,\n # if layername == 'sigma':\n # attributes = ['Sigma0_Data/cell_sigma0_vv_aft', 'Sigma0_Data/cell_lat', 'Sigma0_Data/cell_lon',\n # 'Sigma0_Data/cell_sigma0_qual_flag_vv']\n # elif layername == 'tb':\n # attributes = ['Global_Projection/cell_tb_v_aft', 'Global_Projection/tb_cell_lat',\n # 'Global_Projection/tb_cell_lon', '/none']\n # elif layername == 'tbn':\n # attributes = ['North_Polar_Projection/cell_tb_v_aft', 'North_Polar_Projection/tb_cell_lat',\n # 'North_Polar_Projection/tb_cell_lon', '/none']\n # else:\n # print 'there is no %s data' % layername\n return att_read", "def test_create_mimic_dict_3(self):\n result = self.module.create_mimic_dict(\"imdev.txt\")\n self.assertDictEqual(\n result, imdev,\n \"Mimic dict output for imdev.txt does match expected contents\"\n )", "def verifyData(self, expectedDict):\n pass", "def call(self, inputs, feature_layer=None, training=True):\n pass", "async def test_get_image_layers(\n image_config: ImageConfig, image_config_signed: ImageConfig, image_layers: List\n):\n assert image_config.get_image_layers() == image_layers\n assert image_config_signed.get_image_layers() == image_layers", "def evaluate_mapped_inputs(self,value,**kwargs):\n result = {\"result\": value}\n return result", "def load_state_dict(self, state_dict: Dict[str, torch.Tensor]):\n pass", "def build_model(options,worddicts):\n opt_ret=dict()\n params=dict()\n word_xr1_mask=tf.reverse(word_x1_mask,[1])\n word_xr2_mask = tf.reverse(word_x2_mask, [1])\n\n\n\n 
#embedding layer\n word_embedding = norm_weight(options['n_words'], options['dim_word'])\n if options['embedding']:\n with open(options['embedding'], 'r',encoding='iso-8859-1') as f:\n for line in f:\n temp=line.split()\n word=temp[0]\n vector=temp[1:]\n if word in worddicts and worddicts[word]<options['n_words']:\n word_embedding[worddicts[word],:]=vector\n\n word_embedding_layer=tf.Variable(word_embedding,name='word_embedding')\n\n emb1=tf.nn.embedding_lookup(word_embedding_layer,word_x1,name='embedding_word_lookup1')\n emb2=tf.nn.embedding_lookup(word_embedding_layer,word_x2,name='embedding_word_lookup2')\n\n if options['use_dropout']:\n emb1=tf.cond(use_noise,lambda :tf.nn.dropout(emb1,0.5),lambda :emb1)\n emb2 = tf.cond(use_noise, lambda: tf.nn.dropout(emb2, 0.5), lambda: emb2)\n\n #1-layer LSTM\n print('LSTM result')\n for l in range(1):\n #param_init_lstm\n prefix = 'encoder_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim_word']\n else:\n nin = options['dim_word']+2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n #param_init_rlstm\n prefix = 'encoder_r_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim_word']\n else:\n nin = options['dim_word'] +2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n\n\n if l==0:\n ctx1=emb1\n ctx2=emb2\n else:\n ctx1=tf.concat([ctx1,emb1],axis=2)\n ctx2=tf.concat([ctx2,emb2],axis=2)\n\n print(ctx1)\n\n ctxr1=tf.reverse(ctx1,axis=[1])\n ctxr2=tf.reverse(ctx2,axis=[1])\n\n proj1=RNN_layer(ctx1,word_x1_mask,options,params,prefix='encoder_{}'.format(str(l+1)))\n projr1=RNN_layer(ctxr1,word_xr1_mask,options,params,prefix='encoder_r_{}'.format(str(l+1)))\n proj2=RNN_layer(ctx2,word_x2_mask,options,params,prefix='encoder_{}'.format(str(l+1)))\n projr2=RNN_layer(ctxr2,word_xr2_mask,options,params,prefix='encoder_r_{}'.format(str(l+1)))\n\n ctx1=tf.concat([proj1[0],projr1[0][::-1]],axis=len(projr1[0].shape)-1)\n ctx2 = tf.concat([proj2[0], projr2[0][::-1]], axis=len(projr2[0].shape) - 1)\n ctx1 = tf.transpose(ctx1, [1, 0, 2])\n ctx2 = tf.transpose(ctx2, [1, 0, 2])\n print(ctx1)\n\n ctx1=ctx1*word_x1_mask[:,:,None]\n ctx2 = ctx2 * word_x2_mask[:, :, None]\n def _step(h,x):\n return tf.matmul(x[0],x[1])\n temp=tf.zeros((tf.shape(ctx1)[1],tf.shape(ctx2)[1]))\n weight_martrix=tf.scan(_step,[ctx1,tf.transpose(ctx2,[0,2,1])],temp)\n weight_martrix_1=tf.exp(weight_martrix)*word_x2_mask[:,None,:]\n weight_martrix_2=tf.transpose(tf.exp(weight_martrix)*word_x1_mask[:,:,None],[0,2,1])\n weight_martrix_1=weight_martrix_1/tf.reduce_sum(weight_martrix_1,axis=2)[:,:,None]\n weight_martrix_2 = weight_martrix_2 / tf.reduce_sum(weight_martrix_2, axis=2)[:,:,None]\n\n 
ctx1_=tf.reduce_sum(weight_martrix_1[:,:,:,None]*ctx2[:,None,:,:],axis=2)\n ctx2_ = tf.reduce_sum(weight_martrix_2[:, :, :, None] * ctx1[:, None, :, :],axis=2)\n inp1=tf.concat([ctx1, ctx1_, ctx1*ctx1_, ctx1-ctx1_],axis=2)\n inp2 = tf.concat([ctx2, ctx2_, ctx2 * ctx2_, ctx2 - ctx2_], axis=2)\n params = param_init_fflayer(options, params, prefix='projection',\n nin=options['dim'] * 8, nout=options['dim'], ortho=False)\n\n\n s=tf.shape(inp1)\n inp1 = tf.nn.relu(tf.matmul(tf.reshape(inp1,[-1,int(inp1.shape[-1])]), params[_p('projection', 'W')]) + params[_p('projection', 'b')])\n inp1=tf.reshape(inp1,tf.concat([s[:2],[-1]],0))\n s=tf.shape(inp2)\n inp2 = tf.nn.relu(tf.matmul(tf.reshape(inp2,[-1,int(inp2.shape[-1])]), params[_p('projection', 'W')]) + params[_p('projection', 'b')])\n inp2=tf.reshape(inp2,tf.concat([s[:2],[-1]],0))\n if options['use_dropout']:\n inp1=tf.cond(use_noise,lambda :tf.nn.dropout(inp1,0.5),lambda :inp1)\n inp2 = tf.cond(use_noise, lambda: tf.nn.dropout(inp2, 0.5), lambda: inp2)\n\n\n for l in range(1):\n #param_init_lstm\n prefix = 'decoder_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim']\n else:\n nin = options['dim']+2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n #param_init_rlstm\n prefix = 'decoder_r_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim']\n else:\n nin = options['dim'] +2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n\n\n if l==0:\n ctx1=inp1\n ctx2=inp2\n else:\n ctx1=tf.concat([ctx1,inp1],axis=2)\n ctx2=tf.concat([ctx2,inp2],axis=2)\n\n print(ctx1)\n\n ctxr1=tf.reverse(ctx1,axis=[1])\n ctxr2=tf.reverse(ctx2,axis=[1])\n\n proj1=RNN_layer(ctx1,word_x1_mask,options,params,prefix='decoder_{}'.format(str(l+1)))\n projr1=RNN_layer(ctxr1,word_xr1_mask,options,params,prefix='decoder_r_{}'.format(str(l+1)))\n proj2=RNN_layer(ctx2,word_x2_mask,options,params,prefix='decoder_{}'.format(str(l+1)))\n projr2=RNN_layer(ctxr2,word_xr2_mask,options,params,prefix='decoder_r_{}'.format(str(l+1)))\n\n ctx1=tf.concat([proj1[0],projr1[0][::-1]],axis=len(projr1[0].shape)-1)\n ctx2 = tf.concat([proj2[0], projr2[0][::-1]], axis=len(projr2[0].shape) - 1)\n ctx1 = tf.transpose(ctx1, [1, 0, 2])\n ctx2 = tf.transpose(ctx2, [1, 0, 2])\n print(ctx1)\n\n mean_1=tf.reduce_sum(ctx1*word_x1_mask[:,:,None],axis=1)/tf.reduce_sum(word_x1_mask,axis=1)[:,None]\n max_1=tf.reduce_max(ctx1*word_x1_mask[:,:,None],axis=1)\n\n mean_2=tf.reduce_sum(ctx2*word_x2_mask[:,:,None],axis=1)/tf.reduce_sum(word_x2_mask,axis=1)[:,None]\n max_2=tf.reduce_max(ctx2*word_x2_mask[:,:,None],axis=1)\n\n #represention and MLP layer\n logit=tf.concat([mean_1,mean_2,max_1,max_2],axis=1)\n if options['use_dropout']:\n 
logit=tf.cond(use_noise,lambda :tf.nn.dropout(logit,0.5),lambda :logit)\n\n\n params = param_init_fflayer(options, params, prefix='ff_layer_1',\n nin=options['dim'] * 8, nout=options['dim'], ortho=False)\n params = param_init_fflayer(options, params, prefix='ff_layer_output',\n nin=options['dim'], nout=3, ortho=False)\n logit=tf.nn.tanh(tf.matmul(logit,params[_p('ff_layer_1','W')])+params[_p('ff_layer_1','b')])\n if options['use_dropout']:\n logit=tf.cond(use_noise,lambda :tf.nn.dropout(logit,0.5),lambda :logit)\n\n logit=tf.matmul(logit, params[_p('ff_layer_output', 'W')]) + params[_p('ff_layer_output', 'b')]\n probs=tf.nn.softmax(logit)\n pred=tf.argmax(probs,1)\n cost=tf.losses.sparse_softmax_cross_entropy(y,logit)\n return opt_ret,cost,pred,probs", "def get_output_data(\n self,\n inputs: Dict[str, Any]) -> Any:\n return inputs", "def test_types_values(dictionary: Dict, labels: List):\n for img_path in dictionary:\n bboxes = dictionary[img_path]\n for bbox in bboxes:\n \n assert isinstance(bbox[\"signTypes\"], str)\n assert isinstance(bbox[\"signBB\"], tuple) \n assert isinstance(bbox[\"signC\"], tuple)\n\n assert bbox[\"signTypes\"] in set(labels)\n \n for coord in bbox[\"signBB\"]:\n assert coord >= 0\n for coord in bbox[\"signC\"]:\n assert coord >= 0\n print(\"All assertions have been passed smoothly.\")", "def test_expected_output(self, arr, counts, expected_output):\n\n output = LeafNodeScaledConformalPredictor._sum_dict_values(arr, counts)\n\n assert output == expected_output, \"_sum_dict_values produced incorrect output\"", "def process_layer(layer_def, inputs):\n\n outputs = []\n for n in layer_def['neurons']:\n n_res = n.activate(inputs)\n\n outputs.append(n_res)\n\n return outputs", "def dict_to_tf_example(data, label_map_dict):\n\n encoded_jpg_io = io.BytesIO()\n image = data['image']\n image.save(encoded_jpg_io, \"JPEG\", quality=80)\n encoded_jpg = encoded_jpg_io.getvalue()\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n width, height = image.size\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n rotation = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n masks = []\n difficult_obj = []\n for obj in data['object']:\n difficult = bool(int(obj['difficult']))\n difficult_obj.append(int(difficult))\n\n xmin.append(float(obj['bndbox']['xmin']) / width)\n ymin.append(float(obj['bndbox']['ymin']) / height)\n xmax.append(float(obj['bndbox']['xmax']) / width)\n ymax.append(float(obj['bndbox']['ymax']) / height)\n rotation.append(float(obj['rotation']))\n masks.append(obj['mask'])\n classes_text.append(obj['name'].encode('utf8'))\n classes.append(label_map_dict[obj['name']])\n truncated.append(int(obj['truncated']))\n poses.append(obj['pose'].encode('utf8'))\n\n mask = np.stack(masks)\n encoded_mask = pn_encode(mask.flatten()).tolist()\n print('mask encode:', mask.shape, '->', len(encoded_mask)) ###\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': 
dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/rotation': dataset_util.float_list_feature(rotation),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n 'image/segmentation/object': dataset_util.int64_list_feature(encoded_mask),\n 'image/segmentation/object/class': dataset_util.int64_list_feature(classes),\n }))\n return example", "def Evaluate_Prediction(prediction_mask, true_mask, feature_dict, \n test_name = 'Test'):\n \n # true_mask has 3 layers but they are redundant\n true_mask = true_mask[:,:,0]\n \n # Convert from Prob to 0,1,2...\n prediction_mask = prediction_mask.argmax(axis = 2) + 1 \n\n # Compute confusion matrix -- subtract 1 so that first label is \"0\" \n conf = custom_confusion_matrix(prediction_mask.flatten(), true_mask.flatten(), feature_dict)\n \n # Convert mask to proper shape for loss function - shape should have 4 dimensions with one-hot encoding\n true_mask = Expand_Mask(mask = true_mask, num_class = len(feature_dict)) ## to 0,1\n true_mask = np.expand_dims(true_mask, axis=0)\n true_mask = true_mask.astype(np.float)\n\n # Convert prediction into proper shape for loss function\n prediction_mask = Expand_Mask(mask = prediction_mask, num_class = len(feature_dict)) #to 0,1\n prediction_mask = np.expand_dims(prediction_mask, axis=0) \n prediction_mask = prediction_mask.astype(np.float)\n \n score = {'Test':test_name, \n 'Dice':Dice_Coef_Multilabel(true_mask, prediction_mask).numpy(), \n 'Accuracy':np.mean(tf.metrics.categorical_accuracy(true_mask, prediction_mask)), \n 'CE':np.mean(tf.metrics.categorical_crossentropy(true_mask, prediction_mask))}\n \n return [score, conf]" ]
[ "0.65281624", "0.62313795", "0.6157317", "0.5946783", "0.59059757", "0.58558685", "0.5855444", "0.58469844", "0.58335406", "0.5805809", "0.57551163", "0.57410544", "0.5736132", "0.5734934", "0.5715027", "0.5689544", "0.5608706", "0.5605638", "0.55747133", "0.55457956", "0.55336136", "0.55179524", "0.551791", "0.5516553", "0.55109155", "0.5499435", "0.5489197", "0.54751855", "0.544271", "0.54262084", "0.5425691", "0.5425504", "0.54226226", "0.54068744", "0.5406438", "0.5403279", "0.5402485", "0.5394804", "0.5389523", "0.5374884", "0.53564274", "0.53464377", "0.5342353", "0.5329036", "0.5328826", "0.5321899", "0.5315265", "0.5306678", "0.53043103", "0.52938074", "0.5293275", "0.52921367", "0.5290696", "0.5284669", "0.5278019", "0.5275928", "0.5274406", "0.527342", "0.52704793", "0.5266495", "0.52631813", "0.52604204", "0.52598834", "0.52492565", "0.5240989", "0.5240489", "0.52371895", "0.5222397", "0.52203786", "0.52190715", "0.52095586", "0.5200856", "0.51972187", "0.51967436", "0.51872313", "0.5183931", "0.51827437", "0.5173754", "0.5166987", "0.5166029", "0.5162765", "0.5156232", "0.51556003", "0.5155103", "0.51523364", "0.51506764", "0.5144196", "0.5139625", "0.51367444", "0.513243", "0.5120074", "0.5120004", "0.51175326", "0.51150715", "0.51129776", "0.51046", "0.5095753", "0.5095319", "0.50908375", "0.508835" ]
0.6687717
0
test dictionary input as kwargs in an intermediate layer
def test_kwargs_input_dict_output(self): class KwargModel(torch.nn.Module): def __init__(self): super().__init__() self.mul = aimet_torch.elementwise_ops.Multiply() def forward(self, a, b, c): ab = a * b bc = b * c ca = self.mul(c, a) return {'ab': ab, 'bc': bc, 'ca': ca} class Net(torch.nn.Module): """ Model using multiply as functional and module at different depths """ def __init__(self): super().__init__() self.layer = KwargModel() def forward(self, x): return self.layer(**x) model = Net() # Add an empty dictionary as the last element to not treat as named arguments. # see torch.onnx.export() API for more details. dummy_input = ( {'a': torch.randn(1, 10, 10, 10), 'b': torch.randn(1, 10, 10, 10), 'c': torch.randn(1, 10, 10, 10) }, {} ) onnx_path = './data/MyModel.onnx' torch.onnx.export(model, dummy_input, onnx_path) onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input) onnx_model = onnx.load(onnx_path) onnx.checker.check_model(onnx_model) self.check_onnx_node_name_uniqueness(onnx_model) for node in onnx_model.graph.node: assert node.name.startswith('layer') or node.name.startswith('/layer') if os.path.exists(onnx_path): os.remove(onnx_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kw_args_with_dict():\n arg_dict = {'visited_color': 'blue',\n 'link_color': 'red',\n 'back_color': 'yellow',\n 'fore_color': 'orange'}\n assert arguments.fun_opt_kw_params(**arg_dict) == ('orange', 'yellow',\n 'red', 'blue')", "def kwargs(kwargs):\n run_kwargs(kwargs)", "def test_kwargs(self):\n kwargs = forge.kwargs\n assert isinstance(kwargs, forge._signature.VarKeyword)\n assert kwargs.name == 'kwargs'\n assert kwargs.converter is None\n assert kwargs.validator is None", "def test_map_args_no_kwargs():\n pass", "def inner_test(param: dict):\n pass", "def test_star_args_with_dict():\n arg_dict = {'visited_color': 'orange',\n 'link_color': 'yellow',\n 'back_color': 'red',\n 'fore_color': 'blue'}\n assert arguments.fun_star_params(**arg_dict) == ('orange', 'yellow',\n 'red', 'blue')", "def test_kwargs(self):\n def f(**kwargs):\n self.assertEqual(kwargs, {'spam': 'eggs'})\n\n kwargs = self.decode('\\n\\x0b\\x01\\tspam\\x06\\teggs\\x01')\n\n f(**kwargs)", "def test_kw_args_with_tuple_and_dict():\n arg_tuple = ('orange', 'yellow')\n arg_dict = {'visited_color': 'blue',\n 'link_color': 'red'}\n\n assert arguments.fun_opt_kw_params(*arg_tuple, **arg_dict) == ('orange',\n 'yellow',\n 'red',\n 'blue')", "def evaluate_mapped_inputs(self,**kwargs):\n print(\"DEBUG\")\n pprint(kwargs)\n return {}", "def test_accepts_kwargs(self):\n self.Test.scope('foo', where='foo')\n self.assertEqual(self.Test.foo().params['where'], ['foo'])", "def test_kwargs():\n client, server = make_queue_pairs('localhost')\n client.send_inputs(1, input_kwargs={'hello': 'world'})\n _, task = server.get_task()\n assert task.args == (1,)\n assert task.kwargs == {'hello': 'world'}", "def test_020_kwargs(self):\n caller = self.get_caller([KwargsTaskOverride])\n self.assertEqual([\"A\", \"B\"], caller(\"A\", \"B\"))", "def test__call__(self):\n kwargs = dict(\n name='b',\n type=int,\n converter=dummy_converter,\n validator=dummy_validator,\n metadata={'meta': 'data'},\n )\n vark = VarKeyword()(**kwargs)\n name, fparam = self.assert_mapping_and_get_fparam(vark)\n assert isinstance(fparam, FParameter)\n assert name == kwargs['name']\n assert immutable.asdict(fparam) == dict(\n FPARAM_VKW_DEFAULTS,\n **kwargs,\n interface_name=kwargs['name'],\n )", "def test_operation_args_kwarg_kwarglist(self, parse_input_mocked_metadata, dc):\n bb = parse_input_mocked_metadata(\n 'MeasureHomodyne(0.23, 0.54, select={}, hi=\"yes\") | 0\\n'.format(dc)\n )\n assert bb.operations == [\n {\n \"modes\": [0],\n \"op\": \"MeasureHomodyne\",\n \"args\": [0.23, 0.54],\n \"kwargs\": {\"select\": dc, \"hi\": \"yes\"},\n }\n ]", "def test_func(**kwargs: Dict[str, Any]) -> None:\n click.echo(json.dumps(kwargs))", "def get_dict(**kwargs):\n return kwargs", "def test_operation_kwarg(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"Coherent(alpha=-0.3+2j) | 0\\n\")\n assert bb.operations == [\n {\"modes\": [0], \"op\": \"Coherent\", \"args\": [], \"kwargs\": {\"alpha\": -0.3 + 2j}}\n ]", "def func1(key, my_test_dict=my_test_dict):\n return key in my_test_dict", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def test_op_lambda_with_kwargs(self) -> None:\n op_base = OpLambda(func=lambda x, y: x + y)\n kwargs_per_step_to_add = [dict(), dict(), dict()]\n op_repeat = OpRepeat(op_base, kwargs_per_step_to_add)\n sample_dict = NDict()\n sample_dict[\"data.val.a\"] = 5\n sample_dict = op_repeat(sample_dict, \"_.test_repeat\", key=\"data.val.a\", y=5)\n self.assertEqual(sample_dict[\"data.val.a\"], 20)", "def 
_validate_kwargs(self, kwargs):\n pass", "def test_accepts_dictionary(self):\n self.Test.scope('foo', {'where': 'foo'})\n self.assertEqual(self.Test.foo().params['where'], ['foo'])", "def test_kwargs(self):\n\n @sync_performer\n def p(dispatcher, intent, extra):\n return extra\n\n dispatcher = lambda _: partial(p, extra=\"extra val\")\n result = sync_perform(dispatcher, Effect(\"foo\"))\n self.assertEqual(result, \"extra val\")", "def test_backwards_compat_kwargs_conversion(\n incoming: t.Dict[str, t.Any], updated: t.Dict[str, t.Any]\n) -> None:\n assert pypiserver.backwards_compat_kwargs(incoming) == updated", "def add_kwargs():\n pass", "def _filter_kwargs(names, dict_):\n return {k: v for k, v in dict_.items() if k in names and v is not None}", "def check_in_kwargs(kwarg_names):\n def layer(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n for kwarg in kwarg_names:\n if kwarg not in kwargs:\n raise SCBPaymentError('\"{0}\" attrs is required'.format(kwarg))\n return func(self, *args, **kwargs)\n return wrapper\n return layer", "def test_kwargs(self):\n user_1_dict = self.user_1.to_dict()\n user_2 = User(**user_1_dict)\n\n self.assertIsInstance(user_2, User)\n self.assertIsNot(self.user_1, user_2)\n self.assertEqual(self.user_1.__dict__, user_2.__dict__)", "def _run_kwargs(cls, kwargs: Dict[str, Any]):\n parser = cls.setup_args()\n opt = parser.parse_kwargs(**kwargs)\n return cls._run_from_parser_and_opt(opt, parser)", "def test_map_args_level():\n pass", "def test_create_keyword_only(self, extra_in, extra_out):\n kwargs = dict(\n interface_name='a',\n name='b',\n type=int,\n converter=dummy_converter,\n validator=dummy_validator,\n metadata={'meta': 'data'},\n )\n fparam = FParameter.create_positional_or_keyword(**kwargs, **extra_in)\n assert isinstance(fparam, FParameter)\n assert immutable.asdict(fparam) == \\\n {**FPARAM_POK_DEFAULTS, **kwargs, **extra_out}", "def _JsonDictToArgs(cls, path_context, data_location, dct, memo=None):\n if(cls is InputGenerator):\n tag = dct['tag']\n data = dct['data']\n return cls._registered[tag]._JsonDictToArgs(path_context, data_location, data, memo=memo)\n else:\n _, args, kwargs = super()._JsonDictToArgs(path_context, data_location, dct, memo=memo)\n args.append(StageMeta.Load(path_context, dct['meta'], memo=memo))\n return cls, args, kwargs", "def flexdictargs(func: Callable[[dict], RT]) -> Callable[[Iterable, Any], RT]:\n\n @wraps(func)\n def f(self, *args, **kwargs):\n if args and isinstance(args[0], MutableMapping):\n d = args[0]\n elif kwargs:\n d = kwargs\n else:\n raise TypeError(\"invalid input arguments\")\n return func(self, normalize(d))\n\n return f", "def test_kwargs_not_false_positive(*args, **kwargs):\n 'Hello John Doe {0[0]}'.format(args)\n 'Hello {0[name]}'.format(kwargs)", "def test_star_kwargs():\n\n @type_checked\n def _run_test(nothing, special=None, going:int=12, on=\"here\", **kw:str):\n assert nothing == \"hello\"\n assert special == 50.12\n assert going == 1999\n assert on is True\n assert kw[\"other\"] == \"False\"\n assert kw[\"thing\"] == \"15\"\n\n _run_test(\"hello\", 50.12, going=\"1999\", on=True, other=False, thing=15)", "def test_onearg_and_keyword(self):\n varargs = (12,)\n kwargs = {'default' : 13}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 13)\n self.assert_(len(var_dict) == 2)", "def _map_args_kwargs_to_input(self, *args, **kwargs) -> Dict[str, Any]:\n 
input_dict = {k: v for k, v in zip(self.inputs, args)}\n input_dict.update(kwargs)\n\n return input_dict", "def callFuncBasedOnDict(func, argdict, **kwargs):\n if argdict is None:\n argdict = {}\n seldict = selectArgsFromDict(func, argdict)\n if kwargs is not None:\n seldict.update(kwargs)\n return func(**seldict)", "def filter_kwargs(dict_to_filter, function_to_call):\n\n sig = inspect.signature(function_to_call)\n filter_keys = [param.name for param in sig.parameters.values() if (param.kind == param.POSITIONAL_OR_KEYWORD)]\n valid_args = {}\n invalid_args = {}\n\n for key in dict_to_filter:\n if key in filter_keys:\n valid_args[key] = dict_to_filter[key]\n else:\n invalid_args[key] = dict_to_filter[key]\n return valid_args, invalid_args", "def test_kwargs(self):\n self.Test.default_scope(where='foo')\n self.assertEqual(self.Test.scoped().params['where'], ['foo'])", "def check_call_contains_kwargs(\n call: Tuple,\n params: dict,\n param_mapping: Optional[dict] = None,\n) -> None:\n _param_mapping = {'negative_gradient_method': 'objective_function',\n 'early_exaggeration_iter': 'n_iter',\n 'late_exaggeration_iter': 'n_iter',\n 'early_exaggeration': 'exaggeration',\n 'late_exaggeration': 'exaggeration',\n 'initial_momentum': 'momentum',\n 'final_momentum': 'momentum'}\n if param_mapping is not None:\n _param_mapping.update(param_mapping)\n\n name, args, kwargs = call\n for key in params:\n # If a parameter isn't named the same way in the call\n if key in _param_mapping:\n kwargs_key = _param_mapping[key]\n else:\n kwargs_key = key\n\n expected_value = params[key]\n actual_value = kwargs.get(kwargs_key, None)\n if expected_value != actual_value:\n raise AssertionError(\n 'Mock not called with `%s=%s`. Called with `%s`' %\n (key, expected_value, actual_value)\n )", "def test_direct_invocation_works():\n assert (_add)(*[1, 2], **{\"3\": 3, \"4\": 4}) == 10", "def test_defaults(self):\n vark = VarKeyword()\n name, fparam = self.assert_mapping_and_get_fparam(vark)\n assert name == 'kwargs'\n assert fparam.type == empty\n assert not fparam.converter\n assert not fparam.validator\n assert not fparam.metadata", "def selectArgsFromDict(func, argdict):\n return dict([(i, argdict[i]) for i in getArgs(func) if i in argdict])", "def _kwargs_check(feature_extraction, kwargs):\n # When using policy_kwargs parameter on model creation,\n # all keywords arguments must be consumed by the policy constructor except\n # the ones for the cnn_extractor network (cf nature_cnn()), where the keywords arguments\n # are not passed explicitly (using **kwargs to forward the arguments)\n # that's why there should be not kwargs left when using the mlp_extractor\n # (in that case the keywords arguments are passed explicitly)\n if feature_extraction == 'mlp' and len(kwargs) > 0:\n raise ValueError(\"Unknown keywords for policy: {}\".format(kwargs))", "def test_dict_to_dict(self):\n @converters.wrap\n def inner_test(param: dict):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, {'foo': 1, 'bar': ['bat', 2]})\n inner_test(param={'foo': 1, 'bar': ['bat', 2]})", "def inner_test(param: dict):\n self.assertEqual(param, {'foo': 1, 'bar': ['bat', 2]})", "def test_map_args_invalid():\n pass", "def test_single_keyword_arg_provided(self):\n _func = required_parameters('arg1')(undecorated_func)\n self.assertEqual(_func(arg1='hello'), 'foo')", "def test_map_args_all_none():\n pass", "def myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n 
print('I did not find any fruit here')", "def test_kw_validation_with_trait_type_instances(self):\n\n @function(x=Int(10), y=Int(20), _returns_=Int(30))\n def add(**kw):\n return kw['x'] + kw['y']\n\n self.assertEqual(add(x=8, y=2), 10)\n self.failUnlessRaises(TraitError, add, x=2, y='xxx')\n\n return", "def test_kw_args_with_keywords():\n assert arguments.fun_opt_kw_params(visited_color='blue',\n link_color='red',\n back_color='yellow',\n fore_color='orange') == ('orange',\n 'yellow',\n 'red', 'blue')", "def test_named_params(self):\n varargs = ()\n kwargs = {'arg1' : \"arg1_val\", 'default' : \"default_val\"}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assertEquals(kwargs, var_dict)", "def test_operation_args_and_kwarg(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata('MeasureHomodyne(0.23, 0.54, select=0.41, hi=\"yes\") | 0\\n')\n assert bb.operations == [\n {\n \"modes\": [0],\n \"op\": \"MeasureHomodyne\",\n \"args\": [0.23, 0.54],\n \"kwargs\": {\"select\": 0.41, \"hi\": \"yes\"},\n }\n ]", "def test_kwargs(self):\n user1 = User(email='[email protected]', password='1234', first_name='Jack', last_name='Off')\n self.assertTrue(hasattr(user1, \"email\"))\n self.assertTrue(hasattr(user1, \"password\"))\n self.assertTrue(hasattr(user1, \"first_name\"))\n self.assertTrue(hasattr(user1, \"last_name\"))", "def test_kw_args_with_defaults():\n assert arguments.fun_opt_kw_params() == ('blue', 'red', 'yellow', 'orange')", "def validate_dict(in_dict, **kwargs):\n\n if not isinstance(in_dict, dict):\n raise ValueError('requires a dictionary')\n\n for key, value in iteritems(kwargs):\n\n if key == 'required':\n for required_key in value:\n if required_key not in in_dict:\n return False\n\n elif key not in in_dict:\n continue\n\n elif value == bool:\n\n in_dict[key] = (True\n if str(in_dict[key]).lower() == 'true'\n else False)\n\n else:\n\n if (isinstance(in_dict[key], list) and\n len(in_dict[key]) == 1 and\n value != list):\n in_dict[key] = in_dict[key][0]\n\n try:\n if key in in_dict:\n in_dict[key] = value(in_dict[key])\n except ValueError:\n return False\n\n return True", "def test_kw_args_with_positional():\n assert arguments.fun_opt_kw_params('blue', 'red', 'yellow',\n 'orange') == ('blue', 'red', 'yellow',\n 'orange')", "def filter_args(**kwargs):\n valid_args = \"tol_num_frames\"\n\n d = dict((k, kwargs[k]) for k in valid_args if k in kwargs)\n\n return d", "def test_operation_multiple_kwarg(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"MeasureHomodyne(phi=0.23, b=1) | 0\\n\")\n assert bb.operations == [\n {\"modes\": [0], \"op\": \"MeasureHomodyne\", \"args\": [], \"kwargs\": {\"phi\": 0.23, \"b\": 1}}\n ]", "def test_beaker_kwargs(self):\n css_source = stylesheet_link('/deep/a.css', '/b.css', combined=True, minified=True)\n from fixtures import beaker_container\n self.assertEqual(beaker_container, beaker_kwargs)\n\n css_source = stylesheet_link('/deep/a.css', '/b.css', combined=True, minified=True, beaker_kwargs={'foo': 'bar'})\n from fixtures import beaker_container\n beaker_kwargs.update({'foo': 'bar'})\n self.assertEqual(beaker_container, beaker_kwargs)", "def test_dict_keywords(self):\n output, _err = self.executor.docker.run('lego:1', env=dict(SPECIAL='emett',\n SONG='awesome')).batch()\n self.assertEqual(output, 'everything')", "def kwargsdec(f):\n def wrapper(**kwargs):\n args = inspect.getargspec(f).args\n return f(**{ k: kwargs[k] for k in args})\n 
return wrapper", "def filter_func(interface):\n return (\n all(getattr(interface, key) for key in args) and\n all(getattr(\n interface, key) == val for key, val in kwargs.items())\n )", "def _folium_kwargs(self):", "def parseKwargs(acceptable,kwargs):\n \n output = {}\n\n if kwargs:\n for key in kwargs.keys():\n \n if key in acceptable:\n output[key] = kwargs[key]\n\n return output", "def test_create_positional_or_keyword(self, extra_in, extra_out):\n kwargs = dict(\n type=int,\n converter=dummy_converter,\n validator=dummy_validator,\n metadata={'meta': 'data'},\n )\n fparam = FParameter.create_positional_or_keyword(**kwargs, **extra_in)\n assert isinstance(fparam, FParameter)\n assert immutable.asdict(fparam) == \\\n {**FPARAM_POK_DEFAULTS, **kwargs, **extra_out}", "def kwarg_check(kwargs: dict, options: list, callback: str) -> bool:\n if kwargs is not None:\n for key in kwargs.keys():\n if key not in options:\n raise InvalidKwargError(\n func=callback,\n key=key,\n value=kwargs[key],\n options=options,\n )\n\n # If 'zoom' is in kwargs\n if (\"zoom\" in kwargs) and (kwargs[\"zoom\"] < 14 or kwargs[\"zoom\"] > 17):\n\n # Raising exception for invalid zoom value\n raise InvalidOptionError(\n param=\"zoom\", value=kwargs[\"zoom\"], options=[14, 15, 16, 17]\n )\n\n # if 'image_type' is in kwargs\n if (\"image_type\" in kwargs) and (\n kwargs[\"image_type\"] not in [\"pano\", \"flat\", \"all\"]\n ):\n\n # Raising exception for invalid image_type value\n raise InvalidOptionError(\n param=\"image_type\",\n value=kwargs[\"image_type\"],\n options=[\"pano\", \"flat\", \"all\"],\n )\n\n # If all tests pass, return True\n return True", "def test_valid_analysis_request(analysis_request_dict: JSONDict) -> None:\n\n request = AnalysisRequest(**analysis_request_dict)\n\n assert request.dict() == analysis_request_dict", "def test_validate_params(mocker, params):\n validate_params(**params)", "def __call__(self, *args, **kwargs) -> Dict[str, Any]:\n pass", "def testSimpleEchoMethodWithKeywordArgs(self):\n body = dumps({'id': 100, 'jsonrpc': '2.0', 'method': 'pass',\n 'params': {'ingredient1': 'sugar',\n 'ingredient2': 'spice'}})\n headers = Headers({'Content-Length': [str(len(body))],\n 'Content-Type': ['application/json']})\n request = FakeRequest(headers=headers, body=body)\n resource = TestResource(None, None)\n result = yield resource.deferred_render_POST(request)\n response = loads(result)\n self.assertEqual({'args': [], 'kwargs': {'ingredient1': 'sugar',\n 'ingredient2': 'spice'}},\n response['result'])", "def test_class_kwargs(self):\n dictonary = {\n 'id': '662a23b3-abc7-4f43-81dc-64c000001c00', 'score': 100}\n state1 = State(**dictonary)\n self.assertTrue(hasattr(state1, \"id\"))\n self.assertEqual(state1.id, '662a23b3-abc7-4f43-81dc-64c000001c00')\n self.assertTrue(hasattr(state1, \"score\"))\n self.assertEqual(state1.score, 100)\n self.assertTrue(hasattr(state1, \"created_at\"))\n self.assertTrue(type(state1.updated_at), datetime)\n self.assertTrue(hasattr(state1, \"updated_at\"))\n self.assertTrue(type(state1.created_at), datetime)\n self.assertEqual(state1.__class__.__name__, \"State\")", "def test_data_preparer_get_params(deep):\n from foreshadow.preparer import DataPreparer\n\n dp = DataPreparer()\n params = dp.get_params(deep=deep)\n assert \"cleaner_kwargs\" in params\n assert \"cache_manager\" in params\n # assert \"engineerer_kwargs\" in params\n assert \"intent_kwargs\" in params\n assert \"preprocessor_kwargs\" in params\n # assert \"reducer_kwargs\" in params\n assert \"y_var\" in 
params\n assert \"steps\" in params", "def test_get_cases_for_dict(self):\n pass", "def test_call(self):\n c = ConfigDict()\n self.assertEqual(c, c(a=1))\n self.assertTrue('a' in c)\n self.assertEqual(1, c.a)", "def appropriate_kwargs(kwargs, func):\n sig = inspect.signature(func)\n filter_keys = [\n param.name\n for param in sig.parameters.values()\n if param.kind == param.POSITIONAL_OR_KEYWORD and param.name in kwargs.keys()\n ]\n appropriate_dict = {filter_key: kwargs[filter_key] for filter_key in filter_keys}\n return appropriate_dict", "def _evalKwargs(self, kwargs):\n\n for key, value in kwargs.iteritems():\n if key == 'xmin': self.xmin = value\n elif key == 'xmax': self.xmax = value\n elif key == 'ymin': self.ymin = value\n elif key == 'ymax': self.ymax = value\n elif key == 'raster': self.raster = value\n elif key == 'yerr': self.yerr = value\n elif key == 'xerr': self.xerr = value\n elif key == 'limit': self.limit = value # boolean\n elif key == 'shaded': self.shaded = value # list\n elif key == 'hline': self.hline = value\n elif key == 'label': self.label = value\n elif key == 'fillbelow': self.fillbelow = value # boolean\n elif key == 'alpha': self.alpha = value\n elif key == 'scinot': self.scinot = value\n elif key == 'prop': self.prop = value\n else:\n print(\"\\t=== Key '{}' unknown ===\".format(key))", "def get_run_method_kwargs(self, **kwargs) -> dict:\n return {\n key: value\n for key, value in kwargs.items()\n if self.input_definitions.get(key=key).run_method_input\n }", "def check_params(info_dict):\n # check the info_dict\n if not isinstance(info_dict, dict):\n raise TypeError(\"info_dict should be dict, but the input is %s\" % \\\n type(info_dict))\n\n # check the op_type info\n if \"op_type\" not in info_dict.keys():\n raise KeyError(\"the keyword 'op_type' is missing in input params\")", "def test_kwargs(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.6)\n def func(x, e=2):\n return x**e\n \n a = func(1, e=2)\n self.assertEqual(len(self.storage), 1)\n self.assertEqual(a, 1**2)\n \n a = func(2, e=2)\n self.assertEqual(len(self.storage), 2)\n self.assertEqual(a, 2**2)\n \n a = func(1, e=3)\n self.assertEqual(len(self.storage), 3)\n self.assertEqual(a, 1**3)\n \n a = func(2, e=3)\n self.assertEqual(len(self.storage), 4)\n self.assertEqual(a, 2**3)\n \n a = func(1.5, e=2)\n self.assertEqual(len(self.storage), 4)\n self.assertAlmostEqual(a, 0.5*(1**2 + 2**2))\n \n a = func(1.5, e=3)\n self.assertEqual(len(self.storage), 4)\n self.assertAlmostEqual(a, 0.5*(1**3 + 2**3))", "def test_keyword(self):\n varargs = ()\n kwargs = {'default' : 12}\n method = getattr(self.foo,'f_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['default'] == 12)\n self.assert_(len(var_dict) == 1)", "def check_params(params: dict) -> Callable:\n\n def _decorator(test_case: Callable) -> Callable:\n @wraps(test_case)\n def _wrapper(self):\n for param_name in params:\n for param_value in params[param_name]:\n test_case(self, param_name, param_value)\n\n return _wrapper\n\n return _decorator", "def _extract_params(self, kwargs, hyperparameters):\n init_params = dict()\n fit_params = dict()\n produce_params = dict()\n\n for name, param in hyperparameters.get('fixed', dict()).items():\n if name in kwargs:\n value = kwargs.pop(name)\n\n elif 'default' in param:\n value = param['default']\n\n else:\n raise TypeError(\"{} required argument '{}' not found\".format(self.name, name))\n\n init_params[name] = 
value\n\n for name, param in hyperparameters.get('tunable', dict()).items():\n if name in kwargs:\n init_params[name] = kwargs.pop(name)\n\n if not isinstance(self.fit_args, str):\n fit_args = [arg['name'] for arg in self.fit_args]\n else:\n fit_args = []\n\n if not isinstance(self.produce_args, str):\n produce_args = [arg['name'] for arg in self.produce_args]\n else:\n produce_args = []\n\n for name in list(kwargs.keys()):\n if name in fit_args:\n fit_params[name] = kwargs.pop(name)\n\n elif name in produce_args:\n produce_params[name] = kwargs.pop(name)\n\n if kwargs:\n error = \"Unexpected hyperparameters '{}'\".format(', '.join(kwargs.keys()))\n raise TypeError(error)\n\n return init_params, fit_params, produce_params", "def test_star_args_with_tuple_and_dict():\n arg_tuple = ('orange', 'yellow')\n arg_dict = {'visited_color': 'red',\n 'link_color': 'blue'}\n\n assert arguments.fun_star_params(*arg_tuple, **arg_dict) == ('orange',\n 'yellow',\n 'red',\n 'blue')", "def test_map_args_include_time():\n pass", "def test_component_specifications_init_arg_dict(self):\r\n\t\tinit_args = self._configuration_[\"RemoveWordDefinitionTask\"].init_args()\r\n\r\n\t\tself.assertTrue(type(init_args) == dict and init_args.has_key(\"repeat\") and init_args[\"repeat\"] == \"{$RemoveWordTaskRepeat}\")", "def test_RestrictingNodeTransformer__visit_In_Dict():\n assert restricted_eval('2 in {1: 1, 2: 2, 3: 3}') is True", "def apply(self, func):\r\n return func(**self.kwargs)", "def _validate_kw(obj, fn, trait_types, kw):\n\n actual = {}\n for name, value in kw.items():\n trait_type = trait_types.get(name)\n if trait_type is not None:\n value = trait_type.validate_method_argument(obj, fn, name, value)\n\n actual[name] = value\n \n return actual", "def _verify_match_kwargs(self, match_kwargs, exclusions):\n for k in match_kwargs:\n assert k in self.statespace, (\n '%s is not a valid dimension to match against' % k)\n for k, v in match_kwargs.iteritems():\n assert v in self.statespace[k], (\n '%s is not a valid value for dimension %s' % (v, k))\n if exclusions:\n for k in match_kwargs:\n assert k in self.statespace, (\n '%s is not a valid dimension to exclude on' % k)\n for k, v in exclusions.iteritems():\n for w in v:\n assert w in self.statespace[k], (\n '%s is not a valid value for dimension %s' % (w, k))", "def get_args(multidict):\n data = {}\n for key in multidict.keys():\n data[key] = multidict.get(key)\n return data", "def filter_args_dict(self, args):\n return dict((k,v) for (k,v) in viewitems(args) if self.has_arg(k))", "def _verify_arguments(self, kwargs: dict[str, Any]):\n geom_stat_args = kwargs.keys() | self._stat._kwargs.keys()\n unknown = (\n geom_stat_args\n - self.aesthetics()\n - self.DEFAULT_PARAMS.keys() # geom aesthetics\n - self._stat.aesthetics() # geom parameters\n - self._stat.DEFAULT_PARAMS.keys() # stat aesthetics\n - { # stat parameters\n \"data\",\n \"mapping\",\n \"show_legend\", # layer parameters\n \"inherit_aes\",\n \"raster\",\n }\n ) # layer parameters\n if unknown:\n msg = (\n \"Parameters {}, are not understood by \"\n \"either the geom, stat or layer.\"\n )\n raise PlotnineError(msg.format(unknown))", "def test_target_kwarg(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example (shots=10, hbar=0.2)\")\n assert bb.target[\"options\"] == {\"shots\": 10, \"hbar\": 0.2}", "def test_kwarg_nonbool():\n\n with pytest.raises(ValueError) as error:\n # because this happens in the wrap, but before the wrap, we don't need\n # a test function, we just 
have to not be None\n type_checked(func=False, debug=\"abc\")\n\n assert \"abc is not a valid config value.\" in error.value.args", "def test_kw_validation_with_trait_type_classes(self):\n\n @function(x=Int, y=Int, _returns_=Int)\n def add(**kw):\n return kw['x'] + kw['y']\n\n self.assertEqual(add(x=8, y=2), 10)\n self.failUnlessRaises(TraitError, add, x=2, y='xxx')\n\n return", "def Dict(**args):\n return args", "def test_passes_on_args(self):\n record = []\n\n @self.actions(\"ctx_name\", [])\n def myview(request, *args, **kwargs):\n record.extend([args, kwargs])\n\n myview(self.req(\"get\", \"/\"), \"a\", b=2)\n\n self.assertEqual(record, [(\"a\",), {\"b\": 2}])" ]
[ "0.68380266", "0.6642572", "0.6544952", "0.6419672", "0.64001125", "0.63954014", "0.63548684", "0.6272449", "0.6210259", "0.6194217", "0.61427027", "0.6134622", "0.61176914", "0.61130285", "0.61014044", "0.6090693", "0.60874087", "0.6078532", "0.60526466", "0.5990979", "0.59765893", "0.5960931", "0.5915863", "0.59149563", "0.58966935", "0.58812743", "0.5860754", "0.5847603", "0.5846457", "0.58382213", "0.58369505", "0.5806435", "0.5800405", "0.5773798", "0.5771069", "0.5753008", "0.5750176", "0.57454896", "0.5737907", "0.57303256", "0.5727077", "0.57244337", "0.5700637", "0.5690441", "0.566859", "0.5664757", "0.56608164", "0.56592655", "0.56431293", "0.5613524", "0.55967283", "0.559585", "0.55949265", "0.558937", "0.5588034", "0.5582652", "0.5577874", "0.55724096", "0.5569378", "0.55689293", "0.5568579", "0.55425096", "0.55419403", "0.55388856", "0.5534195", "0.5530578", "0.54980785", "0.54966354", "0.5495512", "0.54933405", "0.54841113", "0.54815775", "0.54813063", "0.5480118", "0.5471778", "0.5469877", "0.54579747", "0.5447742", "0.5444835", "0.5439776", "0.5439007", "0.54385185", "0.54362667", "0.5432772", "0.5430415", "0.54293066", "0.54214656", "0.54188114", "0.5415239", "0.5414558", "0.54090595", "0.5408971", "0.5401669", "0.53945726", "0.5393042", "0.53902215", "0.53861225", "0.53814304", "0.53784055", "0.5374589" ]
0.6957905
0
test naming works for model have large number of sequential nodes
def test_naming_for_model_with_deep_graph(self): model = models.resnet152(pretrained=False) dummy_input = torch.randn(1, 3, 224, 224) onnx_path= './data/' + model.__class__.__name__ + '.onnx' with onnx_simply(True): onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input, is_conditional=False, module_marker_map={}) onnx_model = onnx.load(onnx_path) onnx.checker.check_model(onnx_model) self.check_onnx_node_names(onnx_model) counts = defaultdict(int) top_level_nodes = tuple(['conv1', 'bn1', 'relu', 'maxpool', 'avgpool', 'Flatten_', '/Flatten', 'fc']) for node in onnx_model.graph.node: if node.name.startswith(top_level_nodes): continue elif '.' in node.name: layer_name = '.'.join(node.name.split('#')[0].split('.')[:-1]) counts[layer_name] += 1 elif node.name.startswith('/'): layer_name = '.'.join(node.name.split('/')[1:-1]) counts[layer_name] += 1 for name, counts in counts.items(): if 'downsample' in name: assert counts == 2 else: print(name, counts) assert counts == 10 if os.path.exists(onnx_path): os.remove(onnx_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_unique_node_names(self):\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def _generate_node_name(self):\r\n while 1:\r\n name = \"node\" + str(self._name_sequence)\r\n if name not in self.nodes.keys():\r\n break\r\n self._name_sequence += 1\r\n\r\n return name", "def check_onnx_node_name_uniqueness(onnx_model):\n onnx_node_names = [node.name for node in onnx_model.graph.node]\n assert len(onnx_node_names) == len(set(onnx_node_names)), f'list size mismatch, check if names are unique'", "def test_non_leaf_module_names(self):\n class Net(torch.nn.Module):\n \"\"\"\n Model using multiply as functional and module at different depths\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer = HierarchicalMultiplyModule()\n\n def forward(self, x):\n return self.layer(x)\n\n model = Net()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n expected_names = [\n # names compatible with torch 1.9.1 version (should be removed in the future)\n 'layer.mul1.mul',\n 'layer.mul1.Mul_7',\n 'layer.mul2.mul',\n 'layer.mul2.Mul_15',\n 'layer.Mul_18',\n \n # names compatible with torch 1.13.1 version \n '/layer/mul1/Mul',\n '/layer/mul2/Mul',\n '/layer/Mul'\n ]\n for node in onnx_model.graph.node:\n assert 'Constant' in node.name or node.name in expected_names\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def test_onnx_node_name_to_input_output_names_util(self):\n model = models.resnet18(pretrained=False)\n dummy_input = torch.randn(1, 3, 224, 224)\n torch.onnx.export(model, dummy_input, './data/resnet18.onnx')\n onnx_utils.OnnxSaver.set_node_names('./data/resnet18.onnx', model, dummy_input, is_conditional=False,\n module_marker_map={})\n onnx_model = onnx.load('./data/resnet18.onnx')\n\n # Get Dict mapping node name to the input and output names\n node_to_io_dict,_ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n\n node_0 = onnx_model.graph.node[0]\n assert node_0.input == node_to_io_dict[node_0.name].inputs\n assert node_0.output == node_to_io_dict[node_0.name].outputs", "def _auto_name(self):\n return \"node_\"+str(self._id)", "def _auto_name(self, job):\n root = job.__class__.__name__\n nodes = list(self.lastNode.nodes())\n matches = [node.name for node in nodes if node.name.startswith(root)]\n logger.debug(\"Node names: %s\" % nodes)\n if (len(matches)==0):\n return root + '_1'\n try:\n iter_str = [name.split('_')[-1] for name in matches]\n logger.debug(\"Node iter_str: %s\" % iter_str)\n iters = [int(i) for i in iter_str]\n logger.debug(\"Node iters: %s\" % iter_str)\n max_iter = max(iters) + 1\n logger.debug(\"max_iter: %s\" % max_iter)\n return root + '_' + 
str(max_iter)\n except:\n logger.warn(\"Could not determine iteration: %s \" % matches)\n return root + '_1'", "def test_instance_naming_creation(os_info):\n NEUTRON.list_security_groups = mock.MagicMock(\n return_value=iter([{\"security_groups\": []}]))\n NEUTRON.create_subnet = mock.MagicMock(\n return_value={\"subnet\": SUBNETS}\n )\n\n instance_names = os_info.nodes_names\n for i in range(len(instance_names)):\n assert instance_names[i] == 'test-node-{}'.format(i + 1)", "def make_unique_node(graph, name):\n if name not in graph:\n return name\n ctr = 1\n while True:\n name_ = name + '_' * ctr\n if name_ not in graph:\n return name_\n ctr += 1", "def test_name(self):\n g = h5g.create(self.fid, '/foobar')\n self.assertEqual(h5i.get_name(g), '/foobar')", "def test_data_naming():\n with pm.Model(\"named_model\") as model:\n x = pm.ConstantData(\"x\", [1.0, 2.0, 3.0])\n y = pm.Normal(\"y\")\n assert y.name == \"named_model::y\"\n assert x.name == \"named_model::x\"", "def _generate_node_name(labels: dict, node_suffix: str) -> str:\n name_label = labels[TAG_RAY_NODE_NAME]\n assert len(name_label) <= (INSTANCE_NAME_MAX_LEN - INSTANCE_NAME_UUID_LEN - 1), (\n name_label,\n len(name_label),\n )\n return f\"{name_label}-{uuid4().hex[:INSTANCE_NAME_UUID_LEN]}-{node_suffix}\"", "def graph_node_names_details(model):\n\n node_details = namedtuple('node_details', ['node', 'outputs'])\n node_names_details = {}\n for initializer in model.initializer():\n initializer_name = initializer.name\n each_node = node_details(node=initializer, outputs=[])\n if initializer_name not in node_names_details:\n each_node.outputs.extend(get_initializer_children_names(model, initializer))\n node_names_details[initializer_name] = each_node\n for node in model.nodes():\n node_name = node.name\n output_names = node.output\n # onnx output has different name from node name\n for output_name in output_names:\n if output_name not in node_names_details:\n node_names_details[output_name] = node_name\n each_node = node_details(node=node, outputs=[])\n if node_name not in node_names_details:\n each_node.outputs.extend(get_node_children_names(model, node))\n node_names_details[node_name] = each_node\n for graph_input in model.graph().input:\n outputs = []\n node_name = graph_input.name\n for k, v in node_names_details.items():\n try:\n if node_name in v.node.input:\n outputs.append(k)\n except BaseException:\n continue\n each_node = node_details(node=graph_input, outputs=outputs)\n # if node_name not in node_names_details:\n node_names_details[node_name] = each_node\n\n return node_names_details", "def test_node_name(self):\n xmlns = {\n \"a\": \"_a\",\n \"g\": \"_g\"\n }\n self.assertEqual(\n utils._node_name(\"a\", \"g\", xmlns),\n (False, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"a@a\", \"g\", xmlns),\n (False, \"a\", \"{_a}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@@a\", \"g\", xmlns),\n (True, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@a@a\", \"g\", xmlns),\n (True, \"a\", \"{_a}a\")\n )\n # something not equal to _\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"1@a@a\", \"g\", xmlns),\n # too many @\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"@@a@a\", \"g\", xmlns),", "def check_root_node_name___fix():\n from stalker import Asset\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n t = v.task\n asset_name = None\n if isinstance(t.parent, Asset):\n asset_name = 
t.parent.name\n\n root_nodes = auxiliary.get_root_nodes()\n root_node_name = root_nodes[0].name()\n\n if asset_name is not None:\n correct_node_name = asset_name\n correct_node_name = correct_node_name.replace(\" \", \"_\")\n if correct_node_name[0].isdigit():\n correct_node_name = \"_%s\" % correct_node_name\n if correct_node_name[-1].isdigit():\n correct_node_name += \"_grp\"\n else:\n correct_node_name = root_node_name\n if correct_node_name[0].isdigit():\n correct_node_name = \"_%s\" % correct_node_name\n if correct_node_name[-1].isdigit():\n correct_node_name += \"_grp\"\n\n root_nodes[0].rename(correct_node_name)", "def generate_predictable_names():\n index = 0\n while True:\n index += 1\n yield f\"_{index}\"", "def gen_nodes(modelfile, starting_genes):\n # read json file with final model variables\n shape, top_genes, weights, output_key, biases = read_json(modelfile)\n\n # initialize database\n database = db.Database()\n\n # create list to store all layers\n NN = []\n\n # get input probe sequences\n input_seqs_df = inputs.probes_df(top_genes)\n # each layer is a dictionary with keys as names of strands and values as a list of seqs\n l_0 = {}\n probe_seqs = []\n for probe in input_seqs_df[\"Probe Sequences\"]:\n index = 0\n size = database.size\n while database.size < size + 1:\n try:\n database.database_insert(Seq(probe[index]))\n index += 1\n # except block handles case that NONE of the probe sequences were accepted into the database\n # ***TEMPORARY FIX***\n except IndexError:\n index -= 1\n break\n probe_seqs.append(Seq(probe[index]))\n l_0[\"Probe Sequence\"] = probe_seqs\n print(\"Layer 0: \", l_0)\n NN.append(l_0)\n\n # add the tether and promotor to the database\n database.database_insert(starting_genes[\"Tether\"])\n database.database_insert(starting_genes[\"T7 Promoter\"])\n\n # generate all the sequences for every node in each layer\n for layer in range(1, len(shape)):\n # add the cage and tether sequences to the layer dictionary\n l_i = {}\n l_i[\"Cage Sense\"] = [starting_genes[\"Cage Sense\"]] * shape[layer]\n l_i[\"Cage Antisense\"] = [starting_genes[\"Cage Antisense\"]] * shape[layer]\n l_i[\"Tether\"] = [starting_genes[\"Tether\"]] * shape[layer]\n\n print(\"getting anchor strands\")\n tether_length = len(starting_genes[\"Tether\"])\n size = database.size\n # generate anchor strands until all of them have been accepted into the database\n while database.size < size + shape[layer]:\n anchor = oligo.oligo(tether_length)\n database.database_insert(anchor)\n anchor_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n print(\"getting transcription factors\")\n threshold_energy = 9 # variable that can be changed, pos integer, see gen_tf for description\n static_tf_seqs = []\n tf_seqs = []\n for anchor in anchor_seqs:\n static_tf, tf = gen_tf(anchor, starting_genes[\"Tether\"], threshold_energy)\n static_tf_seqs.append(static_tf)\n tf_seqs.append(tf)\n print(\"DONE\")\n\n print(\"getting outputs\")\n output_length = 25 # length of dna transcript from one node\n size = database.size\n while database.size < size + shape[layer]:\n output = oligo.oligo(output_length).sequence\n database.database_insert(output)\n transcript_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n # assemble longer strands in the node\n l_i[\"Static TF + Transcript Sense\"] = [static_tf_seqs[i] + starting_genes[\"T7 Promoter\"] + transcript_seqs[i]\n for i in range(shape[layer])]\n l_i[\"Transcript Antisense + Anchor\"] = [\n 
oligo.complement(transcript_seqs[i]) + oligo.complement(starting_genes[\"T7 Promoter\"]) + anchor_seqs[i] for\n i in range(shape[layer])]\n\n # intermediates are the strands that determine weights in toehold-mediated displacement\n print(\"getting intermediate\")\n toe_length = 20 # standard length for all toehold sequences\n # get the 2D matrix for this layer and round the values to one decimal place\n weight_matrix = np.array(weights[layer - 1])\n weight_matrix = np.round(weight_matrix, 1)\n intermediate_seqs = []\n tf_appendage_seqs = []\n for i in range(shape[layer - 1]):\n if layer == 1:\n output = NN[0][\"Probe Sequence\"][i]\n else:\n output = NN[layer - 1][\"Static TF + Transcript Sense\"][i][-output_length:]\n inters = []\n top_toe = output[:toe_length]\n b_dom = output[toe_length:]\n tf_appendage_seqs.append(b_dom)\n # get all the possible sequences for toehold weights between 0 and 1\n weight_dict = quant.find_quanta(top_toe)\n for j in range(shape[layer]):\n w = weight_matrix[j, i]\n tf = tf_seqs[j]\n a_star_tf = tf[:len(tf) // 2]\n if w < 0:\n # negative weights\n inters.append(a_star_tf + oligo.complement(b_dom) + weight_dict[w * -1])\n else:\n # positive weights\n inters.append(oligo.complement(a_star_tf) + oligo.complement(b_dom) + weight_dict[w])\n\n intermediate_seqs.append(inters)\n # each list in the nested list is for one node in the layer, get nodes row-wise\n l_i[\"Intermediate\"] = np.array(intermediate_seqs).T.tolist()\n print(\"DONE\")\n\n # TF and TF Inhibitor are products of toehold-mediated displacement for pos and neg weights, respectively\n full_tf_seqs_2D = []\n attack_seqs_2D = []\n for tf in tf_seqs:\n full_tf_seqs = []\n attack_seqs = []\n for appendage in tf_appendage_seqs:\n full_tf_seq = appendage + tf\n attack_seq = appendage + oligo.complement(tf[:len(tf) // 2])\n full_tf_seqs.append(full_tf_seq)\n attack_seqs.append(attack_seq)\n full_tf_seqs_2D.append(full_tf_seqs)\n attack_seqs_2D.append(attack_seqs)\n l_i[\"TF\"] = full_tf_seqs_2D\n l_i[\"TF Inhibitor\"] = attack_seqs_2D\n\n print(\"Layer {}: \".format(layer), l_i)\n # add the completed layer to the NN list\n NN.append(l_i)\n\n return NN", "def create_node_name(input_node, mode=tuple):\n key = input_node.fullname\n if len(input_node.out_ports()) > 1:\n port_number = input_node.in_port(0).get_source().out\n key = (input_node.fullname, port_number) if mode == tuple else f\"{input_node.fullname}.{port_number}\"\n return key", "def test_set_node_name_for_matmul_add_linear(self, export_args):\n class Linear(torch.nn.Module):\n def __init__(self):\n super(Linear, self).__init__()\n self.linear = torch.nn.Linear(3, 2)\n\n def forward(self, inp):\n x = self.linear(inp)\n return x\n\n model = Linear()\n # Using an input to linear op with dimension != 2 causes torch to use matmul->add instead of gemm op\n onnx_path = './data/MyModel.onnx'\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n expected_node_names = ['linear', 'linear#1.end']\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n for name in expected_node_names:\n assert name in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n # Check that gemm still works as expected\n onnx_utils.OnnxSaver.set_node_names(onnx_path, 
model, dummy_input=torch.randn(1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n assert 'linear' in actual_node_names\n assert 'linear#1' not in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def _generate_node_name(\n self,\n prefix,\n middle,\n suffix,\n ):\n name = ''\n if prefix:\n name += prefix + '-'\n name += middle\n if suffix:\n name += '-' + suffix\n\n return name", "def test_get_node_type_name(self):\n pass", "def test_rename_to_mlflow(mlflow):\n atom = ATOMClassifier(X_bin, y_bin, experiment=\"test\", random_state=1)\n atom.run(\"GNB\")\n atom.scoring()\n assert mlflow.call_count == 10 # 9 from scoring + 1 from training", "def rename_none_node(node_to_rename: newick.Node, counter):\n if node_to_rename.name is None:\n node_to_rename.name = str(node_to_rename.name) + \"_\" + str(counter)\n counter += 1\n return node_to_rename, counter", "def test_func(x):\n for i in range(32):\n handle = self.model_manager.create(name='%s-%s' % (x, i))\n self.assertTrue(\n handle in [m.handle for m in self.model_manager.models()])\n self.model_manager.delete(handle)\n self.assertTrue(\n handle not in\n [m.handle for m in self.model_manager.models()])\n return True", "def unique_name():\n num = 0\n while True:\n yield \"theta_\" + str(num)\n num += 1", "def get_node_name(self, node):\n raise NotImplementedError()", "def get_name():\n return \"SVMd+ - simplified approach\"", "def _get_unique_param_name(self, name, mode):\n _name = name\n inc = 1\n \n if mode == NodeParam.INPUT:\n existing_params = self._input_params\n else:\n existing_params = self._output_params\n \n while _name in existing_params:\n _name = \"%s%i\" % (name, inc) \n inc += 1 \n return _name", "def generate_name(self):\n name = self._generate_test_name()\n while self.exists(name):\n name = self._generate_test_name()\n return name", "def get_node_name(name: str) -> str:\n if is_control_dependency(name):\n return name[1:]\n return name.split(':', maxsplit=1)[0]", "def check_sequence_name___fix():\n # do not consider referenced shot nodes\n shots = pm.ls(type=\"shot\")\n shot = None\n for s in shots:\n if s.referenceFile() is None:\n shot = s\n break\n\n sequencers = shot.outputs(type=\"sequencer\")\n if not sequencers:\n raise PublishError(\"There are no sequencers in the scene!\")\n\n sequencer = sequencers[0]\n\n # get current task\n from anima.dcc import mayaEnv\n\n m = mayaEnv.Maya()\n v = m.get_current_version()\n task = v.task\n\n # get sequence and scene names\n sequence_name = get_seq_name_from_task(task)\n scene_name = get_scene_name_from_task(task)\n\n # set sequencer name as seq_name + sc_name\n name = \"%s_%s\" % (sequence_name, scene_name)\n sequencer.set_sequence_name(name)", "def get_test_name(request):\n return request.node.name", "def test_model_flow_node_model_flow_id_node_id_component_get(self):\n pass", "def __init__(self, name):\n self.nodes = []\n self.name = str(name)", "def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()", "def get_node_name(self):\n return 
util.join_names_underscore(self.name, str(self.as_pointer()))", "def test_model_flow_node_model_flow_id_node_id_component_post(self):\n pass", "def get_name():\n return \"SVMd+\"", "def generate_model_name(self):\n now = datetime.datetime.now()\n name = '%s_%s_%s_%s_%s_%s' % (now.day, now.month, 'rnn', self._controller_type, self._num_layers, self._layer_size)\n if self._dropout > 0:\n name += '_dropout'\n\n return name", "def test_generate_nb_testing(self):\n pass", "def update_edge_node_name(node_name, node_number):\n return node_name.replace('block_' + str(node_number), 'block_' + str(node_number+1))", "def create_nodes(self):", "def test_single_pytorch_module_mapping_to_many_onnx_nodes(self):\n\n AimetLogger.set_level_for_all_areas(logging.DEBUG)\n\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model_name = 'multilayer_lstm'\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n\n torch.onnx.export(model, dummy_input, './data/' + model_name + '.onnx')\n onnx_utils.OnnxSaver.set_node_names('./data/' + model_name + '.onnx', model, dummy_input, is_conditional=False,\n module_marker_map={})\n onnx_model = onnx.load('./data/' + model_name + '.onnx')\n\n lstm_nodes = [node for node in onnx_model.graph.node if node.op_type == 'LSTM']\n assert 3 == len(lstm_nodes)\n\n node_to_io_dict, _ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n assert isinstance(node_to_io_dict['lstm#root_node'], list)\n assert 3 == len(node_to_io_dict['lstm#root_node'])", "def _renumber(model):\n \n # Number each node in the model\n for id, node in enumerate(model.Nodes.values()):\n node.ID = id\n \n # Number each spring in the model\n for id, spring in enumerate(model.Springs.values()):\n spring.ID = id\n\n # Descritize all the physical members and number each member in the model\n id = 0\n for phys_member in model.Members.values():\n phys_member.descritize()\n for member in phys_member.sub_members.values():\n member.ID = id\n id += 1\n \n # Number each plate in the model\n for id, plate in enumerate(model.Plates.values()):\n plate.ID = id\n \n # Number each quadrilateral in the model\n for id, quad in enumerate(model.Quads.values()):\n quad.ID = id", "def ex1_pickle_name(n, prop):\n \n f_name = f\"ukf_agents_{n}_prop_{prop}.pkl\"\n return f_name", "def generateTaskName(self):\n brokenComponent = ['head','hand','leg','body','hand','leg']\n for component in brokenComponent:\n self.enqueue(Task(component))", "def get_model_name(ind: int) -> str:\n nonlocal model_index\n model_index += 1\n return f'{fizz_name}-{fizz_type.model_name}{model_index:02}'", "def rename_value(model: onnx.ModelProto, old_name: str, new_name: str):\n if old_name == new_name:\n return\n logger = get_root_logger()\n logger.info(f'rename {old_name} -> {new_name}')\n for n in model.graph.node:\n for i, output in enumerate(n.output):\n if output == old_name:\n n.output[i] = new_name\n for i, input in enumerate(n.input):\n if input == old_name:\n n.input[i] = new_name\n for v in model.graph.value_info:\n if v.name == old_name:\n v.name = new_name\n for i, input in enumerate(model.graph.input):\n if input.name == old_name:\n input.name = new_name\n for i, output in enumerate(model.graph.output):\n if output.name == old_name:\n output.name = new_name", "def 
generate_artificial_names(seed=\"\", num_names=1):\n generated_names = []\n \n stop = False\n while not stop:\n # generate names more than needed as some names may exist in real life\n num_needed_names = (num_names - len(generated_names)) * 3 // 2\n names = generate_names(seed=seed, num_names=num_needed_names)\n \n # check whether names are in dataset or not\n for name in names:\n if not is_real_name(name):\n generated_names.append(name)\n if len(generated_names) == num_names:\n stop = True\n break\n \n return generated_names", "def generate_artificial_names(seed=\"\", num_names=1):\n generated_names = []\n \n stop = False\n while not stop:\n # generate names more than needed as some names may exist in real life\n num_needed_names = (num_names - len(generated_names)) * 3 // 2\n names = generate_names(seed=seed, num_names=num_needed_names)\n \n # check whether names are in dataset or not\n for name in names:\n if not is_real_name(name):\n generated_names.append(name)\n if len(generated_names) == num_names:\n stop = True\n break\n \n return generated_names", "def rename(self):\n\n # Remove any zero-padding from single-digit parameter names\n # This reverses any change applied by one of the CUDA writers\n for i in range(self.parser.comp-1, len(self.parser.parsedModel.parameterId)):\n old_name = self.parser.parsedModel.parameterId[i]\n num = old_name[len('parameter'):]\n if len(num) > 1 and num[0] == '0':\n new_name = 'parameter' + str(num[1:])\n self.parser.parsedModel.parameterId[i] = new_name\n self.parser.rename_everywhere(old_name, new_name)\n\n # Remove any zero-padding from single-digit species names\n # This reverses any change applied by one of the CUDA writers\n for i in range(len(self.parser.parsedModel.speciesId)):\n old_name = self.parser.parsedModel.speciesId[i]\n num = old_name[len('species'):]\n if len(num) > 1 and num[0] == '0':\n new_name = 'species' + str(num[1:])\n self.parser.parsedModel.speciesId[i] = new_name\n self.parser.rename_everywhere(old_name, new_name)", "def test_change_name_of_the_devicetrue():", "def test_node_bad_name(self):\n node_name = 1\n self.assertRaises(TypeError, Node, node_name, '1', '2', 'leaf')", "def nameToNode(name):\n\n pass", "def test_create_nontar_model():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, \"misc\")\n model_file = os.path.join(misc_folder, \"model-nonexistent.bla\")\n create.main(\"mlp\", \"10:12:8\", model_file)\n # TODO: Check if error was logged", "def test_good_node():\n node_a = Node({'A':['B','C']})\n assert node_a.name == 'A'\n assert node_a.connections == ['B','C']", "def onBuildModels(self):\n if self.refSeriesNumber != '-1':\n ref = self.refSeriesNumber\n refLongName = self.seriesMap[ref]['LongName']\n labelNodes = slicer.util.getNodes('*'+refLongName+'*-label*')\n\n numNodes = slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelHierarchyNode\" )\n outHierarchy = None\n\n for n in xrange(numNodes):\n node = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelHierarchyNode\" )\n if node.GetName() == 'mpReview-'+refLongName:\n outHierarchy = node\n break\n\n # Remove the previous models\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n slicer.mrmlScene.RemoveNode(modelNode)\n\n # if models hierarchy does not exist, create it.\n else:\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\n 
outHierarchy.SetScene( slicer.mrmlScene )\n outHierarchy.SetName( 'mpReview-'+refLongName )\n slicer.mrmlScene.AddNode( outHierarchy )\n\n progress = self.makeProgressIndicator(len(labelNodes))\n step = 0\n for label in labelNodes.values():\n labelName = label.GetName().split(':')[1]\n structureName = labelName[labelName[:-6].rfind(\"-\")+1:-6]\n # Only save labels with known structure names\n if any(structureName in s for s in self.structureNames):\n parameters = {}\n parameters[\"InputVolume\"] = label.GetID()\n parameters['FilterType'] = \"Sinc\"\n parameters['GenerateAll'] = True\n\n parameters[\"JointSmoothing\"] = False\n parameters[\"SplitNormals\"] = True\n parameters[\"PointNormals\"] = True\n parameters[\"SkipUnNamed\"] = True\n\n # create models for all labels\n parameters[\"StartLabel\"] = -1\n parameters[\"EndLabel\"] = -1\n\n parameters[\"Decimate\"] = 0\n parameters[\"Smooth\"] = 0\n\n parameters[\"ModelSceneFile\"] = outHierarchy\n\n progress.labelText = '\\nMaking Model for %s' % structureName\n progress.setValue(step)\n if progress.wasCanceled:\n break\n\n try:\n modelMaker = slicer.modules.modelmaker\n self.CLINode = slicer.cli.run(modelMaker, self.CLINode,\n parameters, wait_for_completion=True)\n except AttributeError:\n qt.QMessageBox.critical(slicer.util.mainWindow(),'Editor', 'The ModelMaker module is not available<p>Perhaps it was disabled in the application settings or did not load correctly.')\n step += 1\n progress.close()\n #\n\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n displayNode = modelNode.GetDisplayNode()\n displayNode.SetSliceIntersectionVisibility(1)\n displayNode.SetSliceIntersectionThickness(2)\n self.modelsVisibilityButton.checked = False\n self.updateViewRenderer()", "def _auto_name(name, parent):\n if not is_ready(parent):\n parent._pywarm_auto_name_dict = {}\n def _hook(model, x):\n model._pywarm_auto_name_dict = {}\n parent._pywarm_forward_pre_hook = parent.register_forward_pre_hook(_hook)\n track = parent._pywarm_auto_name_dict\n if name not in track:\n track[name] = 0\n track[name] += 1\n return f'{name}_{track[name]}'", "def run(name, create_nodes):\n\n print('== {} =='.format(name))\n create_nodes()\n emit_and_print_errors(lkt_file='foo.lkt')\n print('')", "def test_model_flow_node_model_flow_id_node_id_component_put(self):\n pass", "def verifyModels(self):\r\n\r\n #\r\n # now check that all models have the same poly data in the\r\n # model node as in the display node\r\n #\r\n polyDataInScene = []\r\n fileNamesInScene = []\r\n success = True\r\n numModels = slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" )\r\n for n in range(numModels):\r\n modelNode = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelNode\" )\r\n polyDataInScene.append(modelNode.GetPolyData())\r\n for dn in range(modelNode.GetNumberOfDisplayNodes()):\r\n displayNode = modelNode.GetNthDisplayNode(dn)\r\n if modelNode.GetPolyData() != displayNode.GetInputPolyData():\r\n self.delayDisplay(\"Model %d does not match its display node %d! 
(name: %s, ids: %s and %s)\" % (n,dn,modelNode.GetName(), modelNode.GetID(),displayNode.GetID()))\r\n success = False\r\n for sn in range(modelNode.GetNumberOfStorageNodes()):\r\n storageNode = modelNode.GetNthStorageNode(sn)\r\n fileName = storageNode.GetFileName()\r\n fileNamesInScene.append(fileName)\r\n if fileName in fileNamesInScene:\r\n self.delayDisplay(\"Model %d has duplicate file name %s! (ids: %s and %s)\" % (n,fileName,modelNode.GetID(),storageNode.GetID()))\r\n success = False\r\n\r\n\r\n #\r\n # now check that each model has a unique polydata\r\n #\r\n for n in range(numModels):\r\n modelNode = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelNode\" )\r\n if polyDataInScene.count(modelNode.GetPolyData()) > 1:\r\n self.delayDisplay(\"Polydata for Model is duplicated! (id: %s and %s)\" % (n,modelNode.GetID()))\r\n success = False\r\n\r\n return success", "def _create_node(\n self,\n name,\n ):\n pass", "def _generate_name(name):\n return 'test-%s-%s-%s' % (time.strftime('%Y%m%d%H%M%S'),\n random.randint(0, 999), name)", "def update_node_name(node_name, bb):\n node_number = int(re.search(r'\\d+', re.search(r'block_\\d+', node_name).group()).group())\n if bb < node_number:\n return node_name.replace('block_' + str(node_number), 'block_' + str(node_number+1))\n return node_name", "def get_name():\n return \"SVM+\"", "def test_simple_creation():\n # Get model file\n create.main(\"mlp\", \"10:12:8\", \"model_test.tar\")", "def model_name(spec_name):\n return \"spec_\" + spec_name", "def restore_names(input_file, output_file):\n\n if not dataModel.loadModel(input_file):\n print(\"Couldn't open input file\")\n return 1\n\n model = dataModel.getModel()\n\n restore_names_in(model.getCompartments())\n restore_names_in(model.getMetabolitesX())\n restore_names_in(model.getModelValues())\n restore_names_in(model.getReactions())\n restore_names_in(model.getEvents())\n\n dataModel.saveModel(output_file, True)\n\n return 0", "def get_node_children_names(model, node):\n\n output_nodes = model.get_children(node)\n outputs = [node.name for node in output_nodes]\n return outputs", "def test_activation_names(num_mutations):\n net = WeightAgnosticNetwork(10, 2, 0.5)\n for _ in range(num_mutations):\n net.mutate()\n\n assert len(net.activation_names) == net.num_neurons", "def test_workon_name(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"", "def test_route_name(self):\n route_name = 'Single Driver Round Trip'\n self.route4me.optimization.route_name(route_name)\n data = self.route4me.optimization.data['parameters']\n self.assertEqual(route_name, data['route_name'])", "def testGetAllPhEDExNodeNames(self):\n result = self.mySiteDB.getAllPhEDExNodeNames(excludeBuffer=True)\n self.assertFalse([pnn for pnn in result if pnn.endswith('_Buffer')])\n\n result = self.mySiteDB.getAllPhEDExNodeNames(excludeBuffer=False)\n self.assertTrue(len([pnn for pnn in result if pnn.endswith('_Buffer')]) > 5)\n\n result = self.mySiteDB.getAllPhEDExNodeNames(pattern='T1.*', excludeBuffer=True)\n self.assertFalse([pnn for pnn in result if not pnn.startswith('T1_')])\n self.assertTrue(len(result) > 10)\n\n result = self.mySiteDB.getAllPhEDExNodeNames(pattern='.*', excludeBuffer=True)\n self.assertTrue([pnn for pnn in result if pnn.startswith('T1_')])\n self.assertTrue([pnn for pnn in result if pnn.startswith('T2_')])\n self.assertTrue([pnn for 
pnn in result if pnn.startswith('T3_')])\n self.assertTrue(len(result) > 60)\n\n return", "def GetUniqueName( name, elems ):\n digits = []\n for c in reversed( name ):\n if c.isdigit():\n digits.append( c )\n else:\n break\n \n stem = name[0:len( name ) - len( digits )]\n val = ''.join( digits )[::-1] or 0\n i = int( val )\n \n while True:\n i += 1\n newName = ''.join( [stem, str( i )] )\n if newName not in elems:\n break\n \n return newName", "def node_name(self) -> str:\n op_name = f\"{self.name.name}_{self.name.overload_name}\".lower()\n return \"\".join(word.capitalize() or \"\" for word in op_name.split(\"_\"))", "def test01_name(self):\n model = self.setup_model01(\"m1\")\n model2 = self.setup_model01(\"m2\")\n\n model2.b[1].a = 0.11\n model2.b[1].b = 0.11\n model2.x = False\n to_json(model, fname=self.fname, human_read=True)\n from_json(model2, fname=self.fname)\n # make sure they are right\n assert pytest.approx(20) == value(model2.b[1].b)\n assert pytest.approx(2) == value(model2.b[1].a)\n assert value(model2.x) == True", "def fix_label_names():\n\n assert trace.cpu.trace_done\n binary_addr = memorymanager.BinaryAddr(0)\n while binary_addr < len(classifications):\n c = classifications[binary_addr]\n if c is not None:\n dummy = [str(x) for x in c.as_string_list(binary_addr, None)]\n binary_addr += c.length()\n else:\n binary_addr += 1", "def test_create_named_input_edge(self):\n n1, n2 = Node(), Node()\n result = n1 | 'foo' * n2\n self.assertEqual(result, n2)\n self.assertEqual(n1.eout, [Edge(n1, n2, input_name='foo')])\n self.assertEqual(n2.ein, [Edge(n1, n2, input_name='foo')])", "def test_unique_naming(self):\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='testx', offering='GreekHero', branch='draft'),\r\n 'problem', block_id='problem1'\r\n )\r\n original = modulestore().get_item(locator)\r\n\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='guestx', offering='contender', branch='draft'), 'course', 'head345679'\r\n )\r\n category = 'problem'\r\n new_payload = \"<problem>empty</problem>\"\r\n new_module = modulestore().create_item(\r\n locator, category, 'anotheruser',\r\n fields={'display_name': 'problem 1', 'data': new_payload},\r\n )\r\n another_payload = \"<problem>not empty</problem>\"\r\n another_module = modulestore().create_item(\r\n locator, category, 'anotheruser',\r\n fields={'display_name': 'problem 2', 'data': another_payload},\r\n definition_locator=original.definition_locator,\r\n )\r\n # check that course version changed and course's previous is the other one\r\n parent = modulestore().get_item(locator)\r\n self.assertNotEqual(new_module.location.block_id, another_module.location.block_id)\r\n self.assertIn(new_module.location.version_agnostic(), version_agnostic(parent.children))\r\n self.assertIn(another_module.location.version_agnostic(), version_agnostic(parent.children))\r\n self.assertEqual(new_module.data, new_payload)\r\n self.assertEqual(another_module.data, another_payload)\r\n # check definition histories\r\n new_history = modulestore().get_definition_history_info(new_module.definition_locator)\r\n self.assertIsNone(new_history['previous_version'])\r\n self.assertEqual(new_history['original_version'], new_module.definition_locator.definition_id)\r\n self.assertEqual(new_history['edited_by'], \"anotheruser\")\r\n another_history = modulestore().get_definition_history_info(another_module.definition_locator)\r\n self.assertEqual(another_history['previous_version'], original.definition_locator.definition_id)", "def recreate_subgraphs_name():\n 
global SUBGRAPHS\n for (name, subgraph) in SUBGRAPHS.items():\n subgraph.set_name(\"\\\"cluster_\" + subgraph.get_name() + \"\\\"\")", "def nodeToLongName(node):\n\n pass", "def create_nodes(name):\n # Find the tsp instance file and extract its extension\n dir = os.path.dirname(os.path.realpath(__file__))\n file = open(dir+'/instances/'+name)\n ext = name.split('.')[1]\n nodes = [] #Array storing nodes\n\n # If .csv then just read nodes line by line\n if (ext == \"csv\"):\n for nodeNo,line in enumerate(file): #enumerate used to obtain line numbers and thus node numbers\n coords = line.rsplit()[0].split(\",\")\n x = int(coords[0])\n y = int(coords[1])\n nodes.append(Node(x,y))\n elif (ext == \"tsp\"):\n # If .tsp then the format of the file changes and needs to be read differently.\n file.readline()\n file.readline()\n file.readline()\n no_nodes = int(file.readline().strip().split()[1])\n file.readline()\n file.readline()\n file.readline()\n\n for i in range(0, no_nodes):\n\n coords = file.readline().strip().split()[1:]\n x = float(coords[0])\n y = float(coords[1])\n\n nodes.append(Node(x,y))\n\n return nodes", "def test_class_attribute():\n assert isinstance(ResRNNModel.model_name, str)\n assert ResRNNModel.model_name == 'res-RNN'\n assert ResRNNModel.file_name == 'model-{}.pt'", "def test_generate_nb(self):\n pass", "def test_KGE_methods(model_name):\n testing_function(model_name)", "def test_create_final_name(self):\n \n date = \"111111\"\n fcid = \"A11A22BCXX\"\n sample_name = \"P101_150B_index5\"\n \n test_names = [(\"1_{}_{}_1_nophix_1_fastq.txt.gz\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"1_{}_{}_1_nophix_1_fastq.txt\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq\".format(date,fcid,sample_name)),\n (\"1_{}_{}_1_1_fastq.txt.gz\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_CGATGT_L001_R1_001.fastq.gz\".format(sample_name),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_NoIndex_L001_R2_001.fastq.gz\".format(sample_name),\n \"1_{}_{}_{}_2.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_CGATGT_L001_R1_001.fastq..gz\".format(sample_name),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_CGATGT_L001_R1_001.fastq\".format(sample_name),\n \"1_{}_{}_{}_1.fastq\".format(date,fcid,sample_name))]\n \n for test_fname, exp_result in test_names:\n obs_result = create_final_name(test_fname,date,fcid,sample_name)\n self.assertEqual(obs_result,\n exp_result,\n \"Did not get expected final name ({:s}) for file name {:s}\".format(exp_result,test_fname))\n \n # Try without the _index part of file name\n sample_name_noindex = \"P101_150\"\n test_names = [(\"1_{}_{}_1_nophix_1_fastq.txt.gz\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name_noindex)),\n (\"{}_CGATGT_L001_R1_001.fastq.gz\".format(sample_name_noindex),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name_noindex)),\n (\"{}_NoIndex_L001_R2_001.fastq.gz\".format(sample_name_noindex),\n \"1_{}_{}_{}_2.fastq.gz\".format(date,fcid,sample_name_noindex))]\n \n for test_fname, exp_result in test_names:\n obs_result = create_final_name(test_fname,date,fcid,sample_name_noindex)\n self.assertEqual(obs_result,\n exp_result,\n \"Did not get expected final name ({:s}) for file name {:s}\".format(exp_result,test_fname))\n \n # Try some illegal file names and assert that they raise exceptions\n test_names = [\"1_{}_{}_1_nophix_1_fastq.gz\".format(date,fcid),\n 
\"a_{}_{}_1_nophix_1_fastq.txt\".format(date,fcid),\n \"{}_CGATRGT_L1_R1_001.fastq.gz\".format(sample_name)]\n for test_name in test_names:\n with self.assertRaises(ValueError):\n create_final_name(test_name,date,fcid,sample_name)\n \n # Try a file with undetermined reads\n sample_name = \"lane1\"\n test_names = [(\"{}_Undetermined_L001_R1_001.fastq.gz\".format(sample_name),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),] \n for test_fname, exp_result in test_names:\n obs_result = create_final_name(test_fname,date,fcid,sample_name)\n self.assertEqual(obs_result,\n exp_result,\n \"Did not get expected final name ({:s}) for file name {:s}\".format(exp_result,test_fname))", "def test_entities__Entity__name__1(entity):\n assert 'IcemacAddressbookTestsTestEntitiesDummy' == entity.name", "def test_intro_model_n_amd():\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n n=100\n prep.prepare(n_components=n, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl')\n features = [\n\n u'days_since_start',\n u'vote_required',\n u'nterms', u'success_rate',\n u'n_amd', u'session_type',\n u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', u'party_COM',\n u'urgency_No', u'urgency_Yes',\n u'appropriation_No', u'appropriation_Yes',\n u'taxlevy_No', u'taxlevy_Yes',\n u'fiscal_committee_No', u'fiscal_committee_Yes']\n topic_features = [\"topic_\"+str(k) for k in range(n)]\n features += topic_features\n X_train, y_train = prep.subset(features, dep_var='n_amd')\n\n baseline = DummyRegressor()\n\n gb = GradientBoostingRegressor()\n\n mc = ModelChooser([baseline, gb])\n mc.fit_predict(X_train, y_train, regressor=True)\n mc.print_results(regressor=True)", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def parse_yolo_name(backbone_name, num_anchors, num_classes):\n model_name = 'yolov3'\n\n if 'tiny' in backbone_name:\n model_name += '-tiny'\n elif 'spp' in backbone_name:\n model_name += '-spp'\n model_name += '_a' + str(num_anchors)\n model_name += '_c' + str(num_classes)\n\n return model_name", "def create_name(name, epochs, lr, lr_decay_step, dilation, batch_size):\n\treturn '{}_ep-{}_lr-{}_de-{}_di-{}_bs-{}'.format(name, epochs, lr, lr_decay_step, sum(dilation), batch_size)", "def test_network_split_variables_by_time(request):\n print(\"\\n--Starting:\", request.node.name)\n\n net = ModelRoadwayNetwork.read(\n link_file=STPAUL_LINK_FILE,\n node_file=STPAUL_NODE_FILE,\n shape_file=STPAUL_SHAPE_FILE,\n fast=True,\n )\n\n net.split_properties_by_time_period_and_category()\n assert \"trn_priority_AM\" in net.links_df.columns\n print(net.links_df.info())\n ## todo write an assert that actually tests something", "def sequential_naming(self):\n return self.config.get('sequential_naming', False)", "def get_not_quantize_node_name(model):\n node_name_not_quantize = []\n qconfig_map = model._qconfig_map # pylint: disable=protected-access\n\n modules = dict(model.named_modules(remove_duplicate=False))\n\n for node in model.graph.nodes:\n if node.op == \"call_module\" and isinstance(modules[node.target], FakeQuantizeBase) or node.op == \"placeholder\":\n continue\n\n if qconfig_map[node.name] is None:\n node_name_not_quantize.append(node.name)\n\n 
if not node_name_not_quantize:\n node_name_not_quantize = None\n\n return node_name_not_quantize", "def test_get_node_sled(self):\n pass", "def check_name(self, node):\n assert \"name\" in node, \"Package node does not contain attribute 'node'\"\n assert len(node[\"name\"]) >= 1, \"Expecting at least one 'name' value\"\n # TODO: add more thorough checks", "def test_get_model_names():\n\n names = Instafilter.get_models()\n assert isinstance(names, list)\n assert len(names) > 1", "def get_name():\n return \"SVM\"", "def test_create_cluster_network(self):\n pass", "def generate_name(seed=\"\"):\n \n seed = \"<\" + seed\n output = seed\n \n # create initial states\n h_state = tf.zeros(shape=(1, embedding_dim))\n c_state = tf.zeros(shape=(1, embedding_dim))\n states = [h_state, c_state]\n \n stop = False\n \n while not stop:\n # convert text seed to model input\n seq = name_to_seq(seed)\n seq = np.array([seq])\n \n # predict next char\n probs, h_state, c_state = inference_model([seq] + states)\n states = [h_state, c_state]\n probs = np.asarray(probs)[:, -1, :]\n # \n index = np.random.choice(list(range(vocab_size)), p=probs.ravel())\n \n if index == 0:\n break\n \n pred_char = index_to_char[index]\n seed = pred_char\n output += pred_char\n \n if pred_char == \">\" or len(output) > max_len + 2:\n break\n \n return output.lstrip(\"<\").rstrip(\">\") # get rid of start(<) and end(>) chars" ]
[ "0.75483245", "0.6777502", "0.6655101", "0.6572657", "0.63191295", "0.6208426", "0.61246103", "0.6104829", "0.6082704", "0.6061295", "0.5965343", "0.59363145", "0.5935405", "0.5920117", "0.5828375", "0.56998265", "0.56853604", "0.5650619", "0.5605244", "0.55901146", "0.55672294", "0.5556693", "0.5555282", "0.5547202", "0.55394006", "0.5521654", "0.55186623", "0.55123276", "0.55064803", "0.55030227", "0.546013", "0.5457783", "0.54342717", "0.5433218", "0.54302794", "0.54277337", "0.5423399", "0.54101217", "0.5401591", "0.53888357", "0.5388433", "0.5387769", "0.53782195", "0.53738457", "0.536933", "0.5364178", "0.53638977", "0.5353149", "0.53520584", "0.53520584", "0.5351882", "0.53503424", "0.53474855", "0.53406817", "0.53377396", "0.5315637", "0.53137124", "0.5290552", "0.52901757", "0.5289904", "0.52801585", "0.5276293", "0.5275286", "0.5272526", "0.52685523", "0.5268494", "0.5261879", "0.5253642", "0.5248034", "0.52427965", "0.5235399", "0.5233764", "0.52325827", "0.52298594", "0.52128613", "0.5211169", "0.5207076", "0.52041113", "0.52039385", "0.5193859", "0.5190427", "0.5188889", "0.51862043", "0.5185057", "0.51700723", "0.51619875", "0.5156446", "0.51425236", "0.5139481", "0.5137546", "0.5135856", "0.5133061", "0.5123987", "0.51223403", "0.512101", "0.511762", "0.51167053", "0.51142764", "0.51120305", "0.5110817" ]
0.6890959
1
Given the output tensor, the dataset at hand and the current task, masks the former by setting the responses for the other tasks at inf. It is used to obtain the results for the taskil setting.
def mask_classes(outputs: torch.Tensor, dataset: ContinualDataset, k: int) -> None: outputs[:, 0:k * dataset.N_CLASSES_PER_TASK] = -float('inf') outputs[:, (k + 1) * dataset.N_CLASSES_PER_TASK: dataset.N_TASKS * dataset.N_CLASSES_PER_TASK] = -float('inf')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task_none(input_array, dummyfactor):\n return(input_array)", "def output_mask(self):\n output = self.output\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)", "def _compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo:\n raise NotImplementedError", "def mask_no_self(task:dict, mask_params:dict, qn_pair, verbose=False):\n\n # unpack quantum numbers\n (qnf,qni) = qn_pair\n (Ji,gi,ni) = qni\n (Jf,gf,nf) = qnf\n\n # calculate mask value\n allow = (qnf!=qni)\n\n return allow", "def my_trace_ignore_this_task(orig, filtered_pids, task):\n return 0 if task == trace_task else 1", "def _compute_masked_targets(self, item_ids: tf.Tensor, training: bool = False) -> MaskingInfo:\n\n labels = tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype)\n non_padded_mask = tf.cast(item_ids != self.padding_idx, labels.dtype)\n rows_ids = tf.range(labels.shape[0], dtype=tf.int64)\n # During training, masks labels to be predicted according to a probability, ensuring that\n # each session has at least one label to predict\n if training:\n # Selects a percentage of items to be masked (selected as labels)\n probability_matrix = tf.cast(\n backend.random_bernoulli(array_ops.shape(labels), p=self.mlm_probability),\n labels.dtype,\n )\n\n mask_labels = probability_matrix * non_padded_mask\n labels = tf.where(\n tf.cast(mask_labels, tf.bool),\n item_ids,\n tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype),\n )\n\n # Set at least one item in the sequence to mask, so that the network\n # can learn something with this session\n one_random_index_by_session = tf.random.categorical(\n tf.math.log(tf.cast(non_padded_mask, tf.float32)), num_samples=1\n )\n indices = tf.concat([tf.expand_dims(rows_ids, 1), one_random_index_by_session], axis=1)\n labels = tf.tensor_scatter_nd_update(\n labels, indices=indices, updates=tf.gather_nd(item_ids, indices)\n )\n mask_labels = tf.cast(labels != self.padding_idx, labels.dtype)\n\n # If a sequence has only masked labels, unmask one of the labels\n sequences_with_only_labels = tf.reduce_sum(mask_labels, axis=1) == tf.reduce_sum(\n non_padded_mask, axis=1\n )\n sampled_labels_to_unmask = tf.random.categorical(\n tf.math.log(tf.cast(mask_labels, tf.float32)), num_samples=1\n )\n\n labels_to_unmask = tf.boolean_mask(sampled_labels_to_unmask, sequences_with_only_labels)\n rows_to_unmask = tf.boolean_mask(rows_ids, sequences_with_only_labels)\n indices = tf.concat([tf.expand_dims(rows_to_unmask, 1), labels_to_unmask], axis=1)\n num_updates, _ = indices.shape.as_list()\n labels = tf.tensor_scatter_nd_update(\n labels, indices, tf.cast(tf.fill(num_updates, self.padding_idx), labels.dtype)\n )\n mask_labels = labels != self.padding_idx\n\n else:\n if self.eval_on_last_item_seq_only:\n last_item_sessions = tf.reduce_sum(non_padded_mask, axis=1) - 1\n\n indices = tf.concat(\n [\n tf.expand_dims(rows_ids, 1),\n tf.cast(tf.expand_dims(last_item_sessions, 1), tf.int64),\n ],\n axis=1,\n )\n labels = tf.tensor_scatter_nd_update(\n labels, indices=indices, updates=tf.gather_nd(item_ids, indices)\n )\n mask_labels = labels != self.padding_idx\n else:\n masking_info = self.predict_all(item_ids)\n mask_labels, labels = masking_info.schema, masking_info.targets\n\n return MaskingInfo(mask_labels, labels)", "def get_output_mask_at(self, node_index):\n output = self.get_output_at(node_index)\n if isinstance(output, list):\n return [getattr(x, 
'_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)", "def mask_only_self(task:dict, mask_params:dict, qn_pair, verbose=False):\n\n # unpack quantum numbers\n (qnf,qni) = qn_pair\n (Ji,gi,ni) = qni\n (Jf,gf,nf) = qnf\n\n # calculate mask value\n allow = (qnf==qni)\n\n return allow", "def predict_mask(logit, EMPTY_THRESHOLD, MASK_THRESHOLD):\n #pred mask 0-1 pixel-wise\n #n = logit.shape[0]\n IMG_SIZE = logit.shape[-1] #256\n #EMPTY_THRESHOLD = 100.0*(IMG_SIZE/128.0)**2 #count of predicted mask pixles<threshold, predict as empty mask image\n #MASK_THRESHOLD = 0.22\n #logit = torch.sigmoid(torch.from_numpy(logit)).view(n, -1)\n #pred = (logit>MASK_THRESHOLD).long()\n #pred[pred.sum(dim=1) < EMPTY_THRESHOLD, ] = 0 #bug here, found it, the bug is input shape is (256, 256) not (16,256,256)\n logit = sigmoid(logit)#.reshape(n, -1)\n pred = (logit>MASK_THRESHOLD).astype(np.int)\n if pred.sum() < EMPTY_THRESHOLD:\n return np.zeros(pred.shape).astype(np.int)\n else:\n return pred", "def impute_target_if_unobserved(\n self,\n F,\n output,\n scale,\n current_target,\n current_observed_indicator,\n is_pad,\n ) -> Tensor:\n distr_args = self.proj_distr_args(output)\n distr = self.distr_output.distribution(distr_args, scale=scale)\n\n with autograd.pause():\n sample = distr.sample(\n num_samples=self.num_imputation_samples, dtype=self.dtype\n ).mean(axis=0)\n\n target_value = mx_switch(\n F,\n (current_observed_indicator, current_target),\n (is_pad, F.zeros_like(sample)),\n sample,\n )\n return target_value", "def compute_mask(self, inputs, mask=None):\n return None", "def predict_entire_mask(dataset, model, batch_size=32, tta=False):\n loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True)\n\n weighting = torch.from_numpy(get_tile_weighting(dataset.tile_size))\n weighting_cuda = weighting.clone().cuda().unsqueeze(0)\n weighting = weighting.cuda().half()\n\n global_pred = torch.zeros(\n (dataset.orig_size[0], dataset.orig_size[1]),\n dtype=torch.half, device=\"cuda\"\n )\n global_counter = torch.zeros(\n (dataset.orig_size[0], dataset.orig_size[1]),\n dtype=torch.half, device=\"cuda\"\n )\n\n model.eval()\n with torch.no_grad():\n for img, pos in loader:\n img = img.to(\"cuda\")\n _, _, h, w = img.shape\n\n if model.num_classes == 2:\n pred = model(img)[:, 0].view(-1, 1, h, w).sigmoid().detach()\n else:\n pred = model(img).view(-1, 1, h, w).sigmoid().detach()\n\n if tta:\n for f in FLIPS:\n pred_flip = model(torch.flip(img, f))\n if model.num_classes == 2:\n pred_flip = pred_flip[:, 0]\n\n pred_flip = torch.flip(pred_flip, f).view(-1, 1, h, w).sigmoid().detach()\n pred += pred_flip\n pred = torch.div(pred, len(FLIPS) + 1)\n\n pred = torch.nn.functional.interpolate(\n pred, (dataset.tile_size, dataset.tile_size), mode='area'\n ).view(-1, dataset.tile_size, dataset.tile_size)\n\n pred = (pred * weighting_cuda).half()\n\n for tile_idx, (x0, x1, y0, y1) in enumerate(pos):\n global_pred[x0: x1, y0: y1] += pred[tile_idx]\n global_counter[x0: x1, y0: y1] += weighting\n\n for i in range(len(global_pred)):\n global_pred[i] = torch.div(global_pred[i], global_counter[i])\n\n return global_pred", "def out_do(x):\n if output_dropout:\n return out_do_mask * x\n else:\n return x", "def out_do(x):\n if output_dropout:\n return out_do_mask * x\n else:\n return x", "def out_do(x):\n if output_dropout:\n return out_do_mask * x\n else:\n return x", "def out_do(x):\n if output_dropout:\n return out_do_mask * x\n else:\n return x", "def omit_nans(self, 
data, label):\n maskarray=np.full(data.shape[0], True)\n masker=np.unique(np.argwhere(np.isnan(data))[:,0])\n maskarray[masker]=False\n traindata=data[maskarray,:,:,:]\n trainlabel=label[maskarray]\n return traindata, trainlabel", "def _fetch_task_inputs(self, task):\n return None", "def _mask(self):\n if self.__mask is None:\n # need this to be *exactly* the numpy boolean False\n return nomask\n return self.__mask", "def test_reduction_none(self):\n predict = torch.randn(self.batch, self.num_classes, self.img_size, self.img_size)\n target = self._get_default_target_tensor()\n mask = self._get_default_mask_tensor()\n\n loss_weigths = [1.0, 0.5]\n ce_crit = nn.CrossEntropyLoss(reduction=\"none\")\n mask_ce_crit = MaskAttentionLoss(criterion=ce_crit, loss_weights=loss_weigths, reduction=\"none\")\n\n # expected result\n ce_loss = ce_crit(predict, target)\n _mask = mask.view_as(ce_loss)\n mask_loss = ce_loss * _mask\n expected_loss = ce_loss * loss_weigths[0] + mask_loss * loss_weigths[1]\n\n # mask ce loss result\n loss = mask_ce_crit(predict, target, mask)\n\n self._assertion_torch_values(expected_loss, loss)\n self.assertEqual(target.size(), loss.size())", "def get_output_for(self, inputs, **kwargs):\n # Retrieve the layer input\n input = inputs[0]\n # Retrieve the mask when it is supplied\n mask = None\n hid_init = None\n if self.mask_incoming_index > 0:\n mask = inputs[self.mask_incoming_index]\n if self.hid_init_incoming_index > 0:\n hid_init = inputs[self.hid_init_incoming_index]\n\n # Input should be provided as (n_batch, n_time_steps, n_features)\n # but scan requires the iterable dimension to be first\n # So, we need to dimshuffle to (n_time_steps, n_batch, n_features)\n input = input.dimshuffle(1, 0, *range(2, input.ndim))\n seq_len, num_batch = input.shape[0], input.shape[1]\n\n # When we are not precomputing the input, we also need to pass the\n # input-to-hidden parameters to step\n non_seqs = L.get_all_params(self.input_to_hidden)\n\n # Create single recurrent computation step function\n def step(input_n, hid_previous, *args):\n hid_pre = L.get_output(\n self.input_to_hidden,{self.input_to_hidden_input : input_n,\n self.input_to_hidden_hidden : hid_previous}, **kwargs)\n\n # Clip gradients\n if self.grad_clipping:\n hid_pre = theano.gradient.grad_clip(\n hid_pre, -self.grad_clipping, self.grad_clipping)\n\n return hid_pre\n\n def step_masked(input_n, mask_n, hid_previous, *args):\n # Skip over any input with mask 0 by copying the previous\n # hidden state; proceed normally for any input with mask 1.\n hid = step(input_n, hid_previous, *args)\n hid_out = T.switch(mask_n, hid, hid_previous)\n return [hid_out]\n\n if mask is not None:\n mask = mask.dimshuffle(1, 0, 'x')\n sequences = [input, mask]\n step_fun = step_masked\n else:\n sequences = input\n step_fun = step\n\n if not isinstance(self.hid_init, L.Layer):\n # The code below simply repeats self.hid_init num_batch times in\n # its first dimension. 
Turns out using a dot product and a\n # dimshuffle is faster than T.repeat.\n dot_dims = (list(range(1, self.hid_init.ndim - 1)) +\n [0, self.hid_init.ndim - 1])\n hid_init = T.dot(T.ones((num_batch, 1)),\n self.hid_init.dimshuffle(dot_dims))\n\n if self.unroll_scan:\n # Retrieve the dimensionality of the incoming layer\n input_shape = self.input_shapes[0]\n # Explicitly unroll the recurrence instead of using scan\n hid_out = unroll_scan(\n fn=step_fun,\n sequences=sequences,\n outputs_info=[hid_init],\n go_backwards=self.backwards,\n non_sequences=non_seqs,\n n_steps=input_shape[1])[0]\n else:\n # Scan op iterates over first dimension of input and repeatedly\n # applies the step function\n hid_out = theano.scan(\n fn=step_fun,\n sequences=sequences,\n go_backwards=self.backwards,\n outputs_info=[hid_init],\n non_sequences=non_seqs,\n truncate_gradient=self.gradient_steps,\n strict=True)[0]\n\n # When it is requested that we only return the final sequence step,\n # we need to slice it out immediately after scan is applied\n if self.only_return_final:\n hid_out = hid_out[-1]\n else:\n # dimshuffle back to (n_batch, n_time_steps, n_features))\n hid_out = hid_out.dimshuffle(1, 0, *range(2, hid_out.ndim))\n\n # if scan is backward reverse the output\n if self.backwards:\n hid_out = hid_out[:, ::-1]\n\n return hid_out", "def applymask(self,mask):\n self.spec[mask==0]=np.nan", "def pred_from_net_output(self, net_output: ALL_NET_OUTPUT) -> torch.Tensor:\n raise NotImplementedError", "def attention_mask_future(nd, ns, dtype=tf.float32):\n i = tf.range(nd)[:, None]\n j = tf.range(ns)\n m = i >= j - ns + nd\n out = tf.cast(m, dtype)\n return out", "def identity_mask_propagation(nx_node, nx_graph):\n input_masks = get_input_masks(nx_node, nx_graph)\n assert len(input_masks) == 1\n nx_node['input_masks'] = input_masks\n nx_node['output_mask'] = input_masks[0]", "def identity_mask_propagation(nx_node, nx_graph):\n input_masks = get_input_masks(nx_node, nx_graph)\n assert len(input_masks) == 1\n nx_node[\"input_masks\"] = input_masks\n nx_node[\"output_mask\"] = input_masks[0]", "def switch_to_no_feedforward_inputs(self):\n\n self.h_e=np.ones_like(self.inputs.noise_flat.T)*self.feed_forward_off_value\n self.h=np.vstack([self.h_e,self.h_i])", "def compute_acc(task_id, data, mnet, hnet, hhnet, device, config, shared,\n split_type='test', return_dataset=False, return_entropies=False,\n return_confidence=False, return_agreement=False,\n return_pred_labels=False, return_labels=False,\n return_samples=False, deterministic_sampling=False,\n in_samples=None, out_samples=None, num_w_samples=None,\n w_samples=None):\n # FIXME The code is almost a perfect copy from the original function.\n\n assert in_samples is not None or split_type in ['test', 'val', 'train']\n assert out_samples is None or in_samples is not None\n\n generator = None\n if deterministic_sampling:\n generator = torch.Generator()#device=device)\n # Note, PyTorch recommends using large random seeds:\n # https://tinyurl.com/yx7fwrry\n generator.manual_seed(2147483647)\n\n return_vals = Namespace()\n\n allowed_outputs = pmutils.out_units_of_task(config, data, task_id,\n shared.num_trained)\n\n ST = shared.softmax_temp[task_id]\n if not config.calibrate_temp:\n assert ST == 1.\n\n if in_samples is not None:\n X = in_samples\n T = out_samples\n elif split_type == 'train':\n X = data.get_train_inputs()\n T = data.get_train_outputs()\n elif split_type == 'test' or data.num_val_samples == 0:\n X = data.get_test_inputs()\n T = data.get_test_outputs()\n 
else:\n X = data.get_val_inputs()\n T = data.get_val_outputs()\n\n num_samples = X.shape[0]\n\n if T is not None:\n T = pmutils.fit_targets_to_softmax(config, shared, device, data,\n task_id, T)\n\n if return_dataset:\n return_vals.inputs = X\n return_vals.targets = T\n\n labels = None\n if T is not None:\n labels = np.argmax(T, axis=1)\n if return_labels:\n return_vals.labels = labels\n\n X = data.input_to_torch_tensor(X, device)\n #if T is not None:\n # T = data.output_to_torch_tensor(T, device)\n\n hnet_theta = None\n return_vals.theta = None\n if hhnet is not None:\n assert hnet is not None\n hnet_theta = hhnet.forward(cond_id=task_id)\n return_vals.theta = hnet_theta\n elif hnet is not None:\n return_vals.theta = hnet.unconditional_params\n\n # There is no weight sampling without an implicit hypernetwork.\n if w_samples is not None:\n num_w_samples = len(w_samples)\n elif num_w_samples is None:\n num_w_samples = 1 if hnet is None else config.val_sample_size\n else:\n if hnet is None and num_w_samples > 1:\n warn('Cannot draw multiple weight samples for deterministic ' +\n 'network')\n num_w_samples = 1\n\n if hasattr(config, 'non_growing_sf_cl3') and config.cl_scenario == 3 \\\n and config.non_growing_sf_cl3:\n softmax_width = config.num_tasks * data.num_classes\n elif config.cl_scenario == 3 and not config.split_head_cl3:\n softmax_width = len(allowed_outputs)\n else:\n softmax_width = data.num_classes\n softmax_outputs = np.empty((num_w_samples, X.shape[0], softmax_width))\n\n if return_samples:\n return_vals.samples = None\n\n # FIXME Note, that a continually learned hypernet (whose weights come from a\n # hyper-hypernet) would in principle also require correct argument passing,\n # e.g., to choose the correct set of batch statistics.\n kwargs = pmutils.mnet_kwargs(config, task_id, mnet)\n\n for j in range(num_w_samples):\n weights = None\n if w_samples is not None:\n weights = w_samples[j]\n elif hnet is not None:\n z = torch.normal(torch.zeros(1, shared.noise_dim),\n config.latent_std, generator=generator).to(device)\n weights = hnet.forward(uncond_input=z, weights=hnet_theta)\n\n if weights is not None and return_samples:\n if j == 0:\n return_vals.samples = np.empty((num_w_samples,\n hnet.num_outputs))\n return_vals.samples[j, :] = torch.cat([p.detach().flatten() \\\n for p in weights]).cpu().numpy()\n\n\n curr_bs = config.val_batch_size\n n_processed = 0\n\n while n_processed < num_samples:\n if n_processed + curr_bs > num_samples:\n curr_bs = num_samples - n_processed\n n_processed += curr_bs\n\n sind = n_processed - curr_bs\n eind = n_processed\n\n Y = mnet.forward(X[sind:eind, :], weights=weights, **kwargs)\n if allowed_outputs is not None:\n Y = Y[:, allowed_outputs]\n\n softmax_outputs[j, sind:eind, :] = F.softmax(Y / ST, dim=1). \\\n detach().cpu().numpy()\n\n # Predictive distribution per sample.\n pred_dists = softmax_outputs.mean(axis=0)\n\n pred_labels = np.argmax(pred_dists, axis=1)\n # Note, that for CL3 (without split heads) `labels` are already absolute,\n # not relative to the head (see post-processing of targets `T` above).\n if labels is not None:\n accuracy = 100. 
* np.sum(pred_labels == labels) / num_samples\n else:\n accuracy = None\n\n if return_pred_labels:\n assert pred_labels.size == X.shape[0]\n return_vals.pred_labels = pred_labels\n\n if return_entropies:\n # We use the \"maximum\" trick to improve numerical stability.\n return_vals.entropies = - np.sum(pred_dists * \\\n np.log(np.maximum(pred_dists, 1e-5)),\n axis=1)\n # return_vals.entropies = - np.sum(pred_dists * np.log(pred_dists),\n # axis=1)\n assert return_vals.entropies.size == X.shape[0]\n\n # Normalize by maximum entropy.\n max_ent = - np.log(1.0 / data.num_classes)\n return_vals.entropies /= max_ent\n\n if return_confidence:\n return_vals.confidence = np.max(pred_dists, axis=1)\n assert return_vals.confidence.size == X.shape[0]\n\n if return_agreement:\n return_vals.agreement = softmax_outputs.std(axis=0).mean(axis=1)\n assert return_vals.agreement.size == X.shape[0]\n\n return accuracy, return_vals", "def _preprocess_input(self, dataset):\n masker = self.masker or dataset.masker\n\n mask_img = masker.mask_img or masker.labels_img\n if isinstance(mask_img, str):\n mask_img = nib.load(mask_img)\n\n # Ensure that protected values are not included among _required_inputs\n assert \"aggressive_mask\" not in self._required_inputs.keys(), \"This is a protected name.\"\n\n if \"aggressive_mask\" in self.inputs_.keys():\n LGR.warning(\"Removing existing 'aggressive_mask' from Estimator.\")\n self.inputs_.pop(\"aggressive_mask\")\n\n # A dictionary to collect masked image data, to be further reduced by the aggressive mask.\n temp_image_inputs = {}\n\n for name, (type_, _) in self._required_inputs.items():\n if type_ == \"image\":\n # If no resampling is requested, check if resampling is required\n if not self.resample:\n check_imgs = {img: nib.load(img) for img in self.inputs_[name]}\n _check_same_fov(**check_imgs, reference_masker=mask_img, raise_error=True)\n imgs = list(check_imgs.values())\n else:\n # resampling will only occur if shape/affines are different\n # making this harmless if all img shapes/affines are the same as the reference\n imgs = [\n resample_to_img(nib.load(img), mask_img, **self._resample_kwargs)\n for img in self.inputs_[name]\n ]\n\n # input to NiFtiLabelsMasker must be 4d\n img4d = concat_imgs(imgs, ensure_ndim=4)\n\n # Mask required input images using either the dataset's mask or the estimator's.\n temp_arr = masker.transform(img4d)\n\n # An intermediate step to mask out bad voxels.\n # Can be dropped once PyMARE is able to handle masked arrays or missing data.\n nonzero_voxels_bool = np.all(temp_arr != 0, axis=0)\n nonnan_voxels_bool = np.all(~np.isnan(temp_arr), axis=0)\n good_voxels_bool = np.logical_and(nonzero_voxels_bool, nonnan_voxels_bool)\n\n data = masker.transform(img4d)\n\n temp_image_inputs[name] = data\n if \"aggressive_mask\" not in self.inputs_.keys():\n self.inputs_[\"aggressive_mask\"] = good_voxels_bool\n else:\n # Remove any voxels that are bad in any image-based inputs\n self.inputs_[\"aggressive_mask\"] = np.logical_or(\n self.inputs_[\"aggressive_mask\"],\n good_voxels_bool,\n )\n\n # Further reduce image-based inputs to remove \"bad\" voxels\n # (voxels with zeros or NaNs in any studies)\n if \"aggressive_mask\" in self.inputs_.keys():\n n_bad_voxels = (\n self.inputs_[\"aggressive_mask\"].size - self.inputs_[\"aggressive_mask\"].sum()\n )\n if n_bad_voxels:\n LGR.warning(\n f\"Masking out {n_bad_voxels} additional voxels. 
\"\n \"The updated masker is available in the Estimator.masker attribute.\"\n )\n\n for name, raw_masked_data in temp_image_inputs.items():\n self.inputs_[name] = raw_masked_data[:, self.inputs_[\"aggressive_mask\"]]", "def mask_all_but_targets(rec, include_incorrect=True):\n\n newrec = rec.copy()\n newrec['resp'] = newrec['resp'].rasterize()\n #newrec = normalize_epoch_lengths(newrec, resp_sig='resp', epoch_regex='TARGET',\n # include_incorrect=include_incorrect)\n if 'stim' in newrec.signals.keys():\n newrec['stim'] = newrec['stim'].rasterize()\n\n #newrec = newrec.or_mask(['TARGET'])\n #newrec = newrec.and_mask(['PASSIVE_EXPERIMENT', 'TARGET'])\n #newrec = newrec.and_mask(['REFERENCE','TARGET'])\n newrec = newrec.and_mask(['TARGET'])\n\n if not include_incorrect:\n newrec = mask_incorrect(newrec)\n\n # svd attempt to kludge this masking to work with a lot of code that assumes all relevant epochs are\n # called \"REFERENCE\"\n #import pdb;pdb.set_trace()\n for k in newrec.signals.keys():\n newrec[k].epochs.name = newrec[k].epochs.name.str.replace(\"TARGET\", \"REFERENCE\")\n return newrec", "def get_task(option_set):\n return option_set & TASK_MASK", "def forward(self,\n pos_outputs: torch.Tensor,\n neg_outputs: torch.Tensor,\n mask: torch.Tensor = None) -> torch.Tensor:\n # Calculate loss by functional method\n loss = adaptive_hinge_loss(pos_outputs, neg_outputs, self.margin)\n\n # Apply masking and take reduction on loss\n return self.reduction(apply_mask(loss, mask)) if mask is not None \\\n else self.reduction(loss)", "def predict_entire_mask_downscaled(dataset, model, batch_size=32, tta=False):\n loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True)\n\n weighting = torch.from_numpy(get_tile_weighting(dataset.tile_size))\n weighting_cuda = weighting.clone().cuda().unsqueeze(0)\n weighting = weighting.cuda().half()\n\n global_pred = torch.zeros(\n (dataset.orig_size[0], dataset.orig_size[1]),\n dtype=torch.half, device=\"cuda\"\n )\n global_counter = torch.zeros(\n (dataset.orig_size[0], dataset.orig_size[1]),\n dtype=torch.half, device=\"cuda\"\n )\n\n model.eval()\n with torch.no_grad():\n for img, pos in loader:\n img = img.to(\"cuda\")\n _, _, h, w = img.shape\n\n if model.num_classes == 2:\n pred = model(img)[:, 0].view(-1, h, w).sigmoid().detach()\n else:\n pred = model(img).view(-1, h, w).sigmoid().detach()\n\n if tta:\n for f in FLIPS:\n pred_flip = model(torch.flip(img, f))\n if model.num_classes == 2:\n pred_flip = pred_flip[:, 0]\n pred_flip = torch.flip(pred_flip, f).view(-1, h, w).sigmoid().detach()\n pred += pred_flip\n pred = torch.div(pred, len(FLIPS) + 1)\n\n pred = (pred * weighting_cuda).half()\n\n for tile_idx, (x0, x1, y0, y1) in enumerate(pos):\n global_pred[x0: x1, y0: y1] += pred[tile_idx]\n global_counter[x0: x1, y0: y1] += weighting\n\n for i in range(len(global_pred)):\n global_pred[i] = torch.div(global_pred[i], global_counter[i])\n\n return global_pred", "def forward(self,\n pos_outputs: torch.Tensor,\n neg_outputs: torch.Tensor,\n mask: torch.Tensor = None) -> torch.Tensor:\n # Calculate loss by functional method\n loss = hinge_loss(pos_outputs, neg_outputs, self.margin)\n\n # Apply masking and take reduction on loss\n return self.reduction(apply_mask(loss, mask)) if mask is not None \\\n else self.reduction(loss)", "def mask(self):", "def compute_forgetting_metric(self, task_results, task_steps, task_id, num_tasks, num_cycles, return_scale):\n per_run_forgetting_per_subsequent = {id: {} for id in range(num_tasks)} # Inner 
dict maps cycle to total\n \n for run_id, task_result in enumerate(task_results):\n xs = np.array([t[0] for t in task_result])\n ys = np.array([t[1] for t in task_result]) * return_scale\n \n # Select only the rewards from the region up to and including the training of the given task\n task_rewards = self.get_rewards_for_region(xs, ys, [None, (task_id+1) * task_steps])\n max_task_value = task_rewards.max()\n \n for cycle_id in range(num_cycles):\n for subsequent_task_id in range(num_tasks):\n # It's not really \"catastrophic forgetting\" if we haven't seen the task yet, so skip the early tasks\n if cycle_id == 0 and subsequent_task_id <= task_id:\n continue\n \n offset = cycle_id * num_tasks\n \n if USE_ISOLATED_FORGETTING:\n task_rewards = self.get_rewards_for_region(xs, ys, [None, (subsequent_task_id + offset) * task_steps])\n max_task_value = task_rewards[-1]\n \n subsequent_region = [(subsequent_task_id + offset) * task_steps,\n (subsequent_task_id + offset + 1) * task_steps]\n subsequent_task_rewards = self.get_rewards_for_region(xs, ys, subsequent_region)\n last_reward = subsequent_task_rewards[-1]\n forgetting = max_task_value - last_reward\n \n if cycle_id not in per_run_forgetting_per_subsequent[subsequent_task_id]:\n per_run_forgetting_per_subsequent[subsequent_task_id][cycle_id] = []\n per_run_forgetting_per_subsequent[subsequent_task_id][cycle_id].append(forgetting)\n \n return per_run_forgetting_per_subsequent", "def _get_targets_single(self, mask_preds: Tensor,\n gt_instances: InstanceData,\n positive_info: InstanceData):\n gt_bboxes = gt_instances.bboxes\n device = gt_bboxes.device\n gt_masks = gt_instances.masks.to_tensor(\n dtype=torch.bool, device=device).float()\n\n # process with mask targets\n pos_assigned_gt_inds = positive_info.get('pos_assigned_gt_inds')\n scores = positive_info.get('scores')\n centernesses = positive_info.get('centernesses')\n num_pos = pos_assigned_gt_inds.size(0)\n\n if gt_masks.size(0) == 0 or num_pos == 0:\n return mask_preds, None, 0\n # Since we're producing (near) full image masks,\n # it'd take too much vram to backprop on every single mask.\n # Thus we select only a subset.\n if (self.max_masks_to_train != -1) and \\\n (num_pos > self.max_masks_to_train):\n perm = torch.randperm(num_pos)\n select = perm[:self.max_masks_to_train]\n mask_preds = mask_preds[select]\n pos_assigned_gt_inds = pos_assigned_gt_inds[select]\n num_pos = self.max_masks_to_train\n elif self.topk_masks_per_img != -1:\n unique_gt_inds = pos_assigned_gt_inds.unique()\n num_inst_per_gt = max(\n int(self.topk_masks_per_img / len(unique_gt_inds)), 1)\n\n keep_mask_preds = []\n keep_pos_assigned_gt_inds = []\n for gt_ind in unique_gt_inds:\n per_inst_pos_inds = (pos_assigned_gt_inds == gt_ind)\n mask_preds_per_inst = mask_preds[per_inst_pos_inds]\n gt_inds_per_inst = pos_assigned_gt_inds[per_inst_pos_inds]\n if sum(per_inst_pos_inds) > num_inst_per_gt:\n per_inst_scores = scores[per_inst_pos_inds].sigmoid().max(\n dim=1)[0]\n per_inst_centerness = centernesses[\n per_inst_pos_inds].sigmoid().reshape(-1, )\n select = (per_inst_scores * per_inst_centerness).topk(\n k=num_inst_per_gt, dim=0)[1]\n mask_preds_per_inst = mask_preds_per_inst[select]\n gt_inds_per_inst = gt_inds_per_inst[select]\n keep_mask_preds.append(mask_preds_per_inst)\n keep_pos_assigned_gt_inds.append(gt_inds_per_inst)\n mask_preds = torch.cat(keep_mask_preds)\n pos_assigned_gt_inds = torch.cat(keep_pos_assigned_gt_inds)\n num_pos = pos_assigned_gt_inds.size(0)\n\n # Follow the origin implement\n start = 
int(self.mask_out_stride // 2)\n gt_masks = gt_masks[:, start::self.mask_out_stride,\n start::self.mask_out_stride]\n gt_masks = gt_masks.gt(0.5).float()\n pos_mask_targets = gt_masks[pos_assigned_gt_inds]\n\n return (mask_preds, pos_mask_targets, num_pos)", "def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):\n if axes is None:\n axes = tuple(range(2, len(net_output.size())))\n\n shp_x = net_output.shape\n shp_y = gt.shape\n\n with torch.no_grad():\n if len(shp_x) != len(shp_y):\n gt = gt.view((shp_y[0], 1, *shp_y[1:]))\n\n if all([i == j for i, j in zip(net_output.shape, gt.shape)]):\n # if this is the case then gt is probably already a one hot encoding\n y_onehot = gt\n else:\n gt = gt.long()\n y_onehot = torch.zeros(shp_x, device=net_output.device)\n y_onehot.scatter_(1, gt, 1)\n\n tp = net_output * y_onehot\n fp = net_output * (1 - y_onehot)\n fn = (1 - net_output) * y_onehot\n tn = (1 - net_output) * (1 - y_onehot)\n\n if mask is not None:\n with torch.no_grad():\n mask_here = torch.tile(mask, (1, tp.shape[1], *[1 for i in range(2, len(tp.shape))]))\n tp *= mask_here\n fp *= mask_here\n fn *= mask_here\n tn *= mask_here\n # benchmark whether tiling the mask would be faster (torch.tile). It probably is for large batch sizes\n # OK it barely makes a difference but the implementation above is a tiny bit faster + uses less vram\n # (using nnUNetv2_train 998 3d_fullres 0)\n # tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)\n # fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)\n # fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)\n # tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1)\n\n if square:\n tp = tp ** 2\n fp = fp ** 2\n fn = fn ** 2\n tn = tn ** 2\n\n if len(axes) > 0:\n tp = tp.sum(dim=axes, keepdim=False)\n fp = fp.sum(dim=axes, keepdim=False)\n fn = fn.sum(dim=axes, keepdim=False)\n tn = tn.sum(dim=axes, keepdim=False)\n\n return tp, fp, fn, tn", "def mask_nodata(img_patch: Union[str, Path], gt_patch: Union[str, Path], nodata_val: int, mask_val: int = 255) -> None:\n image_ds = gdal.Open(str(img_patch), gdalconst.GA_ReadOnly)\n image_arr = image_ds.ReadAsArray()\n nodata_mask = image_arr != nodata_val\n nodata_mask_flat = np.sum(nodata_mask, axis=0) != 0\n\n if nodata_mask_flat.min() == 1:\n image_ds = None\n return\n\n gt_patch_ds = gdal.Open(str(gt_patch), gdalconst.GA_Update)\n gt_patch_arr = gt_patch_ds.ReadAsArray()\n masked_gt_arr = np.where(nodata_mask_flat == 1, gt_patch_arr, mask_val)\n gt_patch_ds.GetRasterBand(1).WriteArray(masked_gt_arr)\n gt_patch_ds = None\n image_ds = None", "def predict_entire_mask_downscaled_tta(dataset, model, batch_size=32):\n\n loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True)\n\n weighting = torch.from_numpy(get_tile_weighting(dataset.tile_size))\n weighting_cuda = weighting.clone().cuda().unsqueeze(0).unsqueeze(0)\n weighting = weighting.cuda().half()\n\n global_pred = torch.zeros(\n (4, dataset.orig_size[0], dataset.orig_size[1]),\n dtype=torch.half, device=\"cuda\"\n )\n global_counter = torch.zeros(\n (1, dataset.orig_size[0], dataset.orig_size[1]),\n dtype=torch.half, device=\"cuda\"\n )\n\n model.eval()\n with torch.no_grad():\n for img, pos in loader:\n img = img.to(\"cuda\")\n _, _, h, w = img.shape\n\n preds = []\n if model.num_classes == 2:\n pred = model(img)[:, 0].view(1, -1, h, w).sigmoid().detach()\n else:\n pred = model(img).view(1, -1, 
h, w).sigmoid().detach()\n preds.append(pred)\n\n for f in FLIPS:\n pred_flip = model(torch.flip(img, f))\n if model.num_classes == 2:\n pred_flip = pred_flip[:, 0]\n pred_flip = torch.flip(pred_flip, f).view(1, -1, h, w).sigmoid().detach()\n preds.append(pred_flip)\n\n pred = torch.cat(preds, 0)\n pred = (pred * weighting_cuda).half()\n\n for tile_idx, (x0, x1, y0, y1) in enumerate(pos):\n global_pred[:, x0: x1, y0: y1] += pred[:, tile_idx]\n global_counter[:, x0: x1, y0: y1] += weighting\n\n for i in range(global_pred.size(1)):\n global_pred[:, i] = torch.div(global_pred[:, i], global_counter[:, i])\n\n return global_pred", "def mask_out(self, x, lengths):\n params = self.params\n slen, bs = x.size()\n\n # define target words to predict\n if params.sample_alpha == 0:\n pred_mask = np.random.rand(slen, bs) <= params.word_pred\n pred_mask = torch.from_numpy(pred_mask.astype(np.uint8))\n else:\n x_prob = params.mask_scores[x.flatten()]\n n_tgt = math.ceil(params.word_pred * slen * bs)\n tgt_ids = np.random.choice(len(x_prob), n_tgt, replace=False, p=x_prob / x_prob.sum())\n pred_mask = torch.zeros(slen * bs, dtype=torch.uint8)\n pred_mask[tgt_ids] = 1\n pred_mask = pred_mask.view(slen, bs)\n\n # do not predict padding\n pred_mask[x == params.pad_index] = 0\n pred_mask[0] = 0 # TODO: remove\n\n # mask a number of words == 0 [8] (faster with fp16)\n if params.fp16:\n pred_mask = pred_mask.view(-1)\n n1 = pred_mask.sum().item()\n n2 = max(n1 % 8, 8 * (n1 // 8))\n if n2 != n1:\n pred_mask[torch.nonzero(pred_mask).view(-1)[:n1 - n2]] = 0\n pred_mask = pred_mask.view(slen, bs)\n # assert pred_mask.sum().item() % 8 == 0\n\n # generate possible targets / update x input\n pred_mask = pred_mask.bool()\n _x_real = x[pred_mask]\n if len(_x_real) == 0:\n pred_mask[0, 0] = 1\n _x_real = x[pred_mask]\n _x_rand = _x_real.clone().random_(params.n_words)\n _x_mask = _x_real.clone().fill_(params.mask_index)\n probs = torch.multinomial(params.pred_probs, len(_x_real), replacement=True)\n _x = _x_mask * (probs == 0).long() + _x_real * (probs == 1).long() + _x_rand * (probs == 2).long()\n x = x.masked_scatter(pred_mask, _x)\n\n assert 0 <= x.min() <= x.max() < params.n_words\n assert x.size() == (slen, bs)\n assert pred_mask.size() == (slen, bs)\n\n return x, _x_real, pred_mask", "def _task_filter(self, task):\n raise NotImplementedError(\"Subclasses should implement this!\")", "def reset_hidden(hidden, mask):\n if len(mask) != 0:\n hidden[:, mask, :] = 0\n \n return hidden", "def test_ignored_output(self):\n process_group = self._get_process_group()\n\n class IgnoredOutput(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = nn.Linear(10, 4, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n return F.softmax(x, dim=1)\n\n model = DistributedDataParallel(\n IgnoredOutput().float(),\n process_group=process_group,\n )\n\n batch_size = 4\n criterion = nn.CrossEntropyLoss()\n input = torch.rand([batch_size, 2], dtype=torch.float)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])\n\n # Run a few iterations where we ignore the output.\n for _ in range(4):\n output = model(input)\n del output\n\n # Run a few iterations where we use the output.\n for _ in range(4):\n output = model(input)\n loss = criterion(output, target)\n loss.backward()", "def _mask(self, map_):\n return None", "def __call__(self, masks: torch.Tensor) -> torch.Tensor:\n\n if self.pre_thresholder 
is not None:\n masks = self.pre_thresholder(masks)\n\n if not self.batch_wise:\n masks: torch.Tensor = masks.unsqueeze(0)\n modified_masks: torch.Tensor = self.trafo(masks)\n if not self.batch_wise:\n modified_masks = masks.squeeze(0)\n\n if self.post_thresholder is not None:\n modified_masks = self.post_thresholder(modified_masks)\n\n return modified_masks", "def setup_target_mask(self):\n if self.region is not None:\n region_value = self.region.get(\"value\", None)\n if region_value is not None:\n self._obs_file.target_mask = MV2.not_equal(\n self.sftlf[\"target_grid\"], region_value\n )", "def create_mask_affinity_map_tasks(task_queue, aff_input_layer_path, aff_output_layer_path, \n aff_mip, mask_layer_path, mask_mip, output_block_start, output_block_size, grid_size ):\n for z in tqdm(range(grid_size[0]), desc='z loop'):\n for y in range(grid_size[1]):\n for x in range(grid_size[2]):\n output_bounds = Bbox.from_slices(tuple(slice(s+x*b, s+x*b+b)\n for (s, x, b) in zip(output_block_start, (z, y, x), output_block_size)))\n task = MaskAffinitymapTask(\n aff_input_layer_path=aff_input_layer_path,\n aff_output_layer_path=aff_output_layer_path,\n aff_mip=aff_mip, \n mask_layer_path=mask_layer_path,\n mask_mip=mask_mip,\n output_bounds=output_bounds,\n )\n task_queue.insert(task)\n task_queue.wait()\n\n vol = CloudVolume(output_layer_path, mip=aff_mip)\n vol.provenance.processing.append({\n 'method': {\n 'task': 'InferenceTask',\n 'aff_input_layer_path': aff_input_layer_path,\n 'aff_output_layer_path': aff_output_layer_path,\n 'aff_mip': aff_mip,\n 'mask_layer_path': mask_layer_path,\n 'mask_mip': mask_mip,\n 'output_block_start': output_block_start,\n 'output_block_size': output_block_size, \n 'grid_size': grid_size,\n },\n 'by': OPERATOR_CONTACT,\n 'date': strftime('%Y-%m-%d %H:%M %Z'),\n })\n vol.commit_provenance()", "def discard_none_targets(dataset):\r\n indices = []\r\n for (ii,sample) in enumerate(dataset):\r\n target = sample[1]\r\n if target is not None:\r\n indices.append(ii)\r\n\r\n return Subset(dataset,indices)", "def mask_incorrect(rec, include_ITI=True, ITI_sec_to_include=None, **context):\n newrec = rec.copy()\n \n if include_ITI:\n e=newrec['resp'].epochs\n et=e.loc[e.name==\"TRIAL\"]\n for i,r in e.loc[e.name.str.endswith(\"TRIAL\")].iterrows():\n next_trial=et.loc[et.start>r.start,'start'].min()\n if ~np.isnan(next_trial):\n # data exists after current trail\n if ITI_sec_to_include is not None:\n # limit amount of post-target data to include\n if next_trial > e.at[i,'end']+ITI_sec_to_include:\n next_trial = e.at[i,'end']+ITI_sec_to_include\n e.at[i,'end']=next_trial\n #print(i, r.start, next_trial)\n for s in list(newrec.signals.keys()):\n newrec[s].epochs = e\n newrec = newrec.and_mask(['PASSIVE_EXPERIMENT', 'HIT_TRIAL', 'CORRECT_REJECT_TRIAL', 'MISS_TRIAL'])\n\n return newrec", "def _get_mask_for_eval(\n self, mask: torch.BoolTensor, pos_tags: torch.LongTensor\n ) -> torch.Tensor:\n new_mask = mask.detach()\n for label in self._pos_to_ignore:\n label_mask = pos_tags.eq(label)\n new_mask = new_mask & ~label_mask\n return new_mask.bool()", "def adjust_images_task3(t1, t2, flair, mask, max_values=[150, 150, 180]):\n \n assert t1.shape == t2.shape == flair.shape == mask.shape\n t1 = cut_outliers_task3(t1, mask, cut_value=max_values[0])\n t1 = rescale_data(t1, mask, min_value=0, max_value=max_values[0])\n \n t2 = cut_outliers_task3(t2, mask, cut_value=max_values[1])\n t2 = rescale_data(t2, mask, min_value=0, max_value=max_values[1])\n \n flair = cut_outliers_task3(flair, mask, 
cut_value=max_values[2])\n flair = rescale_data(flair, mask, min_value=0, max_value=max_values[2])\n \n return t1, t2, flair", "def maskNLLLoss(self, decoder_output, target, mask, dev):\n mask = mask.unsqueeze(-1)\n nTotal = mask.sum()\n crossEntropy = -torch.log(torch.gather(decoder_output.squeeze(1), 1, target.unsqueeze(-1)).squeeze(1))\n loss = crossEntropy.masked_select(mask).mean()\n loss = loss.to(dev)\n return loss, nTotal.item()", "def new_task(self):\n self.true_trajectory = self.simulate()\n self.x0 = self.true_trajectory[0]\n self.xT = self.true_trajectory[-1]\n return self.reset()", "def get_pred_mask(test_image, model):\n\n test_image= test_image=transforms.ToPILImage()(test_image)\n #test_image=Image.fromarray(test_image)\n new_mask = model(transforms.ToTensor()(test_image).unsqueeze(1).cuda())[1].transpose(1,2).transpose(2,3).cpu().detach().numpy().squeeze()\n return new_mask", "def _apply_mask_for_mlm(self,\n ds: tf.data.Dataset,\n vocab_size: int):\n\n # Do action only for 15% of tokens (and mask output for others)\n prob_mask_idx = 0.15\n # 10% nothing to do, 10% random word, 80% mask\n # prob_nothing, prob_random_replacement, prob_replace_by_mask \\\n prob_mask_actions = np.array([0.1, 0.1, 0.8])\n prob_mask_actions = prob_mask_actions * prob_mask_idx\n prob_mask_actions = np.append(prob_mask_actions, [1 - sum(prob_mask_actions)]).tolist()\n\n distrib_mask = tfp.distributions.Multinomial(total_count=1,\n probs=prob_mask_actions)\n\n @tf.function\n def apply_mask(x, output):\n inputs, enc_padding_mask = x\n\n input_shape = tf.shape(inputs) # Batch size * Seq Length\n output_shape = tf.shape(output) # Batch size * Seq Length\n\n masks = distrib_mask.sample(input_shape,\n seed=self._seed) # Batch size *Seq Length * Probability for each class (4)\n masks = tf.cast(masks, dtype=tf.int32)\n\n random_tokens = tf.random.uniform(input_shape, minval=len(self._special_tokens), maxval=vocab_size,\n dtype=inputs.dtype, seed=self._seed, name=None)\n\n # Replace with mask\n # One is the mask token id for HuggingFace tokenizers\n inputs_masked = tf.where(tf.math.equal(masks[:, :, 2], 1), inputs, tf.ones(input_shape, dtype=inputs.dtype))\n\n # Replace with random token\n inputs_masked = tf.where(tf.math.equal(masks[:, :, 1], 1), inputs_masked, random_tokens)\n\n output_masked = tf.where(tf.math.equal(masks[:, :, 3], 1),\n tf.zeros(output_shape, dtype=output.dtype),\n output)\n\n return (inputs_masked, enc_padding_mask), output_masked\n\n return ds.map(map_func=apply_mask)", "def remove_nans(tensor, replace_with=0.):\n mask = tensor == tensor\n output = torch.full_like(tensor, replace_with)\n output[mask] = tensor[mask]\n return output", "def forward(self, pred, target):\n if self.mask:\n target, mask = target\n # todo: loss with mask\n else:\n # todo: loss w/o mask\n pass\n return loss", "def _reset_mask(self, reset_to=False):\n self.data.mask = reset_to", "def fn(mask, rem):\n if not mask: return 0 # done \n ans = inf \n for i, x in enumerate(tasks): \n if mask & (1<<i): \n if x <= rem: ans = min(ans, fn(mask ^ (1<<i), rem - x))\n else: ans = min(ans, 1 + fn(mask ^ (1<<i), sessionTime - x))\n return ans", "def shoot_infs(inp_tensor):\n mask_inf = torch.isinf(inp_tensor)\n ind_inf = torch.nonzero(mask_inf)\n if len(ind_inf) > 0:\n for ind in ind_inf:\n if len(ind) == 2:\n inp_tensor[ind[0], ind[1]] = 0\n elif len(ind) == 1:\n inp_tensor[ind[0]] = 0\n m = torch.max(inp_tensor)\n for ind in ind_inf:\n if len(ind) == 2:\n inp_tensor[ind[0], ind[1]] = m\n elif len(ind) == 1:\n 
inp_tensor[ind[0]] = m\n return inp_tensor", "def set_task_not_started(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 2)\n\n # Refresh the table\n self.write_tasks_table()", "def zero_out_nans(module, grad_input, grad_output):\n for grad in grad_input:\n grad[grad != grad] = 0 # technically shouldn't modify inputs", "def _do_power_action(cls, task):\n if task is None:\n return\n result = vc_soap_util.get_task_state(task)\n return result", "def compute_mask(self, inputs, mask=None):\n if mask is None:\n return None\n if not isinstance(mask, list):\n raise ValueError('`mask` should be a list.')\n if not isinstance(inputs, list):\n raise ValueError('`inputs` should be a list.')\n if len(mask) != len(inputs):\n raise ValueError('The lists `inputs` and `mask` '\n 'should have the same length.')\n if mask[0] is not None:\n raise ValueError('Attention mask should be None.')\n if mask[1] is None:\n return None\n return K.any(mask[1], axis=-1)", "def reset_task(self, task_index: Optional[int] = None):\n if task_index is None:\n task_index = np.random.permutation(self.graph.num_graph)[0]\n else:\n task_index = task_index % self.graph.num_graph\n\n self.task_index = task_index\n self.graph.set_graph_index(task_index)\n self.num_subtasks = len(self.graph.subtask_id_list)\n self.subtask_reward = self.graph.subtask_reward\n self.subtask_id_list = self.graph.subtask_id_list\n self.game_length = self._default_game_len\n #self.game_length = int(np.random.uniform(0.8, 1.2) * self._default_game_len)\n\n # Reset map (96% of time)\n self.map.reset(subtask_id_list=self.subtask_id_list, reset_map=True)\n return self.task", "def finetune(ft_ds, model, task, epochs=10, eval_ds=None):\n\n print('==========FINETUNE==========')\n\n # Filter out undesired examples with excluded_label\n ds = ft_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.finetune_preprocess)\n ds = ds.shuffle(1000)\n ds = ds.batch(FLAGS.finetune_bs)\n\n # loss, metrics, optimizers\n train_loss= tf.keras.metrics.Mean(name='train_loss')\n train_sup_acc = tf.keras.metrics.Accuracy(name='train_supervised_accuracy')\n criterion_sup = tf.nn.softmax_cross_entropy_with_logits \n optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) \n for epoch in range(epochs): \n train_loss.reset_states()\n train_sup_acc.reset_states()\n for x in ds:\n with tf.GradientTape() as tape:\n image = x['image']\n labels = x[task['name']]\n out = model(image, mode='supervised', sup_layers=1, training=True)\n # print(tf.math.argmax(out, axis=-1))\n metrics.update_supervised_accuracy(train_sup_acc, labels, out)\n loss = criterion_sup(tf.one_hot(labels, depth=task['num_classes']), out)\n loss = tf.math.reduce_mean(loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(\n filter(lambda gv: gv[0] is not None, zip(gradients, model.trainable_variables))\n )\n train_loss.update_state(loss)\n print('supervised loss')\n print(train_loss.result())\n print('supervised accuracy')\n print(train_sup_acc.result())\n\n # Evaluate results on eval_ds if possible\n if eval_ds: \n evaluate(eval_ds, model, task)", "def update(self, # see `thelper.typedefs.IterCallbackParams` for more info\n task, # type: thelper.tasks.utils.Task\n input, # type: thelper.typedefs.InputType\n pred, # type: thelper.typedefs.DetectionPredictionType\n target, # type: thelper.typedefs.DetectionTargetType\n sample, # type: thelper.typedefs.SampleType\n loss, # 
type: Optional[float]\n iter_idx, # type: int\n max_iters, # type: int\n epoch_idx, # type: int\n max_epochs, # type: int\n output_path, # type: AnyStr\n **kwargs, # type: Any\n ): # type: (...) -> None\n assert len(kwargs) == 0, \"unexpected extra arguments present in update call\"\n assert isinstance(task, thelper.tasks.Detection), \"detect report only impl for detection tasks\"\n assert iter_idx is not None and max_iters is not None and iter_idx < max_iters, \\\n \"bad iteration indices given to update function\"\n if self.bbox is None or self.bbox.size != max_iters:\n self.bbox = np.asarray([None] * max_iters)\n self.true = np.asarray([None] * max_iters)\n self.meta = {key: np.asarray([None] * max_iters) for key in self.log_keys}\n if task.class_names != self.class_names:\n self.class_names = task.class_names\n if target is None or len(target) == 0 or all(len(t) == 0 for t in target):\n target = [None] * len(pred) # simplify unpacking during report generation\n else:\n assert len(pred) == len(target), \"prediction/target bounding boxes list batch size mismatch\"\n for gt in target:\n assert all(isinstance(bbox, BoundingBox) for bbox in gt), \\\n \"detect logger only supports 2D lists of bounding box targets\"\n for det in pred:\n assert all(isinstance(bbox, BoundingBox) for bbox in det), \\\n \"detect logger only supports 2D lists of bounding box predictions\"\n self.bbox[iter_idx] = pred\n self.true[iter_idx] = target\n for meta_key in self.log_keys:\n assert meta_key in sample, f\"could not extract sample field with key {repr(meta_key)}\"\n val = sample[meta_key]\n assert isinstance(val, (list, np.ndarray, torch.Tensor)), f\"field {repr(meta_key)} should be batched\"\n self.meta[meta_key][iter_idx] = val if isinstance(val, list) else val.tolist()", "def _nodata_mask(self):\n if self.nodata_value is None:\n return np.ones_like(self.array, dtype=np.bool)\n return self.array != self.nodata_value", "def forward_train(self, imgs, label, token_ids=None, segment_ids=None, input_mask=None, ans_ids=None, ans_mask=None, **kwargs):\n # (batch_size, num_clips*num_crops, channel, num_segments, h, w) -> (batch_size*num_clips*num_crops, channel, num_segments, h, w)\n imgs = imgs.reshape((-1, ) + imgs.shape[2:]) \n if self.from_scratch:\n imgs = imgs / 255.0\n B_text = token_ids.shape[0]\n # text reshape: (batch_size, num_candidates, seq_length) -> (batch_size * num_candidates, seq_length)\n token_ids = token_ids.reshape((-1, ) + token_ids.shape[2:])\n segment_ids = segment_ids.reshape((-1, ) + segment_ids.shape[2:])\n input_mask = input_mask.reshape((-1, ) + input_mask.shape[2:])\n losses = dict()\n visual_token = self.extract_visual_feat(imgs) # b, d, T, h, w\n B, D, T, H, W = visual_token.shape\n if B_text != B:\n visual_token = visual_token.view(B_text, -1, D, T, H, W)\n visual_token = visual_token.mean(dim=1)\n \n # text feature #\n text_out_with_mask = self.text_backbone(token_ids, input_mask)\n text_out_last_hidden_state = text_out_with_mask['last_hidden_state']\n\n # contrastive type finetuning retrieval #\n if self.task == 'retrieval':\n # text_only_out = self.text_backbone(token_ids, input_mask)\n visual_emb, text_emb = self.ssl_head(visual_token, text_out_last_hidden_state, input_mask, token_ids)\n nce_loss = self.loss_func(visual_emb, text_emb)\n losses['retrieval_nce_loss'] = nce_loss \n elif self.task == 'video_qa' or self.task == 'FIB':\n B, D, T, H, W = visual_token.shape\n visual_token = visual_token.view(B, D, T, -1).permute(0, 2, 3, 1)\n if hasattr(self.qa_head, 'num_labels'):\n 
num_choices = self.qa_head.num_labels\n visual_token_all = visual_token\n else:\n num_choices = int(token_ids.shape[0] / B)\n visual_token_all = visual_token.unsqueeze(1).expand(-1, num_choices, -1, -1, -1).flatten(0,1)\n\n output = self.multimodal_backbone(visual_token=visual_token_all, text_input_mask=input_mask, text_input_embeds=text_out_last_hidden_state)\n \n if self.answer_mask:\n mask_idx = torch.where(token_ids == 103)\n itm_output = output['t_last_hidden_state'][mask_idx]\n elif self.answer_cls:\n if 'cls_last_hidden_state' in output:\n itm_output = output['cls_last_hidden_state'].squeeze()\n else:\n itm_output = output['t_last_hidden_state'][:, 0]\n if self.itm_head is not None:\n itm_output = self.itm_head(itm_output)\n\n else:\n all_cls_emb = output['last_hidden_state'][:, 0]\n itm_output = self.itm_head(all_cls_emb)\n \n if self.qa_head is not None:\n final_output = self.qa_head(itm_output).view(-1, num_choices)\n final_label = label\n else:\n final_output = itm_output[:, 1]\n final_label = label\n\n\n qa_loss = self.loss_func(final_output, final_label.view(-1))\n losses['qa_loss'] = qa_loss\n\n\n\n return losses", "def attention_mask(model, x):\n config = model.config\n input_mask = model.inputs[\"input_mask\"]\n final_mask = model.builder.customOp(opName=\"AttentionMask\",\n opVersion=1,\n domain=\"ai.graphcore\",\n inputs=[input_mask, x],\n attributes={\"dataType\": model.config.popart_dtype})[0]\n final_mask = model.detach(final_mask)\n return final_mask", "def __call__(self, in_state, labels, label_mask, predict=False):\n t_ = tf.matmul(in_state, self._W_sftm) + self._B_sftm # t_: [batch_size * class_num]\n #t_ = tf.expand_dims(label_mask, 1) * t_\n t_sftm_ = self._activation(t_)\n if not predict:\n #labels_1hot = tf.one_hot(labels, self._class_num, 1.0, 0.0)\n loss = self._loss_f(t_, labels)\n loss = loss * label_mask\n return tf.argmax(t_sftm_, 1), t_sftm_, loss\n else:\n return tf.argmax(t_sftm_, 1), t_sftm_", "def evaluate(self, tick, task, inputs, nosend_ports=None):\n raise NotImplementedError(\"abstract\")", "def confusion_matrix_ir_weight_none(labels, prediction, output):\n labels_dtype = labels.dtype\n prediction_dtype = prediction.dtype\n output_dtype = output.dtype\n labels_perblock_nums = compute_perblock_nums(labels_dtype)\n prediction_perblock_nums = compute_perblock_nums(prediction_dtype)\n output_perblock_nums = compute_perblock_nums(output_dtype)\n\n height = list(output.shape)[0].value\n width = list(output.shape)[1].value\n number = list(labels.shape)[0].value\n length, label_factor = compute_ub_length(number, labels_dtype,\n prediction_dtype)\n\n # ================== IR builder Initial ==================\n # apply for register\n ibuilder = tvm.ir_builder.create()\n device_core_num = tbe_platform.cce_conf.get_soc_spec(\n tbe_platform.cce_conf.CORE_NUM)\n core_num = tvm.thread_axis(\"blockIdx.x\")\n ibuilder.scope_attr(core_num, \"thread_extent\", device_core_num)\n\n # apply for confusion_buf,weight_buf\n block_num, block_per_core, out_factor, last_remian, total_len, use_cores = compute_outub_size(\n height, width, output_dtype, device_core_num)\n block_num_int8 = int(total_len // BYTES_PER_BLOCK) + 1\n block_num_fp16 = int(total_len // FLOAT16_NUMS) + 1\n\n if output_dtype in [\"int8\", \"uint8\"]:\n confusion_buf = apply_for_new_alloc(ibuilder, \"float16\", (block_num_fp16, FLOAT16_NUMS), \\\n scope=cce.scope_ubuf, name=\"confusion_buf\")\n confusion_buf_out = apply_for_new_alloc(ibuilder, output_dtype,\n (block_num_int8, output_perblock_nums), \\\n 
scope=cce.scope_ubuf, name=\"confusion_buf_out\")\n weight_buf = applyub_by_length(ibuilder, length, \"float16\",\n \"weight_buf\")\n weight_castto_dtype = \"float16\"\n\n else:\n confusion_buf = apply_for_new_alloc(ibuilder, output_dtype,\n (block_num, output_perblock_nums), \\\n scope=cce.scope_ubuf, name=\"confusion_buf\")\n weight_buf = applyub_by_length(ibuilder, length, output_dtype,\n \"weight_buf\")\n weight_castto_dtype = output_dtype\n\n reg_buf = ibuilder.allocate(\"int32\", (2,), scope=cce.scope_reg, name=\"reg_buf\")\n reg_tmp = ibuilder.allocate(weight_castto_dtype, (4,), scope=cce.scope_reg, name=\"reg_tmp\")\n # apply for weights add ub_buf\n value_buf = apply_for_new_alloc(ibuilder, weight_castto_dtype, (BITS_NUMS,), \\\n scope=cce.scope_ubuf, name=\"value_buf\")\n value_buf1 = apply_for_new_alloc(ibuilder, weight_castto_dtype, (BITS_NUMS,), \\\n scope=cce.scope_ubuf, name=\"value_buf1\")\n\n # apply for label_buf, predict_buf\n label_buf_int32 = apply_for_new_alloc(ibuilder, \"int32\",\n (int(length // BITS_NUMS) + 1, BITS_NUMS), \\\n scope=cce.scope_ubuf, name=\"label_buf_int32\")\n predict_buf_int32 = apply_for_new_alloc(ibuilder, \"int32\",\n (int(length // BITS_NUMS) + 1, BITS_NUMS), \\\n scope=cce.scope_ubuf, name=\"predict_buf_int32\")\n if labels_dtype != \"int32\":\n label_buf = apply_for_new_alloc(ibuilder, labels_dtype,\n (int(length // labels_perblock_nums) + 1,\n labels_perblock_nums), \\\n scope=cce.scope_ubuf, name=\"label_buf\")\n if labels_dtype in (\"int8\", \"uint8\"):\n cast_fp16_buf_a = apply_for_new_alloc(ibuilder, \"float16\",\n (int(length // FLOAT16_NUMS) + 1, FLOAT16_NUMS), \\\n scope=cce.scope_ubuf, name=\"cast_fp16_buf_a\")\n if prediction_dtype != \"int32\":\n predict_buf = apply_for_new_alloc(ibuilder, prediction_dtype,\n (int(length // prediction_perblock_nums) + 1,\n prediction_perblock_nums), \\\n scope=cce.scope_ubuf, name=\"predict_buf\")\n if prediction_dtype in (\"int8\", \"uint8\"):\n cast_fp16_buf_b = apply_for_new_alloc(ibuilder, \"float16\",\n (int(length // FLOAT16_NUMS) + 1, FLOAT16_NUMS), \\\n scope=cce.scope_ubuf, name=\"cast_fp16_buf_b\")\n\n weight_blocks = int(length // compute_perblock_nums(weight_castto_dtype)) + 1\n with ibuilder.for_range(0, out_factor) as out:\n # initilation for confusion_buf and weight_buf\n if output_dtype in [\"int8\", \"uint8\"]:\n vector_dump_set(ibuilder, 0, block_num_fp16, confusion_buf)\n else:\n vector_dump_set(ibuilder, 0, block_num, confusion_buf)\n vector_dump_set(ibuilder, 1, weight_blocks, weight_buf)\n\n # ================== Traverse the value of labels/prediction ==================\n with ibuilder.for_range(0, label_factor) as b_out:\n # labels --> label_buf --> int32\n if labels_dtype != \"int32\":\n copy_weight_to_ub(ibuilder, labels, label_buf, b_out, length)\n if labels_dtype in (\"int8\", \"uint8\"):\n cast_to(ibuilder, length, label_buf, cast_fp16_buf_a)\n cast_to(ibuilder, length, cast_fp16_buf_a, label_buf_int32)\n else:\n cast_to(ibuilder, length, label_buf, label_buf_int32)\n else:\n copy_weight_to_ub(ibuilder, labels, label_buf_int32, b_out,\n length)\n\n # predictions --> predict_buf --> int32\n if prediction_dtype != \"int32\":\n copy_weight_to_ub(ibuilder, prediction, predict_buf, b_out,\n length)\n if prediction_dtype in (\"int8\", \"uint8\"):\n cast_to(ibuilder, length, predict_buf, cast_fp16_buf_b)\n cast_to(ibuilder, length, cast_fp16_buf_b,\n predict_buf_int32)\n else:\n cast_to(ibuilder, length, predict_buf, predict_buf_int32)\n else:\n copy_weight_to_ub(ibuilder, 
prediction, predict_buf_int32,\n b_out, length)\n\n # single value loop\n with ibuilder.for_range(0, length) as i:\n with ibuilder.if_scope((b_out * length + i) < number):\n # label <==> reg_buf[0]\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 1)\n ibuilder.emit(tvm.call_extern(\"int32\", \"reg_mov\", \\\n tvm.call_extern(\"int32\", \"reg\", reg_buf[0]), \\\n label_buf_int32.access_ptr(\"r\", offset=i), 0))\n # prediction <==> reg_buf[1]\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 1)\n ibuilder.emit(tvm.call_extern(\"int32\", \"reg_mov\", \\\n tvm.call_extern(\"int32\", \"reg\", reg_buf[1]), \\\n predict_buf_int32.access_ptr(\"r\", \\\n offset=i), 0))\n\n with ibuilder.if_scope(tvm.all(reg_buf[0]*width+reg_buf[1] >= out *\n total_len+block_per_core*output_perblock_nums*core_num,\n reg_buf[0]*width+reg_buf[1] < (out+1) *\n total_len+block_per_core*output_perblock_nums*core_num)):\n\n # weight <==> reg_tmp[0]\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 1)\n ibuilder.emit(tvm.call_extern(weight_castto_dtype, \"reg_mov\", \\\n tvm.call_extern(weight_castto_dtype,\n \"reg\", reg_tmp[0]), \\\n weight_buf.access_ptr(\"r\", offset=i), 0))\n\n # ==========Values in the same position need to be superimposed==========\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 1)\n ibuilder.emit(tvm.call_extern(weight_castto_dtype, \"reg_mov\", \\\n tvm.call_extern(weight_castto_dtype,\n \"reg\", reg_tmp[1]), \\\n confusion_buf.access_ptr(\"r\", \\\n offset=reg_buf[0] * width + reg_buf[1] - out * \\\n total_len-block_per_core*output_perblock_nums*core_num), 0))\n\n\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 1)\n ibuilder.emit(tvm.call_extern(weight_castto_dtype, \"reg_mov\", \\\n value_buf.access_ptr(\"w\", offset=0), \\\n tvm.call_extern(weight_castto_dtype,\n \"reg\", reg_tmp[0]), ))\n\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 1)\n ibuilder.emit(tvm.call_extern(weight_castto_dtype, \"reg_mov\", \\\n value_buf1.access_ptr(\"w\", offset=0), \\\n tvm.call_extern(weight_castto_dtype,\n \"reg\",\n reg_tmp[1]), ))\n\n with ibuilder.new_scope():\n mask_len = 8\n reset_mask_insn(\n ibuilder, value_buf.dtype, bits=mask_len)\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 2)\n ibuilder.emit(tvm.call_extern(value_buf.dtype, \"vadd\", \\\n value_buf.access_ptr('w', offset=0), \\\n value_buf.access_ptr('r', offset=0), \\\n value_buf1.access_ptr('r', offset=0), \\\n 1, 0, 0, 0, 0, 0, 0))\n\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 1)\n ibuilder.emit(tvm.call_extern(weight_castto_dtype, \"reg_mov\", \\\n tvm.call_extern(weight_castto_dtype,\n \"reg\", reg_tmp[0]), \\\n value_buf.access_ptr(\"r\", offset=0), 0))\n\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 1)\n ibuilder.emit(tvm.call_extern(weight_castto_dtype, \"reg_mov\", \\\n confusion_buf.access_ptr(\"w\", \\\n offset=reg_buf[0] * width + reg_buf[1] - out * \\\n total_len-block_per_core*output_perblock_nums*core_num), \\\n tvm.call_extern(weight_castto_dtype,\n \"reg\", reg_tmp[0]), ))\n\n # output\n with ibuilder.if_scope(core_num < use_cores):\n if output_dtype in [\"int32\", \"float16\", \"float32\"]:\n if last_remian != 0:\n with ibuilder.if_scope(tvm.all(out == (out_factor - 1))):\n last_block_num = last_remian\n with ibuilder.new_scope():\n 
ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 6)\n ibuilder.emit(tvm.call_extern(output_dtype, \"copy_ubuf_to_gm\", \\\n output.access_ptr('w', offset=out * total_len + \\\n block_per_core*output_perblock_nums*core_num), \\\n confusion_buf.access_ptr('r', offset=0), \\\n 0, 1, last_block_num, 0, 0))\n with ibuilder.else_scope():\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 6)\n ibuilder.emit(tvm.call_extern(output_dtype, \"copy_ubuf_to_gm\", \\\n output.access_ptr('w', offset=out * total_len + \\\n block_per_core*output_perblock_nums*core_num), \\\n confusion_buf.access_ptr('r', offset=0), \\\n 0, 1, block_num, 0, 0))\n else:\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 6)\n ibuilder.emit(tvm.call_extern(output_dtype, \"copy_ubuf_to_gm\", \\\n output.access_ptr('w', offset=out * total_len + \\\n block_per_core*output_perblock_nums*core_num), \\\n confusion_buf.access_ptr('r', offset=0), \\\n 0, 1, block_num, 0, 0))\n\n else:\n cast_to(ibuilder, total_len, confusion_buf, confusion_buf_out)\n if last_remian != 0:\n with ibuilder.if_scope(tvm.all(out == (out_factor - 1))):\n last_block_num = last_remian\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 6)\n ibuilder.emit(tvm.call_extern(output_dtype, \"copy_ubuf_to_gm\", \\\n output.access_ptr('w', offset=out * total_len + \\\n block_per_core*output_perblock_nums*core_num), \\\n confusion_buf_out.access_ptr('r', offset=0), \\\n 0, 1, last_block_num, 0, 0))\n with ibuilder.else_scope():\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 6)\n ibuilder.emit(tvm.call_extern(output_dtype, \"copy_ubuf_to_gm\", \\\n output.access_ptr('w', offset=out * total_len + \\\n block_per_core*output_perblock_nums*core_num), \\\n confusion_buf_out.access_ptr('r', offset=0), \\\n 0, 1, block_num, 0, 0))\n else:\n with ibuilder.new_scope():\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 6)\n ibuilder.emit(tvm.call_extern(output_dtype, \"copy_ubuf_to_gm\", \\\n output.access_ptr('w', offset=out * total_len + \\\n block_per_core*output_perblock_nums*core_num), \\\n confusion_buf_out.access_ptr('r', offset=0), \\\n 0, 1, block_num, 0, 0))\n\n return ibuilder.get()", "def _compute_masked_hidden(self, hidden, mask):\r\n mask = mask.unsqueeze(-1).expand_as(hidden)\r\n hidden_masked = hidden[mask].contiguous().view(-1, hidden.size(-1))\r\n return hidden_masked", "def mask_nan(y_true, y_pred):\n notnan_true = K.cast(~tf.math.is_nan(y_true), \"float32\")\n num_notnan = K.sum(K.flatten(notnan_true))\n y_pred = tf.math.multiply(y_pred, notnan_true)\n\n # We need to use tf.where to do this substitution, because when trying to\n # multiply with just the notnan_true masks,\n # NaN*0 = NaN, so NaNs are not removed\n y_true = K.cast(\n tf.where(~tf.math.is_nan(y_true), y_true, tf.zeros_like(y_true)), \"float32\"\n )\n return y_pred, y_true, num_notnan", "def mask_incoherent(self):\n self.MaskPrefix = 'i' + self.MaskPrefix\n print('Masking pixel values where .msk value is less than {0}...'.format(threshold))\n for ig in self.Set:\n igram = self.load_ma(ig)\n mskFile = ig.Path[:-3] + 'msk'\n coherence = roipy.tools.load_half(ig, 2, mskFile)\n incoherent = ma.masked_less(coherence, self.Cothresh)\n igram[incoherent.mask] = ma.masked\n mskFile = self.MaskPrefix + 'Mask_' + ig.Name[:-4]\n np.save(os.path.join(self.ProcDir, mskFile), igram.mask)\n print(mskFile)\n\n print('Done')", "def forward_test(self, imgs, token_ids=None, 
segment_ids=None, input_mask=None, ans_ids=None, ans_mask=None, **kwargs):\n imgs = imgs.reshape((-1,) + imgs.shape[2:])\n if self.from_scratch:\n imgs = imgs / 255.0\n visual_token = self.extract_visual_feat(imgs) # b, d, T, h, w\n B, D, T, H, W = visual_token.shape\n B_text = token_ids.shape[0]\n if B_text != B:\n visual_token = visual_token.view(B_text, -1, D, T, H, W)\n visual_token = visual_token.mean(dim=1)\n B = B_text\n token_ids = token_ids.reshape((-1, ) + token_ids.shape[2:])\n segment_ids = segment_ids.reshape((-1, ) + segment_ids.shape[2:])\n input_mask = input_mask.reshape((-1, ) + input_mask.shape[2:])\n\n # text feature #\n text_out_with_mask = self.text_backbone(token_ids, input_mask)\n text_out_last_hidden_state = text_out_with_mask['last_hidden_state']\n\n # only use the uni-modal transformer for retrieval \n if self.separate_test:\n visual_emb, text_emb = self.ssl_head(visual_token, text_out_last_hidden_state, input_mask, token_ids)\n return visual_emb, text_emb\n\n\n if self.task == 'video_qa' or self.task == 'FIB':\n B, D, T, H, W = visual_token.shape\n visual_token = visual_token.view(B, D, T, -1).permute(0, 2, 3, 1)\n if hasattr(self.qa_head, 'num_labels'):\n num_choices = self.qa_head.num_labels\n visual_token_all = visual_token\n else:\n num_choices = int(token_ids.shape[0] / B)\n visual_token_all = visual_token.unsqueeze(1).expand(-1, num_choices, -1, -1, -1).flatten(0,1)\n\n output = self.multimodal_backbone(visual_token=visual_token_all, text_input_mask=input_mask, text_input_embeds=text_out_last_hidden_state)\n \n if self.answer_mask: \n mask_idx = torch.where(token_ids == 103)\n itm_output = output['t_last_hidden_state'][mask_idx]\n elif self.answer_cls:\n if 'cls_last_hidden_state' in output:\n itm_output = output['cls_last_hidden_state'].squeeze()\n else:\n itm_output = output['t_last_hidden_state'][:, 0]\n if self.itm_head is not None:\n itm_output = self.itm_head(itm_output)\n\n else:\n all_cls_emb = output['last_hidden_state'][:, 0]\n itm_output = self.itm_head(all_cls_emb)\n\n if self.qa_head is not None:\n qa_output = self.qa_head(itm_output).view(-1, num_choices)\n else:\n qa_output = torch.softmax(itm_output, dim=-1)[:, 1]\n qa_output = qa_output.view(-1, num_choices)\n \n itm_output_all = {}\n itm_output_all['result'] = qa_output.to(torch.float32)\n itm_output_all['attention'] = output['attentions'][-1].mean(dim=1)\n \n else:\n raise NotImplementedError(\"not implement the finetune test method\")\n \n return itm_output_all", "def _build_output_layer(self,\n answer_modeling,\n answer_modeling_mask):\n answer_start_dropout = self.hyperparams.model_output_answer_start_dropout if self.mode == \"train\" else 0.0\n answer_start_trainable = self.hyperparams.model_output_answer_start_trainable\n answer_end_dropout = self.hyperparams.model_output_answer_end_dropout if self.mode == \"train\" else 0.0\n answer_end_trainable = self.hyperparams.model_output_answer_end_trainable\n \n with tf.variable_scope(\"output\", reuse=tf.AUTO_REUSE):\n self.logger.log_print(\"# build answer output layer\")\n answer_output_list = []\n answer_output_mask_list = []\n \n with tf.variable_scope(\"start\", reuse=tf.AUTO_REUSE):\n answer_start_list = [answer_modeling[0], answer_modeling[1]]\n answer_start_mask_list = [answer_modeling_mask[0], answer_modeling_mask[1]]\n (answer_start,\n answer_start_mask) = self._build_fusion_result(answer_start_list,\n answer_start_mask_list, None)\n \n answer_ouput_start_layer = create_dense_layer(\"single\", 1, 1, 1, None,\n 
[answer_start_dropout], None, False, False, False, self.num_gpus, self.default_gpu_id,\n self.regularizer, self.random_seed, answer_start_trainable)\n answer_output_start, answer_output_start_mask = answer_ouput_start_layer(answer_start, answer_start_mask)\n answer_output_list.append(answer_output_start)\n answer_output_mask_list.append(answer_output_start_mask)\n \n with tf.variable_scope(\"end\", reuse=tf.AUTO_REUSE):\n answer_end_list = [answer_modeling[0], answer_modeling[2]]\n answer_end_mask_list = [answer_modeling_mask[0], answer_modeling_mask[2]]\n (answer_end,\n answer_end_mask) = self._build_fusion_result(answer_end_list,\n answer_end_mask_list, None)\n \n answer_output_end_layer = create_dense_layer(\"single\", 1, 1, 1, None,\n [answer_end_dropout], None, False, False, False, self.num_gpus, self.default_gpu_id,\n self.regularizer, self.random_seed, answer_end_trainable)\n answer_output_end, answer_output_end_mask = answer_output_end_layer(answer_end, answer_end_mask)\n answer_output_list.append(answer_output_end)\n answer_output_mask_list.append(answer_output_end_mask)\n \n return answer_output_list, answer_output_mask_list", "def step(self):\n # Fast learning\n task_embedding = self._ilp.infer_task()\n\n # Posterior update\n #self._skip_flag = self._is_graph_same(task_embedding, self._prev_task_embedding)\n self._skip_flag = False # XXX do not skip test\n if not self._skip_flag:\n self._grprop.observe_task(task_embedding)\n self._prev_task_embedding = task_embedding\n else:\n print(\"skipping!\")", "def _mask_out_dataset(self, df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n mask_train = df[\"id\"].isin(set(chain(*self.train_fixtures_ids.values())))\n mask_test = df[\"id\"].isin(set(chain(*self.test_fixtures_ids.values())))\n mask_predict = df[\"id\"].isin(set(chain(*self.predict_fixtures_ids.values())))\n\n df_train = df[mask_train & ~mask_test & ~mask_predict].copy()\n df_test = df[mask_test & ~mask_predict].copy()\n df_predict = df[mask_predict].copy()\n\n if self._verbose > 0:\n print(\"Teams used for testing:\")\n print(sorted(self.test_fixtures_ids.keys()))\n print(\"Teams used for prediction:\")\n print(sorted(self.predict_fixtures_ids.keys()))\n print(f\"Train dataset total samples: {len(df_train)}\")\n print(f\"Test dataset total samples: {len(df_test)}\")\n print(f\"Predict dataset total samples: {len(df_predict)}\")\n\n # Additional argument checking which can be done only after computing individual datasets.\n # Check if datasets are empty (there is a check during parsing arguments that test/predict\n # split samples must be at least 1, but some dataset may become empty in the end due to some\n # restrictions and filtering).\n emsg = \"Maybe try to specify the split of test and/or prediction samples more reasonably?\"\n if df_train.empty:\n raise ValueError(f\"Train dataset is empty. {emsg}\")\n if df_test.empty:\n raise ValueError(f\"Test dataset is empty. {emsg}\")\n if df_predict.empty:\n raise ValueError(f\"Predict dataset is empty. {emsg}\")\n # Check if datasets are too large (e.g. predict dataset is larger than the rest, etc.)\n if len(df_predict) > len(df_test) + len(df_train):\n raise ValueError(f\"Number of samples in predict dataset is too large. {emsg}\")\n if len(df_test) > len(df_train):\n raise ValueError(f\"Number of samples in test dataset is too large. 
{emsg}\")\n\n return df_train, df_test, df_predict", "def reveal(self, dst=None):\n op = torch.distributed.ReduceOp.BXOR\n if dst is None:\n return comm.get().all_reduce(self.share, op=op)\n else:\n return comm.get().reduce(self.share, dst, op=op)", "def predict_transparent(self, observation, state=None, mask=None, deterministic=False):\n policy_out = self._get_policy_out(observation, state, mask, transparent=True,\n deterministic=deterministic)\n actions, _val, states, _neglogp, data = policy_out\n return actions, states, data", "def call(self, inputs, mask=None, training=None, initial_state=None):\n self.cell._generate_dropout_mask(inputs, training=training)\n self.cell._generate_recurrent_dropout_mask(inputs, training=training)\n return super().call(\n inputs, mask=mask, training=training, initial_state=initial_state\n )", "def _initalize_mask(dataSubStack):\n # Initalize an array to store the output mask values\n outMask = np.zeros(dataSubStack.shape, dtype=bool)\n\n # Start by masking out NaNs or Infs\n NaNsOrInfs = np.logical_not(np.isfinite(dataSubStack.data))\n dataSubStack.mask = NaNsOrInfs\n\n return outMask, dataSubStack", "def mask_by_energy_cutoff(task:dict, mask_params:dict, qn_pair, verbose=False):\n\n # unpack quantum numbers\n (qnf,qni) = qn_pair\n (Ji,gi,ni) = qni\n (Jf,gf,nf) = qnf\n\n # extract parameters\n E_max = mask_params.get(\"E_max\", None)\n Ei_max = mask_params.get(\"Ei_max\", None)\n Ef_max = mask_params.get(\"Ef_max\", None)\n \n # calculate mask value\n ket_results_data = task[\"metadata\"][\"ket_results_data\"]\n bra_results_data = task[\"metadata\"][\"bra_results_data\"]\n Ei = ket_results_data.get_energy(qni)\n Ef = ket_results_data.get_energy(qnf)\n allow = True\n allow &= E_max is None or Ei<=E_max\n allow &= E_max is None or Ef<=E_max\n allow &= Ei_max is None or Ei<=Ei_max\n allow &= Ef_max is None or Ef<=Ef_max\n\n return allow", "def finish_online_evaluation_extended(self, task):\n # -- Get current True-Positive, False-Positive and False-Negative -- #\n self.online_eval_tp = np.sum(self.online_eval_tp, 0)\n self.online_eval_fp = np.sum(self.online_eval_fp, 0)\n self.online_eval_fn = np.sum(self.online_eval_fn, 0)\n\n # -- Calculate the IoU -- #\n global_iou_per_class = [i for i in [i / (i + j + k) for i, j, k in\n zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]\n if not np.isnan(i)]\n\n # -- Calculate the Dice -- #\n global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in\n zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]\n if not np.isnan(i)]\n\n # -- Store IoU and Dice values. Ensure it is float64 so its JSON serializable -- #\n # -- Do not use self.all_val_eval_metrics since this is used for plotting and then the -- #\n # -- plots do not build correctly because based on self.save_every more dice values than -- #\n # -- expected (epochs) are in there --> see plot_progress function in network_trainer.py -- #\n iou = np.mean(global_iou_per_class, dtype=\"float64\")\n dice = np.mean(global_dc_per_class, dtype=\"float64\")\n\n # -- Update the log file -- #\n self.print_to_log_file(\"Average global foreground IoU for task {}: {}\".format(task, str(global_iou_per_class)))\n self.print_to_log_file(\"(interpret this as an estimate for the IoU of the different classes. 
This is not \"\n \"exact.)\")\n self.print_to_log_file(\"Average global foreground Dice for task {}: {}\".format(task, str(global_dc_per_class)))\n self.print_to_log_file(\"(interpret this as an estimate for the Dice of the different classes. This is not \"\n \"exact.)\")\n\n # -- Add the results to self.validation_results based on task and epoch -- #\n if self.validation_results.get('epoch_'+str(self.epoch), None) is None:\n self.validation_results['epoch_'+str(self.epoch)] = { task: {\n 'IoU': iou,\n 'Dice': dice\n }\n }\n else: # Epoch entry does already exist in self.validation_results, so only add the task with the corresponding values\n self.validation_results['epoch_'+str(self.epoch)][task] = { 'IoU': iou,\n 'Dice': dice\n }\n \n # -- Empty the variables for next iteration -- #\n self.online_eval_foreground_dc = []\n self.online_eval_tp = []\n self.online_eval_fp = []\n self.online_eval_fn = []", "def switch_to_untuned_inputs(self):\n\n self.h_e=self.inputs.noise_flat.T\n self.h=np.vstack([self.h_e,self.h_i])", "def compute_mask(t, padding_idx=0):\n mask = torch.ne(t, padding_idx).float()\n return mask", "def draw_task(self):\n self.usedTask = True\n return self.target_task", "def compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo:\n assert item_ids.ndim == 2, \"`item_ids` must have 2 dimensions.\"\n masking_info = self._compute_masked_targets(item_ids, training=training)\n self.mask_schema, self.masked_targets = masking_info.schema, masking_info.targets\n\n return masking_info", "def transform(self, net, data, prediction, truth):\n raw_output = prediction['output']\n # raw_output = tf.constant(np.load('out.npy'))\n batch = util.shape(raw_output)[0]\n prediction = tf.reshape(\n raw_output,\n (batch, self.num_cells, self.num_cells,\n self.num_anchors, 4 + 1 + self.num_classes))\n box_predict, obj_predict, class_predict = tf.split(\n prediction, [4, 1, self.num_classes], axis=-1)\n obj_predict = tf.nn.sigmoid(obj_predict)\n obj_predict_squeeze = tf.squeeze(obj_predict, -1)\n xy_original, wh_original = tf.split(box_predict, [2, 2], axis=-1)\n xy_predict, wh_predict = tf.sigmoid(xy_original), tf.exp(wh_original)\n prediction = {\n 'raw': raw_output,\n 'object': obj_predict_squeeze,\n 'object_mask': obj_predict,\n 'coordinate': [xy_predict, wh_original],\n 'outbox': self._cell_to_global(xy_predict, wh_predict),\n 'class': tf.nn.softmax(class_predict),\n }\n inputs = (\n prediction['object_mask'], prediction['class'],\n prediction['outbox'])\n corner_test, score_test, class_test, count_test = map_fn(\n self._filter, inputs,\n dtype=(tf.float32, tf.float32, tf.int32, tf.int32))\n prediction['test'] = {\n 'corner': corner_test,\n 'class': class_test,\n 'score': score_test,\n 'count': count_test,\n }\n # the original box and label values from the dataset\n image = data['input']\n if truth:\n rawlabel, truebox, count = truth\n rawlabel += self.label_offset\n truebox = util.corners_to_box(truebox)\n obj, box, label = map_fn(\n self._truth_to_cell, (truebox, rawlabel, count),\n dtype=(tf.float32, tf.float32, tf.int32))\n truth = {\n 'object': obj,\n 'object_mask': tf.expand_dims(obj, -1),\n 'box': box,\n 'class': slim.one_hot_encoding(label, self.num_classes),\n 'count': count,\n 'rawbox': truebox,\n 'rawclass': rawlabel,\n }\n return image, prediction, truth", "def get_output_for(self, inputs, **kwargs):\n # Retrieve the layer input\n input = inputs[0]\n # Retrieve the mask when it is supplied\n mask = None\n hid_init = None\n cell_init = None\n if 
self.mask_incoming_index > 0:\n mask = inputs[self.mask_incoming_index]\n if self.hid_init_incoming_index > 0:\n hid_init = inputs[self.hid_init_incoming_index]\n if self.cell_init_incoming_index > 0:\n cell_init = inputs[self.cell_init_incoming_index]\n\n # Treat all dimensions after the second as flattened feature dimensions\n if input.ndim > 3:\n input = T.flatten(input, 3)\n\n # Because scan iterates over the first dimension we dimshuffle to\n # (n_time_steps, n_batch, n_features)\n input = input.dimshuffle(1, 0, 2)\n seq_len, num_batch, _ = input.shape\n\n # Stack input weight matrices into a (num_inputs, 4*num_units)\n # matrix, which speeds up computation\n W_in_stacked = T.concatenate(\n [self.W_in_to_ingate, self.W_in_to_forgetgate,\n self.W_in_to_cell, self.W_in_to_outgate], axis=1)\n\n # Same for hidden weight matrices\n W_hid_stacked = T.concatenate(\n [self.W_hid_to_ingate, self.W_hid_to_forgetgate,\n self.W_hid_to_cell, self.W_hid_to_outgate], axis=1)\n\n # Stack biases into a (4*num_units) vector\n b_stacked = T.concatenate(\n [self.b_ingate, self.b_forgetgate,\n self.b_cell, self.b_outgate], axis=0)\n\n if self.precompute_input:\n # Because the input is given for all time steps, we can\n # precompute_input the inputs dot weight matrices before scanning.\n # W_in_stacked is (n_features, 4*num_units). input is then\n # (n_time_steps, n_batch, 4*num_units).\n input = T.dot(input, W_in_stacked) + b_stacked\n\n # At each call to scan, input_n will be (n_time_steps, 4*num_units).\n # We define a slicing function that extract the input to each LSTM gate\n def slice_w(x, n):\n return x[:, n*self.num_units:(n+1)*self.num_units]\n\n # Create single recurrent computation step function\n # input_n is the n'th vector of the input\n def step(input_n, cell_previous, hid_previous, *args):\n if not self.precompute_input:\n input_n = T.dot(input_n, W_in_stacked) + b_stacked\n\n # Calculate gates pre-activations and slice\n gates = input_n + T.dot(hid_previous, W_hid_stacked)\n\n # Clip gradients\n if self.grad_clipping:\n gates = theano.gradient.grad_clip(\n gates, -self.grad_clipping, self.grad_clipping)\n\n # Extract the pre-activation gate values\n ingate = slice_w(gates, 0)\n forgetgate = slice_w(gates, 1)\n cell_input = slice_w(gates, 2)\n outgate = slice_w(gates, 3)\n\n if self.peepholes:\n # Compute peephole connections\n ingate += cell_previous*self.W_cell_to_ingate\n forgetgate += cell_previous*self.W_cell_to_forgetgate\n\n # Apply nonlinearities\n ingate = self.nonlinearity_ingate(ingate)\n forgetgate = self.nonlinearity_forgetgate(forgetgate)\n cell_input = self.nonlinearity_cell(cell_input)\n\n # Compute new cell value\n cell = forgetgate*cell_previous + ingate*cell_input\n\n if self.peepholes:\n outgate += cell*self.W_cell_to_outgate\n outgate = self.nonlinearity_outgate(outgate)\n\n # Compute new hidden unit activation\n hid = outgate*self.nonlinearity(cell)\n return [cell, hid]\n\n def step_masked(input_n, mask_n, cell_previous, hid_previous, *args):\n cell, hid = step(input_n, cell_previous, hid_previous, *args)\n\n # Skip over any input with mask 0 by copying the previous\n # hidden state; proceed normally for any input with mask 1.\n not_mask = 1 - mask_n\n cell = cell*mask_n + cell_previous*not_mask\n hid = hid*mask_n + hid_previous*not_mask\n\n return [cell, hid]\n\n if mask is not None:\n # mask is given as (batch_size, seq_len). 
Because scan iterates\n # over first dimension, we dimshuffle to (seq_len, batch_size) and\n # add a broadcastable dimension\n mask = mask.dimshuffle(1, 0, 'x')\n sequences = [input, mask]\n step_fun = step_masked\n else:\n sequences = input\n step_fun = step\n\n ones = T.ones((num_batch, 1))\n if isinstance(self.cell_init, Layer):\n pass\n elif isinstance(self.cell_init, T.TensorVariable):\n cell_init = self.cell_init\n else:\n # Dot against a 1s vector to repeat to shape (num_batch, num_units)\n cell_init = T.dot(ones, self.cell_init)\n\n if isinstance(self.hid_init, Layer):\n pass\n elif isinstance(self.hid_init, T.TensorVariable):\n hid_init = self.hid_init\n else:\n # Dot against a 1s vector to repeat to shape (num_batch, num_units)\n hid_init = T.dot(ones, self.hid_init)\n\n # The hidden-to-hidden weight matrix is always used in step\n non_seqs = [W_hid_stacked]\n # The \"peephole\" weight matrices are only used when self.peepholes=True\n if self.peepholes:\n non_seqs += [self.W_cell_to_ingate,\n self.W_cell_to_forgetgate,\n self.W_cell_to_outgate]\n\n # When we aren't precomputing the input outside of scan, we need to\n # provide the input weights and biases to the step function\n if not self.precompute_input:\n non_seqs += [W_in_stacked, b_stacked]\n\n if self.unroll_scan:\n # Retrieve the dimensionality of the incoming layer\n input_shape = self.input_shapes[0]\n # Explicitly unroll the recurrence instead of using scan\n cell_out, hid_out = unroll_scan(\n fn=step_fun,\n sequences=sequences,\n outputs_info=[cell_init, hid_init],\n go_backwards=self.backwards,\n non_sequences=non_seqs,\n n_steps=input_shape[1])\n else:\n # Scan op iterates over first dimension of input and repeatedly\n # applies the step function\n cell_out, hid_out = theano.scan(\n fn=step_fun,\n sequences=sequences,\n outputs_info=[cell_init, hid_init],\n go_backwards=self.backwards,\n truncate_gradient=self.gradient_steps,\n non_sequences=non_seqs,\n strict=True)[0]\n\n # When it is requested that we only return the final sequence step,\n # we need to slice it out immediately after scan is applied\n if self.only_return_final:\n hid_out = hid_out[-1]\n cell_out = cell_out[-1]\n else:\n # dimshuffle back to (n_batch, n_time_steps, n_features))\n hid_out = hid_out.dimshuffle(1, 0, 2)\n cell_out = cell_out.dimshuffle(1, 0, 2)\n\n # if scan is backward reverse the output\n if self.backwards:\n hid_out = hid_out[:, ::-1]\n cell_out = cell_out[:, ::-1]\n\n return T.concatenate([cell_out, hid_out], axis=2)", "def __call__(self, prediction, fg_mask, image):\n # prediction = self.pooling(prediction)\n # fg_mask = self.pooling(fg_mask)\n N, C, H, W = prediction.size()\n bg = prediction*(1-fg_mask)\n fg = prediction*fg_mask\n\n\n fg_patch = fg.view(N,C,-1).permute(0,2,1)\n bg_patch = bg.view(N,C,-1)\n\n fg_patch_mu = torch.mean(fg_patch, dim=2, keepdim=True)\n bg_patch_mu = torch.mean(bg_patch, dim=1, keepdim=True)\n fg_bg_local_conv = torch.matmul((fg_patch-fg_patch_mu), (bg_patch-bg_patch_mu))/(C-1)\n\n bg_distribution_std = (torch.var(bg_patch, dim=1, keepdim=True) + 1e-8).sqrt()\n fg_distribution_std = (torch.var(fg_patch, dim=2, keepdim=True) + 1e-8).sqrt()\n fg_bg_r = fg_bg_local_conv.div(torch.matmul(fg_distribution_std,bg_distribution_std)+1e-8)\n fg_bg_r = fg_bg_r.abs()\n # fg_bg_r[fg_bg_r<0.7] = 0\n\n pixel_count = H*W\n # # bg_patch_one = bg.unsqueeze(1).repeat(1, pixel_count, 1, 1, 1)\n # # fg_patch_one = fg.view(N,C,-1).permute(0,2,1).unsqueeze(-1).unsqueeze(-1).expand_as(bg_patch_one)\n # bg_patch_one = 
bg.permute(0,2,1,3).permute(0,1,3,2).unsqueeze(1)\n # fg_patch_one = fg.view(N,C,-1).permute(0,2,1).unsqueeze(-2).unsqueeze(-2)\n # fg_bg_L1 = (fg_patch_one-bg_patch_one).pow(2).mean(dim=-1)\n # fg_bg_L1_drop_fg = fg_bg_L1*(1-fg_mask)\n\n # fg_mask_channel = fg_mask.view(N, -1, 1, 1).expand_as(fg_bg_L1)\n # fg_bg_L1_only_fg = fg_bg_L1_drop_fg*fg_mask_channel\n\n # # fg_bg_local_conv[fg_bg_local_conv<0] = 0\n # # fg_bg_local_conv = torch.softmax(fg_bg_local_conv, dim=2)\n # # local_loss = fg_bg_L1_only_fg.view(N, pixel_count, pixel_count)*fg_bg_local_conv.permute(0,2,1).detach()\n # local_loss = fg_bg_L1_only_fg.view(N, pixel_count, -1)*fg_bg_r\n # fg_mask_sum = fg_mask.view(N, -1).sum(dim=1)\n\n C1 = 0.01**2\n image = self.adaptivepooling(image)\n # image = F.adaptive_avg_pool2d(image, 32)\n # print(image.size())\n image_fg = image*fg_mask\n image_bg = image*(1-fg_mask)\n image_fg_mu = image_fg.mean(dim=1)\n image_bg_mu = image_bg.mean(dim=1)\n image_fg_patch_one = image_fg_mu.view(N, -1,1)\n image_bg_patch_one = image_bg_mu.view(N, -1,1)\n image_fg_patch_one_sq = image_fg_patch_one.pow(2)\n image_bg_patch_one_sq = image_bg_patch_one.pow(2)\n\n luminance = torch.matmul(image_fg_patch_one, image_bg_patch_one.permute(0,2,1)+C1).div(image_fg_patch_one_sq+image_bg_patch_one_sq+C1)\n # image_bg_patch_one = image_bg.permute(0,2,1,3).permute(0,1,3,2).unsqueeze(1)\n # image_fg_patch_one = image_fg.view(N,image_fg.size(1),-1).permute(0,2,1).unsqueeze(-2).unsqueeze(-2)\n # fg_bg_L1 = (image_fg_patch_one-image_bg_patch_one).pow(2).mean(dim=-1)\n fg_bg_loss = luminance\n \n fg_bg_loss_drop_fg = fg_bg_loss*(1-fg_mask.view(N,1, -1))\n fg_mask_channel = fg_mask.view(N, -1, 1).expand_as(fg_bg_loss)\n fg_bg_loss_only_fg = fg_bg_loss_drop_fg*fg_mask_channel\n local_loss = fg_bg_loss_only_fg*fg_bg_r.detach()\n\n local_loss = local_loss.mean()\n loss = local_loss\n # if target_is_real:\n # loss = local_loss # self.relu(1-prediction.mean())\n # else:\n # loss = -local_loss # self.relu(1+prediction.mean())\n return loss", "def multi_output_loss(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.float32:\n tf.print(\"Sum of actual masking: \", tf.reduce_sum(y_true))\n tf.print(\"Sum of predicted masking: \", tf.reduce_sum(y_pred))\n # loss_multiplier = tf.where(tf.greater(y_true, tf.constant(5.)), tf.constant(10.),\n # tf.constant(1.))\n loss = tf.keras.losses.mean_squared_error(y_true,\n y_pred)\n # tf.print(\"Y true: \", y_true)\n # tf.print(\"Loss multiplier: \", loss_multiplier)\n # loss *= tf.cast(loss_multiplier, dtype=tf.float32)\n return tf.reduce_mean(loss)", "def reset_task(self, task, generator):\n self.task = task\n self.task_dep = task.task_dep[:]\n self.calc_dep = task.calc_dep.copy()\n self.generator = generator", "def negIP(self):\n np.negative(self.t, out=self.t)\n return self", "def forward(self, x, hidden, x_mask=None):\n x_mask = self._get_mask(x) if not x_mask else x_mask\n\n output, hidden = self.rnn(x, hidden)\n output = self.dropout(output)\n\n if self.attention:\n output = self._attn_mean_pooling(output, x_mask)\n else:\n output = self._mean_pooling(output, x_mask)\n\n return output", "def return_value_mask(actions):\n if hasattr(actions,'shape'):\n M = actions.shape[0]\n else:\n M = 1\n value_mask = torch.zeros(M,5)\n value_mask[torch.arange(M),actions] = 1\n value_mask = value_mask.bool()\n return value_mask.squeeze(0)", "def do_imputation(sample_i, cx_set0, cx_mask, device, imputation_model, cx_set, x_set):\n missed_data = cx_set0[sample_i]\n mask = cx_mask[sample_i]\n missed_dataT = 
np.multiply(missed_data, mask)\n missed_dataT = np.expand_dims(missed_dataT, axis=0)\n missed_dataT = torch.FloatTensor(missed_dataT).to(device)\n\n # Reconstruction\n with torch.no_grad():\n imputation_model.eval()\n filled_data = imputation_model(missed_dataT)\n filled_data = filled_data.cpu().detach().numpy()\n filled_data = np.squeeze(filled_data, axis=0)\n max_vector = np.expand_dims(cx_set[sample_i].max(axis=0), axis=0)\n filled_data = np.multiply(filled_data, max_vector)\n \"\"\"\n scaler = preprocessing.MinMaxScaler()\n scaler.fit(cx_set[sample_i])\n filled_data = scaler.inverse_transform(filled_data)\n \"\"\"\n\n total_num = cx_set0.shape[1] * cx_set0.shape[2]\n miss_num = total_num - np.count_nonzero(cx_mask[sample_i])\n new_mask = np.ones((cx_mask.shape[1], cx_mask.shape[2]))\n new_mask = new_mask - cx_mask[sample_i]\n\n imputed_data = np.multiply(filled_data, new_mask)\n x_real = np.multiply(x_set[sample_i], new_mask)\n error = imputed_data - x_real\n\n return miss_num, x_real, error" ]
[ "0.5656326", "0.5581794", "0.55620676", "0.55582994", "0.54249823", "0.54239273", "0.54085124", "0.5390853", "0.5355018", "0.5309503", "0.5292148", "0.52734554", "0.5234446", "0.5234446", "0.5234446", "0.5234446", "0.5184564", "0.51844215", "0.51771903", "0.5147717", "0.51107204", "0.5097505", "0.5094132", "0.5093208", "0.5082455", "0.50756204", "0.506939", "0.50681347", "0.5050913", "0.50477743", "0.50305855", "0.5030542", "0.50276446", "0.50156265", "0.50139487", "0.5010863", "0.5000024", "0.4996888", "0.498204", "0.4980823", "0.49758992", "0.49681684", "0.496544", "0.49448225", "0.49411723", "0.4920232", "0.49007547", "0.48901108", "0.48886013", "0.48855582", "0.48730356", "0.48661864", "0.48617154", "0.48519272", "0.48490378", "0.48385584", "0.48285615", "0.48172498", "0.48149306", "0.4803927", "0.47954738", "0.47947475", "0.47934818", "0.47919586", "0.47911674", "0.47902805", "0.4787746", "0.47865435", "0.47860843", "0.4780611", "0.47796065", "0.47734365", "0.47716346", "0.47612223", "0.47534302", "0.47517666", "0.47501576", "0.47499922", "0.47499838", "0.4748609", "0.47438273", "0.47426444", "0.47419357", "0.47368044", "0.4733718", "0.47328568", "0.47315872", "0.4724062", "0.4722629", "0.4721038", "0.47199765", "0.47186473", "0.4715999", "0.47113502", "0.47039437", "0.47022554", "0.47010028", "0.469553", "0.46910664", "0.46862027" ]
0.5071202
26
Evaluates the accuracy of the model for each past task.
def evaluate(model: ContinualModel, dataset: ContinualDataset, last=False) -> Tuple[list, list]: status = model.net.training model.net.eval() accs, accs_mask_classes = [], [] for k, test_loader in enumerate(dataset.test_loaders): if last and k < len(dataset.test_loaders) - 1: continue correct, correct_mask_classes, total = 0.0, 0.0, 0.0 for data in test_loader: inputs, labels = data inputs, labels = inputs.to(model.device), labels.to(model.device) if 'class-il' not in model.COMPATIBILITY: outputs = model(inputs, k) else: outputs = model(inputs) _, pred = torch.max(outputs.data, 1) correct += torch.sum(pred == labels).item() total += labels.shape[0] if dataset.SETTING == 'class-il': mask_classes(outputs, dataset, k) _, pred = torch.max(outputs.data, 1) correct_mask_classes += torch.sum(pred == labels).item() accs.append(correct / total * 100 if 'class-il' in model.COMPATIBILITY else 0) accs_mask_classes.append(correct_mask_classes / total * 100) model.net.train(status) return accs, accs_mask_classes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(eval_ds, model, task):\n\n print('==========EVAL==========')\n # Testing contrastive accuracy\n if task['name'] == 'contrastive_accuracy':\n ds = eval_ds.map(data_utils.pretrain_preprocess)\n ds = ds.batch(128)\n test_contrast_acc = tf.keras.metrics.Accuracy(name='test_constrastive_accuracy')\n for x in ds:\n image = x['image']\n image = tf.transpose(image, [1, 0, 2, 3, 4])\n image = tf.reshape(\n image, \n (image.shape[0]*image.shape[1], image.shape[2], image.shape[3], image.shape[4])\n )\n out = model(image, mode='unsupervised', training=False)\n metrics.update_contrastive_accuracy2(test_contrast_acc, out, TEMP)\n print('test contrastive accuracy')\n print(test_contrast_acc.result())\n return \n\n # Testing classification accuracy \n ds = eval_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.eval_preprocess)\n ds = ds.batch(FLAGS.eval_bs)\n test_class_acc = tf.keras.metrics.Accuracy(name='test_class_accuracy')\n for x in ds:\n image = x['image']\n labels = x[task['name']]\n if task['name'] == 'extr':\n out = model(image, mode='eval', sup_layers=2, training=False)\n else:\n out = model(image, mode='eval', sup_layers=1, training=False)\n metrics.update_supervised_accuracy(test_class_acc, labels, out)\n \n if FLAGS.debug:\n print(tf.math.argmax(out, axis=-1))\n print('test classification accuracy')\n print(test_class_acc.result())", "def eval_model(self, model):\n evaluation = model.evaluate(x=self.xt_test, y=self.yt_test)\n print(\"loss : \" + str(round(evaluation[0]*100, 2)) + \"%\")\n print(\"accuracy: \" + str(round(evaluation[1]*100, 2)) + \"%\")", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def get_accuracy(model, task, batchmanager, test_set=False):\n\n model.eval()\n count, num = 0., 0\n batchmanager = batchmanager if isinstance(batchmanager, BatchManager) else batchmanager.batchmanagers[task]\n\n iter = batchmanager.test_iter if test_set else batchmanager.dev_iter\n\n with torch.no_grad():\n for batch in iter: \n data, targets = batch\n out = model(data, task)\n predicted = out.argmax(dim=1)\n count += (predicted == targets).sum().item()\n num += len(targets)\n\n model.train()\n return count / num", "def train(self):\n y_list = []\n y_hat_list = []\n for ex_dict in ut.EXAMPLES_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return acc", "def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = (output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + self.confusion['true_negative']) \\\n / 
sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)", "def test(self):\n y_list = []\n y_hat_list = []\n for ex_dict in ut.TEST_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return y_hat_list, acc", "def accuracy(self):\n if not self.run:\n self._run()\n return self.model_acc", "def test(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n \r\n dataset.set_split('test')\r\n batch_generator = generate_nmt_batches(dataset, \r\n batch_size=len(dataset), \r\n device=args.device)\r\n\r\n acc_sum = 0.0\r\n model.eval()\r\n \r\n for batch_index, batch_dict in enumerate(batch_generator):\r\n # step 1. compute the output\r\n if isinstance(model,NMTModelWithMLTM):\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_mltm_vector'],\r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n else:\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n\r\n acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)\r\n acc_sum += acc_t\r\n \r\n return acc_sum / (batch_index+1)", "def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.episode.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def evaluate(data_loader, model, device):\n\n\tmodel.eval()\n\ttotal_num_examples = 0\n\ttotal_error = 0\n\tfor idx, batch in enumerate(data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = 
batch['labels'].to(device)\n\n\t\t####Your code here ---\n\n\t\t# get the output from the model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# get error, num_examples using accuracy_fn defined previously\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# update total_error and total_num_examples\n\t\ttotal_error += error\n\t\ttotal_num_examples += num_examples\n\n\taccuracy = 1 - total_error / total_num_examples\n\treturn accuracy", "def get_accuracy(model, iter):\n\n model.eval()\n count, num = 0., 0\n with torch.no_grad():\n for i, batch in enumerate(iter):\n data, targets = batch\n out = model(data)\n predicted = out.argmax(dim=1)\n count += (predicted == targets).sum().item()\n num += len(targets)\n\n model.train()\n return count / num", "def evaluate_test(model, history, class_labels, train_X, test_X, train_y, test_y):\n train_loss, train_acc = model.evaluate(train_X, train_y, verbose=0)\n test_loss, test_acc = model.evaluate(test_X, test_y, verbose=0)\n print('Accuracy \\n Train: %.3f, Test: %.3f' % (train_acc, test_acc))\n print('Loss \\n Train: %.3f, Test: %.3f \\n' % (train_loss, test_loss))\n # plot loss during training\n plt.subplots_adjust(hspace = .5, wspace = 0.5)\n plt.subplot(211)\n plt.title('Loss', weight='bold')\n plt.plot(history.history['loss'], label='train')\n plt.plot(history.history['val_loss'], label='val')\n plt.legend()\n # plot accuracy during training\n plt.subplot(212)\n plt.title('Accuracy', weight='bold')\n plt.plot(history.history['acc'], label='train')\n plt.plot(history.history['val_acc'], label='val')\n plt.legend()\n plt.show()\n print('\\n')\n # predict probabilities for test set\n yhat_probs = model.predict(test_X, verbose=0)\n # predict classes for test set\n yhat_classes = model.predict_classes(test_X, verbose=0)\n # reduce to 1d array\n yhat_probs = yhat_probs[:, 0]\n yhat_classes = yhat_classes[:, 0]\n # calculate metrics\n report = metrics.classification_report(test_y, yhat_classes, target_names=class_labels)\n confusion_matrix = metrics.confusion_matrix(test_y, yhat_classes)\n plot_confusion_matrix(confusion_matrix, class_labels)\n print('\\n')\n return report", "def evaluate(self, input_fn, eval_times, checkpoint_path=None):\n if not checkpoint_path:\n checkpoint_path = self._estimator.latest_checkpoint()\n\n if self._params.eval.type == 'customized':\n metrics = self._estimator.evaluate(\n input_fn, steps=eval_times, checkpoint_path=checkpoint_path)\n else:\n if not self._evaluator:\n self.prepare_evaluation()\n if checkpoint_path:\n current_step = int(os.path.basename(checkpoint_path).split('-')[1])\n else:\n current_step = 0\n predictor = self._estimator.predict(\n input_fn=input_fn,\n checkpoint_path=checkpoint_path,\n yield_single_examples=False)\n losses = collections.defaultdict(lambda: 0.0)\n\n counter = 0\n try:\n while eval_times is None or counter < eval_times:\n outputs = six.next(predictor)\n predictions = {}\n groundtruths = {}\n for key, val in outputs.items():\n if key[0:5] == 'pred_':\n predictions[key[5::]] = val\n if key[0:3] == 'gt_':\n groundtruths[key[3::]] = val\n if key[0:5] == 'loss_':\n losses[key[5::]] += np.mean(val)\n self._evaluator.update(\n predictions,\n groundtruths=(None if self._params.eval.use_json_file\n else groundtruths))\n counter = counter + 1\n tf.logging.info(\n f'Finish eval step {counter} out of total {eval_times} steps.')\n except (tf.errors.OutOfRangeError, StopIteration):\n logging.info(\n 'Evaluation reaches the end after running %d times.', counter)\n\n for key, 
val in outputs.items():\n if key[0:5] == 'loss_':\n losses[key[5::]] /= counter\n metrics = self._evaluator.evaluate()\n\n # Summary writer writes out eval metrics.\n output_dir = os.path.join(self._model_dir,\n 'eval' + self._params.eval.suffix)\n tf.gfile.MakeDirs(output_dir)\n summary_writer = tf.summary.FileWriter(output_dir)\n write_summary(metrics, summary_writer, current_step)\n write_summary(losses, summary_writer, current_step)\n summary_writer.close()\n\n logging.info('Eval result: %s', metrics)\n return metrics", "def eval(self):\n\n # How many questions we get right at precision@1.\n correct = 0\n\n total = self._analogy_questions.shape[0]\n start = 0\n while start < total:\n limit = start + 2500\n sub = self._analogy_questions[start:limit, :]\n idx = self._predict(sub)\n start = limit\n for question in xrange(sub.shape[0]):\n if sub[question, 3] in idx[question]:\n # print(sub[question, 3], idx[question])\n correct += 1\n\n print()\n print(\"Eval %4d/%d accuracy @ top5= %4.1f%%\" % (correct, total,\n correct * 100. / total)\n )", "def eval(self, test_docs, test_labels):\n assert len(test_docs)==len(test_labels)\n preds = [] # predicted labels\n for doc,y_gold in zip(test_docs,test_labels):\n y_pred = self.predict(doc)\n preds.append(y_pred)\n ev = Eval(test_labels, preds)\n return ev.accuracy()", "def evaluate_model(model, ds_valid):\n print(\"-- Evaluate Model:\")\n for features, labels in ds_valid:\n valid_step(model, features, labels)\n logs = \"\\nValid Loss: {}, Valid Accuracy: {}\"\n tf.print(tf.strings.format(logs, (valid_loss.result(), valid_metric.result())))\n valid_loss.reset_states()\n train_metric.reset_states()\n valid_metric.reset_states()", "def evaluate(self, ts_loader=None):\n # start evaluation of the model\n self.tr_model.eval()\n samples, correct = 0, 0\n \n # check if a dataloader was provided for evaluation\n loader = self.ts_loader if not ts_loader else ts_loader\n \n with torch.no_grad():\n for x, y in loader:\n \n x, y = x.to(device), y.to(device)\n \n y_ = self.tr_model(x)\n _, predicted = torch.max(y_.detach(), 1)\n \n samples += y.shape[0]\n correct += (predicted == y).sum().item()\n \n # return evaluation statistics\n return {\"accuracy\" : correct/samples}", "def _compute_final_accuracies(self, meval):\n valid_accuracy = self.eval_child_model(meval, self.data_loader, 'val')\n if self.hparams.eval_test:\n test_accuracy = self.eval_child_model(meval, self.data_loader, 'test')\n else:\n test_accuracy = 0\n tf.logging.info('Test Accuracy: {}'.format(test_accuracy))\n return valid_accuracy, test_accuracy", "def test_eval(model, test_set):\n num_test_batch = len(test_set)\n test_loss = np.zeros((num_test_batch, 1), dtype=float)\n test_acc = np.zeros((num_test_batch, 1), dtype=float)\n for ibatch, batch in enumerate(test_set):\n result = model.test_on_batch({'input':batch[0]}, {'fp1':batch[1], 'fp2':batch[1], 'fp3':batch[1], 'ave':batch[1]})\n test_loss[ibatch] = result[0]\n test_acc[ibatch] = result[-1]\n return np.mean(test_loss), np.mean(test_acc)", "def evaluate(self, y_pred, y_test):\n for i in range(len(y_pred)):\n if y_pred[i] == y_test.iloc[i]:\n self.accuracy += 1\n self.accuracy = (self.accuracy/len(y_pred))", "def do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n data_set):\n # And run one epoch of eval.\n true_count = 0 # Counts the number of correct predictions.\n steps_per_epoch = data_set.num_examples // FLAGS.batch_size\n num_examples = steps_per_epoch * FLAGS.batch_size\n for step in range(steps_per_epoch):\n 
feed_dict = fill_feed_dict(data_set,\n images_placeholder,\n labels_placeholder)\n true_count += sess.run(eval_correct, feed_dict=feed_dict)\n precision = true_count / num_examples\n print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %\n (num_examples, true_count, precision))", "def train_accuracy(self):\n # Train accuarcy\n add = np.ones(len(self.X_train))\n X_add1 = np.c_[add, self.X_train]\n pred_train = np.dot(X_add1, self.w_result.T)\n pred_train[pred_train > 0] = 1\n pred_train[pred_train < 0] = 0\n print(pred_train)\n train_check_lable = np.isclose(pred_train, self.y_train)\n num_true_lable = np.sum(train_check_lable)\n num_all_lable = np.size(train_check_lable)\n train_accuracy = num_true_lable / num_all_lable\n print(\"train_accuracy is: %f\" %train_accuracy)\n return train_accuracy", "def test(self):\n with torch.no_grad():\n self.model.eval()\n p10_forecast, p10_forecast, p90_forecast, target = None, None, None, None\n\n t = time()\n for step, sample in enumerate(self.test_loader):\n\n # Hide future predictions from input vector, set to 0 (or 1) values where timestep > encoder_steps\n steps = self.cnf.all_params['num_encoder_steps']\n pred_len = sample['outputs'].shape[1]\n x = sample['inputs'].float().to(self.cnf.device)\n x[:, steps:, 0] = 1\n\n # Feed input to the model\n if self.cnf.all_params[\"model\"] == \"transformer\" or self.cnf.all_params[\"model\"] == \"grn_transformer\":\n\n # Auto-regressive prediction\n for i in range(pred_len):\n output = self.model.forward(x)\n x[:, steps + i, 0] = output[:, i, 1]\n output = self.model.forward(x)\n\n elif self.cnf.all_params[\"model\"] == \"tf_transformer\":\n output, _, _ = self.model.forward(x)\n else:\n raise NameError\n\n output = output.squeeze()\n y, y_pred = sample['outputs'].squeeze().float().to(self.cnf.device), output\n\n # Compute loss\n loss, _ = self.loss(y_pred, y)\n smape = symmetric_mean_absolute_percentage_error(output[:, :, 1].detach().cpu().numpy(),\n sample['outputs'][:, :, 0].detach().cpu().numpy())\n\n # De-Normalize to compute metrics\n target = unnormalize_tensor(self.data_formatter, y, sample['identifier'][0][0])\n p10_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 0], sample['identifier'][0][0])\n p50_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 1], sample['identifier'][0][0])\n p90_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 2], sample['identifier'][0][0])\n\n # Compute metrics\n self.test_losses['p10'].append(self.loss.numpy_normalised_quantile_loss(p10_forecast, target, 0.1))\n self.test_losses['p50'].append(self.loss.numpy_normalised_quantile_loss(p50_forecast, target, 0.5))\n self.test_losses['p90'].append(self.loss.numpy_normalised_quantile_loss(p90_forecast, target, 0.9))\n\n self.test_loss.append(loss.item())\n self.test_smape.append(smape)\n\n # Plot serie prediction\n p1, p2, p3, target = np.expand_dims(p10_forecast, axis=-1), np.expand_dims(p50_forecast, axis=-1), \\\n np.expand_dims(p90_forecast, axis=-1), np.expand_dims(target, axis=-1)\n p = np.concatenate((p1, p2, p3), axis=-1)\n plot_temporal_serie(p, target)\n\n # Log stuff\n for k in self.test_losses.keys():\n mean_test_loss = np.mean(self.test_losses[k])\n print(f'\\t● AVG {k} Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n\n # log log log\n mean_test_loss = np.mean(self.test_loss)\n mean_smape = np.mean(self.test_smape)\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n print(f'\\t● AVG SMAPE on TEST-set: 
{mean_smape:.6f} │ T: {time() - t:.2f} s')", "def evaluate_epoch(\n axes,\n tr_loader,\n val_loader,\n te_loader,\n model,\n criterion,\n epoch,\n stats,\n include_test=False,\n update_plot=True,\n multiclass=False,\n):\n\n def _get_metrics(loader):\n y_true, y_pred = [], []\n # correct, total = 0, 0\n\n running_loss = []\n\n # for X, y in loader:\n for i, batch in enumerate(loader):\n with torch.no_grad():\n print(\"evaluating... batch number\", i)\n # X = torch.Tensor(batch[\"image\"]).to(\"cuda\")\n # y = torch.Tensor(batch[\"depth\"]).to(device=\"cuda\")\n X = torch.Tensor(batch[\"image\"])\n y = torch.Tensor(batch[\"depth\"])\n\n output = model(X)\n predicted = predictions(output.data)\n y_true.append(y)\n y_pred.append(predicted)\n\n # total += y.size(0)\n\n # correct += (predicted == y).sum().item()\n\n # Calculate the net loss of this batch\n loss = criterion(predicted, y)\n # gradient_loss = gradient_criterion(predicted, y, device=\"cuda\")\n # running_loss.append(criterion(output, y).item())\n running_loss.append(loss)\n\n y_true = torch.cat(y_true)\n y_pred = torch.cat(y_pred)\n\n loss = np.mean(running_loss)\n\n acc = 0.9\n auroc = 0.9\n\n return acc, loss, auroc\n\n print(\"evaluating training set...\")\n train_acc, train_loss, train_auc = _get_metrics(tr_loader)\n print(\"evaluating validation set\")\n val_acc, val_loss, val_auc = _get_metrics(val_loader)\n\n stats_at_epoch = [\n val_acc,\n val_loss,\n val_auc,\n train_acc,\n train_loss,\n train_auc,\n ]\n if include_test:\n print(\"evaluating testing set...\")\n stats_at_epoch += list(_get_metrics(te_loader))\n\n stats.append(stats_at_epoch)\n # utils.log_training(epoch, stats)\n # if update_plot:\n # utils.update_training_plot(axes, epoch, stats)", "def run_validation(self):\n stats = {'loss': 0,\n 'accuracy': 0,\n 'time_error': 0,\n 'location_error': np.zeros(3),\n 'total_pts': 0,\n 'scatter': []}\n run_variables = [self.loss, self.conf, self.occ, self.occ_t, self.occ_loc, self.output['time'],\n self.output['location']]\n\n self.session.run(self.val_init)\n while True:\n try:\n (loss, confidences, occurrences, times,\n locations, time_output, location_output) = self.session.run(run_variables)\n n_pts = confidences.shape[0]\n stats['loss'] += loss * n_pts\n stats['accuracy'] += np.sum(np.round(confidences) == occurrences)\n stats['time_error'] += np.sum(np.abs(times-time_output))\n stats['location_error'] += np.sum(np.abs(times - time_output), axis=0)\n stats['scatter'].append(location_output - locations)\n stats['total_pts'] += n_pts\n except tf.errors.OutOfRangeError:\n stats['loss'] /= stats['total_pts']\n stats['accuracy'] /= stats['total_pts']\n stats['time_error'] /= stats['total_pts']\n stats['location_error'] /= stats['total_pts']\n stats['scatter'] = np.concatenate(stats['scatter'], axis=0)\n return stats", "def evaluate_acc(\n model,\n ds\n):\n n = 0\n correct = 0\n for batch_x, batch_y in ds:\n batch_pred = get_model_prediction(model, batch_x)\n correct += tf.math.reduce_sum(\n tf.cast(batch_pred == batch_y, dtype=tf.int32)\n )\n n += batch_y.shape[0]\n return correct / n", "def do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n inputs, targets):\n # And run one epoch of eval.\n true_count = 0 # Counts the number of correct predictions.\n steps_per_epoch = len(inputs) // batch_size\n num_examples = steps_per_epoch * batch_size\n start_index = 0;\n for step in xrange(steps_per_epoch):\n feed_dict = fill_feed_dict(images_placeholder, labels_placeholder, inputs, targets, start_index, 
batch_size)\n start_index = (start_index + batch_size)\n\n true_count += sess.run(eval_correct, feed_dict=feed_dict)\n precision = float(true_count) / num_examples\n print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %\n (num_examples, true_count, precision))", "def validate(self):\n self.set_model_mode('eval')\n self.evaluator.reset()\n losses = MetricMeter()\n\n print('Do evaluation on {} set'.format('valid set'))\n data_loader = self.val_loader\n assert data_loader is not None\n for batch_idx, batch in enumerate(data_loader):\n input, label = self.parse_batch_test(batch)\n loss = self.forward_backward(batch, backprob=False)\n losses.update(loss)\n # total_loss += loss['loss']\n output = self.model_inference(input)\n self.evaluator.process(output, label)\n\n results = self.evaluator.evaluate()\n total_loss = losses.meters['loss_x'].avg\n\n for k, v in results.items():\n tag = '{}/{}'.format('validation', k)\n self.write_scalar(tag, v, self.epoch)\n # if full_results:\n return [total_loss,losses.dict_results(),results]\n # return total_loss", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def check_test_accuracy(self):\n print('\\n# Evaluate on test data')\n results = self.model.evaluate(self.data.test_dataset)\n print('\\ntest loss, test acc:', results)", "def run_evaluate(self, test):\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n for words, labels in minibatches(test, self.config.batch_size):\n labels_pred, sequence_lengths = self.predict_batch(words)\n\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n accs += [a==b for (a, b) in zip(lab, lab_pred)]\n\n lab_chunks = set(get_chunks(lab, self.config.vocab_tags))\n lab_pred_chunks = set(get_chunks(lab_pred,\n self.config.vocab_tags))\n\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n print('*'*20, '\\n')\n print('precision:', p, 'recall:', r, 'f1:', f1, '\\n')\n print('*'*20)\n\n return {\"acc\": 100*acc, \"f1\": 100*f1}", "def run_analyses(y_predict_train, y_train, y_predict, y_test):\n # calculate metrics\n _, training_error = output_error(y_predict_train, y_train)\n (precision, recall, f1, _), testing_error = output_error(y_predict, y_test)\n \n # print out metrics\n print 'Average Precision:', np.average(precision)\n print 'Average Recall:', np.average(recall)\n print 'Average F1:', np.average(f1)\n print 'Training Error:', training_error\n print 'Testing Error:', testing_error", "def evaluate(sess, images_ph, labels_ph, softmax, mnist, config, task):\n\n print 'Evaluating on {} task ({}x{}, {} distractors) using {} glimpses (at {} scales)'.format(\n task, config.new_size, config.new_size, config.n_distractors,\n config.num_glimpses, config.n_patches)\n\n # Evaluation\n 
test_acc = []\n val_acc = []\n\n for k, dataset in enumerate([mnist.validation, mnist.test]):\n\n steps_per_epoch = dataset.num_examples // config.eval_batch_size\n correct_cnt = 0\n num_samples = steps_per_epoch * config.batch_size\n # loc_net.sampling = True\n\n for test_step in tqdm(xrange(steps_per_epoch)):\n\n images, labels = dataset.next_batch(config.batch_size)\n images = images.reshape((-1, config.original_size, config.original_size, 1))\n labels_bak = labels\n\n if task == 'translated':\n images = translate(images, width=config.new_size, height=config.new_size)\n elif task == 'cluttered':\n images = clutter(images,\n dataset.images.reshape((-1, config.original_size, config.original_size, 1)),\n width=config.new_size, height=config.new_size, n_patches=config.n_distractors\n )\n elif task == 'cluttered_var':\n images, _, _, _ = clutter_rnd(images,\n train_data=dataset.images.reshape(\n (-1, config.original_size, config.original_size, 1)),\n lim=config.distractor_range,\n color_digits=config.color_digits,\n color_noise=config.color_noise,\n width=config.new_size, height=config.new_size, norm=True)\n\n # else:\n # print 'original mnist data ({}x{}).'.format(config.original_size,config.original_size)\n\n # Duplicate M times (average prediction over M repeats)\n images = np.tile(images, [config.M, 1, 1, 1])\n labels = np.tile(labels, [config.M])\n\n softmax_val = sess.run(softmax,\n feed_dict={\n images_ph: images,\n labels_ph: labels\n })\n softmax_val = np.reshape(softmax_val,\n [config.M, -1, config.num_classes])\n softmax_val = np.mean(softmax_val, 0)\n\n pred_labels_val = np.argmax(softmax_val, 1)\n correct_cnt += np.sum(pred_labels_val == labels_bak)\n acc = correct_cnt / float(num_samples)\n\n if k == 0:\n print '\\nVal accuracy\\t{:4.4f} ({:4.4f} error)'.format(100 * acc, 100 - 100 * acc)\n val_acc = acc\n else:\n print 'Test accuracy\\t{:4.4f} ({:4.4f} error)\\n'.format(100 * acc, 100 - 100 * acc)\n test_acc = acc\n\n return test_acc, val_acc", "def fit(self, sess, save=True):\n losses = []\n val_accuracies = []\n for epoch in range(self.config.num_epochs):\n start_time = time.time()\n average_loss, epoch_accuracies = self.run_epoch(sess, epoch)\n val_accuracies.extend(epoch_accuracies)\n duration = time.time() - start_time\n print('Epoch {:}: loss = {:.2f} ({:.3f} sec)'.format(epoch, average_loss, duration))\n losses.append(average_loss)\n if save: self.val_accuracies = val_accuracies\n return losses, val_accuracies", "def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def do_eval(sess,model,valid,batch_size):\n valid_X,valid_y,valid_p=valid\n number_examples=valid_X.shape[0]\n if number_examples>10000:\n number_examples=validation_size\n print(\"do_eval.valid.number_examples:\",number_examples)\n if number_examples>validation_size: valid_X,valid_y,valid_p=valid_X[0:validation_size],valid_y[0:validation_size],valid_p[0:validation_size]\n eval_loss,eval_counter,eval_acc=0.0,0,0.0\n for start,end in zip(range(0,number_examples,batch_size),range(batch_size,number_examples,batch_size)):\n feed_dict = {model.x_mask_lm: valid_X[start:end],model.y_mask_lm: valid_y[start:end],model.p_mask_lm:valid_p[start:end],\n model.dropout_keep_prob: 1.0} # FLAGS.dropout_keep_prob\n curr_eval_loss, logits_lm, accuracy_lm= sess.run([model.loss_val_lm,model.logits_lm,model.accuracy_lm],feed_dict) # logits:[batch_size,label_size]\n eval_loss=eval_loss+curr_eval_loss\n 
eval_acc=eval_acc+accuracy_lm\n eval_counter=eval_counter+1\n return eval_loss/float(eval_counter+small_value), eval_acc/float(eval_counter+small_value)", "def test(self):\n for data_tier in self.data_tiers:\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.floor(tot*0.2))\n test_features = np.array(self.preprocessed_data[data_tier]['features'][p:])\n trend_test_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][p:])\n avg_test_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][p:])\n accuracy_trend = self.clf_trend[data_tier].score(test_features, trend_test_classifications)\n accuracy_avg = self.clf_avg[data_tier].score(test_features, avg_test_classifications)\n self.logger.info('The accuracy of %s trend classifier for data tier %s is %.3f', self.name, data_tier, accuracy_trend)\n self.logger.info('The accuracy of %s avg regressor for data tier %s is %.3f', self.name, data_tier, accuracy_avg)", "def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)", "def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy", "def run_evaluation(self, epoch=0, global_step=0, verbose=True):\n\n # step-1, compute predictions on test set\n while True:\n try:\n preds_all = helper_utils.compute_predictions(\n self.session, self.meval, global_step, self.test_files, self.comet_exp\n )\n # If epoch trained without raising the below errors, break from loop.\n break\n except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:\n tf.logging.info('Retryable error caught: {}. 
Retrying.'.format(e))\n\n # step-2 evaluate on predictions\n results = helper_utils.eval_predictions(\n self.gt_depths, preds_all, global_step, min_depth=self.hparams.min_depth,\n max_depth=self.hparams.max_depth, verbose=verbose, comet_exp=self.comet_exp\n )\n return results, preds_all", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.data.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def run_evaluate(self, test):\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n for words, labels in minibatches(test, self.config.batch_size):\n labels_pred, sequence_lengths = self.predict_batch(words)\n\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n accs += [a==b for (a, b) in zip(lab, lab_pred)]\n\n lab_chunks = set(get_chunks(lab, self.config.vocab_tags))\n lab_pred_chunks = set(get_chunks(lab_pred,\n self.config.vocab_tags))\n\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n\n return {\"acc\": 100*acc, \"f1\": 100*f1}", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def evaluate(self, data, labels, batch_size=32, max_seq_len=128):\n test_dataloader = setup_dataloader(data, labels, max_seq_len, batch_size)\n accuracy = 0\n \n for batch in tqdm(test_dataloader, desc=\"Iteration\"):\n with torch.no_grad():\n labels = batch[\"labels\"]\n batch = {k: t.to(self.device) for k, t in batch.items() if k != \"labels\"}\n outputs = self.model(**batch)\n logits = outputs[0]\n accuracy += calculate_accuracy(logits, labels)\n \n batch = {k: t.detach().cpu() for k, t in batch.items()}\n del batch\n torch.cuda.empty_cache()\n\n accuracy = accuracy / len(test_dataloader)\n return accuracy", "def evaluate(model, optimizer, loss_function, loader, device, labels, log_every_n=10):\n\n model.eval()\n\n batch_wise_true_labels = []\n batch_wise_predictions = []\n\n loss_history = []\n running_loss = 0.\n running_loss_history = []\n\n with torch.no_grad(): # Disable gradient computation - required only during training\n for i, batch in tqdm(enumerate(loader)):\n\n 
logits = model(batch[0].to(device), batch[1]).squeeze()\n loss = loss_function(logits, batch[2].to(device))\n loss_history.append(loss.item())\n\n running_loss += (loss_history[-1] - running_loss) / (i + 1) # Compute rolling average\n\n running_loss_history.append(running_loss)\n\n predictions = torch.sigmoid(logits)\n\n batch_wise_true_labels.append(batch[2].view(-1).tolist())\n batch_wise_predictions.append(predictions.view(-1).tolist())\n\n # flatten the list of predictions using itertools\n all_true_labels = list(chain.from_iterable(batch_wise_true_labels))\n all_predictions = list(chain.from_iterable(batch_wise_predictions))\n all_predictions = [1 if p > 0.5 else 0 for p in all_predictions]\n\n\n print(\"Evaluation Loss: \", running_loss)\n # Now we can generate a classification report\n print(\"Classification report after epoch:\")\n print(f1_score(all_true_labels, all_predictions, average='micro'))\n print(classification_report(all_true_labels, all_predictions, labels=labels))\n\n return loss_history, running_loss_history", "def train_and_evaluate(self) -> None:\n with tf.Session() as self.sess:\n # Initialize computation graph.\n self.create_model()\n\n # Initialize variables.\n tf.global_variables_initializer().run()\n\n # Initialize summary writer.\n self.writer = tf.summary.FileWriter(logdir='conv_vis')\n\n for epoch_no in range(self.nb_epochs):\n # Train model on next batch\n batch_x, batch_y = self.mnist.train.next_batch(self.mb_size)\n results = self.train_on_batch(batch_x, batch_y, global_step=epoch_no)\n\n if epoch_no > 0 and epoch_no % self.lr_decay_time == 0:\n # Test on all samples.\n self.test_on_all()\n # Perform learning rate decay.\n self.learning_rate /= 2\n if epoch_no % 100 == 0:\n self.logger.info(\"Epoch {0}: Loss: {1[0]}, accuracy: {1[1]}\".format(epoch_no, results))\n batch_x_t, batch_y_t = self.mnist.test.next_batch(self.mb_size)\n test_results = self.test_on_batch(batch_x_t, batch_y_t)\n self.logger.info(\"(Test(batch): Loss: {0[0]}, accuracy: {0[1]}\".format(test_results))\n self.test_on_all()\n\n # Save the trained model with all valuable variables.\n saver = tf.train.Saver()\n saver.save(sess=self.sess, save_path='./saved_model', global_step=epoch_no)", "def train(self, sess): \n\n logging.info(\"////////////////////////////\")\n logging.info(\"///// BEGIN TRAINING /////\")\n logging.info(\"////////////////////////////\")\n\n # for TensorBoard\n summaryWriter = tf.summary.FileWriter(\n \"./checkpoints/\", \n sess.graph)\n\n # Initialize iterator\n sess.run(self.train_iter.initializer)\n\n # Print initial model predictions\n emaTrainLoss = self.get_loss(sess, dSet=\"train\")\n emaTrainAccr = self.get_accuracy(sess, dSet=\"train\")\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n logging.info(\"Initial training Loss / Accuracy: %f / %f)\" % (emaTrainLoss, emaTrainAccr))\n logging.info(\"Initial validation Loss / Accuracy: %f / %f)\" % (valLoss, valAccr))\n\n randomRatio = 1.0\n epoch = 0\n best_val_loss = None\n best_val_acc = None\n\n\n ###### Loop over epochs #####\n while (self.FLAGS.Nepochs is 0) or (epoch <= self.FLAGS.Nepochs):\n epoch += 1\n epoch_tic = time.time()\n\n # Evaluate test and validation data\n trnLoss = self.get_loss(sess, dSet=\"train\")\n trnAccr = self.get_accuracy(sess, dSet=\"train\")\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n\n print_info = \"Full Sets\\tTraining %.5f / %.5f \\tValidation %.5f / %.5f\" %\\\n (trnLoss, trnAccr, 
valLoss, valAccr)\n logging.info(\"\\n\\n///// Begin Epoch {} /////\\n\".format(epoch)\n + print_info)\n\n\n # Initialize iterator\n sess.run(self.train_iter.initializer)\n\n ##### Loop over mini batches #####\n while True:\n\n # Perform training step\n try :\n tstep_tic = time.time()\n curLoss, curAccr, global_step = self.run_train_step(sess, summaryWriter)\n tstep_toc = time.time()\n tstep_time = tstep_toc - tstep_tic\n except tf.errors.OutOfRangeError:\n break\n\n # Update training history parameters\n emaTrainLoss = curLoss*(1-self.FLAGS.train_variable_decay)\\\n + emaTrainLoss*self.FLAGS.train_variable_decay \n emaTrainAccr = curAccr*(1-self.FLAGS.train_variable_decay)\\\n + emaTrainAccr*self.FLAGS.train_variable_decay \n\n ### Evaluate model ###\n if global_step % self.FLAGS.eval_every == 0:\n\n # Save training data measurements\n self.writeSummary(emaTrainLoss, \"train/loss\", summaryWriter, global_step)\n self.writeSummary(emaTrainAccr, \"train/acc\", summaryWriter, global_step)\n self.history[\"step\"].append(global_step)\n self.history[\"trainLoss\"].append(emaTrainLoss)\n self.history[\"trainAccr\"].append(emaTrainAccr)\n\n # Evaluate validation data\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n\n self.writeSummary(valLoss, \"val/loss\", summaryWriter, global_step)\n self.writeSummary(valAccr, \"val/acc\", summaryWriter, global_step)\n self.history[\"validLoss\"].append(valLoss)\n self.history[\"validAccr\"].append(valAccr)\n\n # Logging results\n print_info = \"%i\\tTraining %.5f / %.5f \\tValidation %.5f / %.5f\" %\\\n (global_step, emaTrainLoss, emaTrainAccr, valLoss, valAccr)\n logging.info(print_info)\n\n # plot training progress\n self.plot_results()\n\n\n # Save model\n if global_step % self.FLAGS.save_every == 0:\n logging.info(\"Saving model at iteration {} to {}\".format(\n global_step, self.FLAGS.checkpoint_path))\n self.saver.save(sess, \n self.FLAGS.checkpoint_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.checkpoint_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n\n\n # Evaluate validation data\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccs = self.get_accuracy(sess, dSet=\"val\")\n\n # Save best models\n if (best_val_loss is None) or (valLoss < best_val_loss):\n logging.info(\"Saving best loss model at iteration {} in {}\".format(\n global_step, self.FLAGS.bestModel_loss_ckpt_path))\n best_val_loss = valLoss\n self.bestLossSaver.save(sess, \n self.FLAGS.bestModel_loss_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.bestModel_loss_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n if (best_val_acc is None) or (valAccs > best_val_acc):\n logging.info(\"Saving best accuracy model at iteration {} in {}\".format(\n global_step, self.FLAGS.bestModel_acc_ckpt_path))\n best_val_acc = valAccs\n self.bestAccSaver.save(sess, \n self.FLAGS.bestModel_acc_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.bestModel_acc_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n\n\n loss_train = self.get_loss(sess, dSet=\"train\")\n acc_train = self.get_accuracy(sess, dSet=\"train\")\n\n loss_val = self.get_loss(sess, dSet=\"val\")\n acc_val = self.get_accuracy(sess, dSet=\"val\")\n\n print(loss_train, acc_train)\n if 
self.FLAGS.verbose:\n print(\"\\n\\n\")\n print(\"###########################\")\n print(\"##### Final Results #####\")\n print(\"###########################\")\n print(\"\\nTraining [ Loss: %f\\t Accuracy: %f]\" \\\n % (loss_train, acc_train))\n print(\"Validation [ Loss: %f\\t Accuracy: %f]\" \\\n % (loss_val, acc_val))\n \n self.hasTrained = True", "def check_accuracy(validation_iterator, model, criterion):\n val_losses = []\n val_accuracies = []\n with torch.no_grad():\n for val_batch_idx, val_batch in enumerate(validation_iterator):\n val_hyp, val_hyp_length = val_batch.hypothesis\n val_prem, val_prem_length = val_batch.premise\n val_target = val_batch.label - 1\n scores = model(val_prem, val_hyp, val_prem_length, val_hyp_length)\n loss = criterion(scores, val_target)\n # return the indices of each prediction\n _, predictions = scores.max(1)\n num_correct = float((predictions == val_target).sum())\n num_sample = float(predictions.size(0))\n val_losses.append(loss.item())\n val_accuracies.append(num_correct / num_sample)\n return val_losses, val_accuracies", "def train_eval(model, train_set):\n num_train_batch = len(train_set)\n train_loss = np.zeros((num_train_batch, 1), dtype=float)\n train_acc = np.zeros((num_train_batch, 1), dtype=float)\n shuffle(train_set)\n for ibatch, batch in enumerate(train_set):\n result = model.train_on_batch({'input':batch[0]}, {'fp1':batch[1], 'fp2':batch[1], 'fp3':batch[1], 'ave':batch[1]})\n train_loss[ibatch] = result[0]\n train_acc[ibatch] = result[-1]\n return np.mean(train_loss), np.mean(train_acc)", "def test_nn_predicts_accurate_results(self):\n self.nn.train_nn(self.X_train, self.y_train, 6, 10, 0.06)\n accuracy = 0\n X_test, y_test = load_data(\"../data/testdata.mat.tar.gz\")\n for i in range(len(X_test[:100])):\n out = self.nn.forward_prop(X_test[i])[0][-1]\n if np.argmax(out) == np.where(y_test[i])[0][0]:\n accuracy += 1\n else:\n print(\"Incorrect\", np.argmax(out))\n print(\"accuracy: \", accuracy)\n self.assertGreaterEqual(accuracy, 70)", "def evaluate_individual(predictions, test_files, models):\n\n print(\"\\nAccuracy for individual models\\n\")\n \n # Fix Location\n correct_predictions = [0, 0, 0]\n total_predictions = [0, 0, 0]\n num_failed_predictions = 0\n\n for prediction in predictions:\n if prediction[\"correct_data\"][\"correct_location\"] == prediction[\"predicted_location\"]:\n correct_predictions[FixType[prediction[\"correct_data\"][\"correct_type\"]].value] = correct_predictions[FixType[\n prediction[\"correct_data\"][\"correct_type\"]].value] + 1\n if prediction[\"predicted_location\"] is None:\n num_failed_predictions = num_failed_predictions + 1\n total_predictions[FixType[prediction[\"correct_data\"][\"correct_type\"]].value] = total_predictions[FixType[\n prediction[\"correct_data\"][\"correct_type\"]].value] + 1\n\n for i in range(3):\n if total_predictions[i] == 0: # If the type was never predicted\n accuracy = 0\n else:\n accuracy = correct_predictions[i] / total_predictions[i]\n print(f\"Fix Location accuracy for class {FixType(i).name}: {accuracy * 100} %\")\n\n accuracy = sum(correct_predictions) / (len(predictions) - num_failed_predictions)\n print(f\"Fix Location accuracy overall is {accuracy * 100} %\")\n \n # Fix type\n correct_predictions = [0, 0, 0]\n total_predictions = [0, 0, 0]\n num_failed_predictions = 0\n\n for prediction in predictions:\n if prediction[\"correct_data\"][\"correct_type\"] == prediction[\"predicted_type\"]:\n correct_predictions[FixType[prediction[\"predicted_type\"]].value] = 
correct_predictions[FixType[\n prediction[\"predicted_type\"]].value] + 1\n if prediction[\"predicted_type\"] is None:\n num_failed_predictions = num_failed_predictions + 1\n total_predictions[FixType[prediction[\"predicted_type\"]].value] = total_predictions[FixType[\n prediction[\"predicted_type\"]].value] + 1\n\n for i in range(3):\n if total_predictions[i] == 0: # If the type was never predicted\n accuracy = 0\n else:\n accuracy = correct_predictions[i] / total_predictions[i]\n print(f\"Fix Type accuracy for class {FixType(i).name}: {accuracy * 100} %\")\n\n accuracy = sum(correct_predictions) / (len(predictions) - num_failed_predictions)\n print(f\"Fix Type accuracy overall is {accuracy * 100} %\")\n \n # We repeat the predictions to evaluate the insert and modify models individually, regardless of the predicted fix type \n\n raw_training_samples = []\n\n if test_files.endswith(\".json\"): # Single JSON file\n with open(test_files) as file:\n logging.info(\"Source ending in .json. Predicting on single JSON file.\")\n raw_training_samples = json.load(file)\n else: # Folder path\n for filename in listdir(test_files):\n with open(test_files + filename) as file:\n raw_training_samples.extend(json.load(file))\n \n correct_predictions_insert = 0\n total_predictions_insert = 0\n correct_predictions_modify = 0\n total_predictions_modify = 0\n insert_tokens = []\n modify_tokens = []\n\n for sample in raw_training_samples:\n # Insert\n if sample[\"metadata\"][\"fix_type\"] == \"insert\":\n actual_sample, tokens = IOProcessor.preprocess(sample[\"wrong_code\"])\n pred = predict_single(actual_sample, models[2])\n token = IOProcessor.postprocess(pred, 2)\n if token == sample[\"metadata\"][\"fix_token\"]: # Correct Prediction\n correct_predictions_insert = correct_predictions_insert + 1\n else: # Incorrect prediction\n insert_tokens.append([token, sample[\"metadata\"][\"fix_token\"]])\n total_predictions_insert = total_predictions_insert + 1\n # Modify\n if sample[\"metadata\"][\"fix_type\"] == \"modify\":\n actual_sample, tokens = IOProcessor.preprocess(sample[\"wrong_code\"])\n pred = predict_single(actual_sample, models[3])\n token = IOProcessor.postprocess(pred, 3)\n if token == sample[\"metadata\"][\"fix_token\"]: # Correct Prediction\n correct_predictions_modify = correct_predictions_modify + 1\n else: # Incorrect prediction\n modify_tokens.append([token, sample[\"metadata\"][\"fix_token\"]])\n total_predictions_modify = total_predictions_modify + 1\n\n insert_accuracy = correct_predictions_insert / total_predictions_insert\n modify_accuracy = correct_predictions_modify / total_predictions_modify\n print(f\"Fix Token accuracy for insert is {insert_accuracy * 100} %\")\n print(f\"Fix Token accuracy for modify is {modify_accuracy * 100} %\")\n\n # The following code may be used to create a swarm plot of the erroneous predictions for fix locations\n # This does, however, require the installation of the pandas, seaborn, and matplotlib libraries.\n \n # import seaborn as sns\n # import matplotlib.pyplot as plt\n # import pandas as pd\n # location_distance_array = []\n # for prediction in predictions:\n # actual_sample, tokens = IOProcessor.preprocess(prediction[\"correct_data\"][\"wrong_code\"])\n # label = get_token_index(prediction[\"correct_data\"][\"wrong_code\"], tokens, prediction[\"correct_data\"][\"correct_location\"])\n # if prediction[\"predicted_token_location\"] - label == 0:\n # pass\n # else:\n # location_distance_array.append([prediction[\"predicted_token_location\"] - label, 
prediction[\"correct_data\"][\"correct_type\"]])\n \n # df = pd.DataFrame(data=location_distance_array)\n # sns.set_theme(style=\"whitegrid\")\n # f, ax = plt.subplots(figsize=(6, 4))\n # sns.despine(bottom=True, left=True)\n # sns.swarmplot(y=0, x=1, data=df, palette=\"dark\", size=6)\n # ax.set_xlabel('')\n # ax.set_ylabel('')\n # plt.ylim([-15, 16])\n \n # plt.savefig('line_plot.pdf', bbox_inches='tight', pad_inches=0)", "def do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n data_set):\n # And run one epoch of eval.\n true_count = 0 # Counts the number of correct predictions.\n steps_per_epoch = data_set.num_examples // FLAGS.batch_size\n num_examples = steps_per_epoch * FLAGS.batch_size\n for step in xrange(steps_per_epoch):\n feed_dict = fill_feed_dict(data_set,\n images_placeholder,\n labels_placeholder, phase_pl, False)\n true_count += sess.run(eval_correct, feed_dict=feed_dict)\n precision = float(true_count) / num_examples\n \n \n print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %\n (num_examples, true_count, precision))\n return precision", "def compute_model_score(model,train_X,train_Y,test_X,test_Y, id_run):\n\t##\n\t## IN PROGRESS\n\t##\n\n\t## parameters\n\tbatch_size = 32\n\tepochs = 2\n\tnum_classes = 2\n\n\t## data pre-processing\n\ttrain_X = train_X.astype('float32')\n\ttest_X = test_X.astype('float32')\n\ttrain_X = train_X / 255.\n\ttest_X = test_X / 255.\n\n\t# Change the labels from categorical to one-hot encoding\n\ttrain_Y_one_hot = to_categorical(train_Y)\n\ttest_Y_one_hot = to_categorical(test_Y)\n\n\t## split dtaa\n\ttrain_X,valid_X,train_label,valid_label = train_test_split(train_X, train_Y_one_hot, test_size=0.2, random_state=13)\n\n\t## prepare the model\n\tmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(),metrics=['accuracy'])\n\n\tprint model.summary()\n\n\t## train the model\n\tfashion_train = model.fit(train_X, train_label, batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(valid_X, valid_label))\n\n\t# evaluate the model\n\tlog_file = open(\"log/\"+str(id_run)+\".log\", \"w\")\n\ttest_eval = model.evaluate(test_X, test_Y_one_hot, verbose=0)\n\tlog_file.write('Test loss:'+str(test_eval[0]))\n\tlog_file.write('Test accuracy:'+str(test_eval[1]))\n\tlog_file.close()\n\n\t## create a few figures\n\taccuracy = fashion_train.history['acc']\n\tval_accuracy = fashion_train.history['val_acc']\n\tloss = fashion_train.history['loss']\n\tval_loss = fashion_train.history['val_loss']\n\tepochs = range(len(accuracy))\n\tplt.figure()\n\tplt.plot(epochs, accuracy, 'bo', label='Training accuracy')\n\tplt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')\n\tplt.title('Training and validation accuracy')\n\tplt.legend()\n\tplt.savefig(\"log/\"+str(id_run)+\"_acc.png\")\n\tplt.close()\n\tplt.figure()\n\tplt.plot(epochs, loss, 'bo', label='Training loss')\n\tplt.plot(epochs, val_loss, 'b', label='Validation loss')\n\tplt.title('Training and validation loss')\n\tplt.legend()\n\tplt.savefig(\"log/\"+str(id_run)+\"_loss.png\")\n\tplt.close()\n\n\t## return accuracy as a score\n\treturn test_eval[1]", "def model_testing(X_train,y_train):\n\n # for testing amount of layers, each layer has 32 neurons\n # layers = [[32, 32], [32, 32, 32], [32, 32, 32, 32], [32, 32, 32, 32],\\\n # [32, 32, 32, 32, 32], [32, 32, 32, 32, 32, 32]]\n layers = [[8], [16], [32], [64], [128], [256]]\n\n # activation = [\"linear\", \"sigmoid\", \"relu\", \"softmax\"]\n activation = [\"relu\"]\n 
runs = 1\n for i, act in enumerate(activation):\n val_accs = []\n for layer in layers:\n acc_avg = []\n for run in range(runs):\n model = create_model_testing(layer, act)\n\n # train model on full train set, with 80/20 CV split\n training = model.fit(X_train, y_train, epochs=100, validation_split=0.2, verbose=0)\n val_acc = np.mean(training.history['val_accuracy'])\n print(\"Run \", run, \" - \", act + \" activation - layer \" + str(layer))\n acc_avg.append(val_acc)\n\n # save average accuracy of runs\n val_accs.append(round(np.mean(acc_avg)*100, 2))\n print(\"accuracy: \" + str(np.mean(acc_avg)))\n\n # plot line for each activation method\n plt.plot([1,2,4,8,16,32,64,128,256], val_accs, label=act)\n # plt.plot(val_accs, label=act)\n\n # plotting\n plt.title(\"Accuracy of neural network model with different layers (N=\" +\\\n str(len(layers)) + \")\", fontsize=22)\n plt.xlabel(\"Layers\", fontsize=20)\n # plt.xticks(np.arange(1, len(val_accs) + 1, 1), fontsize=18)\n plt.ylabel(\"Accuracy (%)\", fontsize=20)\n plt.legend()\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/linear-relu-\" + str(runs) + \"runs.png\")\n plt.show()", "def accuracy(self, X_train, X_test):\n loss, accuracy = self.estimator.evaluate(X_test, X_train)\n return accuracy", "def validate(self, inputs, labels):\n # Set the phase to test.\n tf.keras.backend.set_learning_phase(0)\n accuracy = self.sess.run([self.accuracy_eval],\n feed_dict={\n self.inputs_eval: inputs,\n self.labels_eval: labels\n })\n costs = self.sess.run(self.cost_eval,\n feed_dict={\n self.inputs_eval: inputs,\n self.labels_eval: labels\n })\n return accuracy, costs", "def evaluate_accuracy(net, data_iter): #@save\n if isinstance(net, torch.nn.Module):\n net.eval() # Set the model to evaluation mode\n metric = Accumulator(2) # No. of correct predictions, no. 
of predictions\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), len(y))\n return metric[0] / metric[1]", "def validation_epoch(self):\n self.model.eval()\n\n # Compute for training set\n train_loss, train_acc = compute_loss_and_accuracy(\n self.dataloader_train, self.model, self.loss_criterion\n )\n self.TRAIN_ACC.append(train_acc)\n self.TRAIN_LOSS.append(train_loss)\n\n # Compute for validation set\n validation_loss, validation_acc = compute_loss_and_accuracy(\n self.dataloader_val, self.model, self.loss_criterion\n )\n self.VALIDATION_ACC.append(validation_acc)\n self.VALIDATION_LOSS.append(validation_loss)\n print(\"Current validation loss:\", validation_loss, \" Accuracy:\", validation_acc)\n # Compute for testing set\n test_loss, test_acc = compute_loss_and_accuracy(\n self.dataloader_test, self.model, self.loss_criterion\n )\n self.TEST_ACC.append(test_acc)\n self.TEST_LOSS.append(test_loss)\n\n self.model.train()", "def _evaluate_during_fit(self, test_loader, epoch):", "def evaluate(model, iterations, use_cuda=False):\n\n logger.debug(\"Allocating input and target tensors on GPU : %r\", use_cuda)\n\n # create the instance of data loader\n data_loader = DataLoaderMnist(cuda=use_cuda, seed=1, shuffle=False, train_batch_size=64, test_batch_size=100)\n\n model.eval()\n total = 0\n correct = 0\n current_iterations = 0\n\n with torch.no_grad():\n for inputs, labels in data_loader.test_loader:\n inputs, labels = inputs.to(data_loader.device), labels.to(data_loader.device)\n output = model(inputs)\n current_iterations += 1\n _, predicted = torch.max(output.data, dim=1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if iterations is not None:\n if current_iterations >= iterations:\n break\n\n accuracy = correct / total\n return accuracy", "def accuracy(self):\r\n # Load tarined model using intent id.\r\n clf = joblib.load(filename=self.intention_id+'.pkl')\r\n # Compute accuracy for hole training data and return.\r\n return clf.score(X=self.training_data, y=self.target_data)", "def valid_one_epoch(self):\n prog_bar = tqdm(enumerate(self.valid_data), total=len(self.valid_data))\n self.model.eval()\n all_targets = []\n all_predictions = []\n with torch.no_grad():\n for idx, inputs in prog_bar:\n ids = inputs['inputs'].to(self.device, dtype=torch.long)\n mask = inputs['attention_mask'].to(self.device, dtype=torch.long)\n targets = inputs['targets'].to(self.device, dtype=torch.float)\n\n outputs = self.model(input_ids=ids, attention_mask=mask)\n all_targets.extend(targets.cpu().detach().numpy().tolist())\n all_predictions.extend(outputs.cpu().detach().numpy().tolist())\n\n val_rmse_loss = np.sqrt(mean_squared_error(all_targets, all_predictions))\n print('Validation RMSE: {:.2f}'.format(val_rmse_loss))\n \n return val_rmse_loss", "def evaluate(self):\n self.training = False", "def _running_eval_acc(y_hat: torch.Tensor, y_truth: torch.Tensor, label_mask,\n n_total: defaultdict = None, n_correct: defaultdict = None,\n soft_to_hard_fn: Callable = None,\n soft_to_hard_fn_kwargs: dict = None):\n\n y_hat_preds = torch.argmax(y_hat, dim=2)\n\n if not n_total:\n n_total = defaultdict(int)\n\n if not n_correct:\n n_correct = defaultdict(int)\n\n for batch in range(label_mask.shape[0]):\n mask = label_mask.data[batch]\n preds = y_hat_preds.data[batch]\n labels = y_truth.data[batch]\n\n for i, m in enumerate(mask):\n if m == 1:\n n_total[labels[i].item()] += 1\n n_correct[labels[i].item()] += preds[i].item() == labels[i].item()\n\n acc = 0.\n weight = 1. 
/ len(n_total.keys())\n # 1.0 - n_total[class] / sum(n_total) = weight\n # Apply per class weight, sum to 1.0\n\n for k in n_total.keys():\n val = 0\n if n_total[k] > 0:\n val = float(n_correct[k]) / float(n_total[k])\n acc += val\n\n acc *= 100. * weight\n\n return acc, n_total, n_correct", "def evaluate_ensemble_acc(\n models,\n ds\n):\n n = 0\n correct = 0\n for batch_x, batch_y in ds:\n batch_pred = get_ensemble_model_prediction(\n models,\n batch_x,\n ensemble_method='soft',\n )\n correct += tf.math.reduce_sum(\n tf.cast(batch_pred == batch_y, dtype=tf.int32)\n )\n n += batch_y.shape[0]\n return correct / n", "def evaluate(X, yt, cls, name='data'):\n yp = cls.predict(X)\n acc = metrics.accuracy_score(yt, yp)\n return acc", "def _evaluate(self, train_x, train_y, test_x, test_y, n_targets, name):\n r_temp = {}\n for metric_name in self.metrics:\n r_temp.update({f\"{metric_name}_Model\": name, f\"{metric_name}_Sum\": 0,\n f\"{metric_name}_Min\": 1000000, f\"{metric_name}_Max\": 0})\n\n for i in range(self.repetitions):\n is_nan = True\n while (is_nan):\n model = self.get_model(train_x.shape[1], n_targets)\n model.fit(train_x, train_y, **self.fit_kwargs)\n result = model.predict(test_x)\n is_nan = np.any(np.isnan(result))\n del model\n\n for metric_name in self.metrics:\n metric = self.get_metrics(metric_name)\n value = metric(result, test_y)\n r_temp[f\"{metric_name}_Sum\"] += value\n if r_temp[f\"{metric_name}_Min\"] > value:\n r_temp[f\"{metric_name}_Min\"] = value\n if r_temp[f\"{metric_name}_Max\"] < value:\n r_temp[f\"{metric_name}_Max\"] = value\n keras.backend.clear_session()\n for metric_name in self.metrics:\n r_temp[f\"{metric_name}_Mean\"] = r_temp[f\"{metric_name}_Sum\"] / self.repetitions\n return r_temp", "def accuracy(self):\n total_predictions = self.tp + self.fp + self.tn + self.fn;\n return float(self.tp + self.tn) / total_predictions if total_predictions != 0 else 1", "def evaluate(model, data):\n n_targets = 0\n n_correct_predictions = 0\n\n # Set the model on evaluatio mode.\n model.eval()\n\n # Create progress bar.\n progress_bar = tqdm.tqdm(total=len(data),\n unit='batch',\n desc='[evaluate] batch accuracy: 0.000',\n leave=False)\n\n # Loop through validation batches.\n for inputs, targets in data:\n\n # Send data to GPU if CUDA is enabled.\n if next(model.parameters()).is_cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Feed forward.\n with torch.set_grad_enabled(False):\n outputs = model(inputs)\n\n # Choose the class with maximum probability.\n _, predictions = torch.max(outputs, 1)\n\n accuracy = (predictions == targets).sum().item() / len(targets)\n progress_bar.update(1)\n progress_bar.set_description(\n '[evaluate] batch accuracy: {accuracy:.3f}'.format(\n accuracy=accuracy))\n\n # Accumulate targets and correct predictions count.\n n_targets += len(targets)\n n_correct_predictions += (predictions == targets).sum().item()\n\n # Close progress bar.\n progress_bar.close()\n\n return n_correct_predictions / n_targets", "def finish_online_evaluation_extended(self, task):\n # -- Get current True-Positive, False-Positive and False-Negative -- #\n self.online_eval_tp = np.sum(self.online_eval_tp, 0)\n self.online_eval_fp = np.sum(self.online_eval_fp, 0)\n self.online_eval_fn = np.sum(self.online_eval_fn, 0)\n\n # -- Calculate the IoU -- #\n global_iou_per_class = [i for i in [i / (i + j + k) for i, j, k in\n zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]\n if not np.isnan(i)]\n\n # -- Calculate the Dice -- #\n global_dc_per_class 
= [i for i in [2 * i / (2 * i + j + k) for i, j, k in\n zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]\n if not np.isnan(i)]\n\n # -- Store IoU and Dice values. Ensure it is float64 so its JSON serializable -- #\n # -- Do not use self.all_val_eval_metrics since this is used for plotting and then the -- #\n # -- plots do not build correctly because based on self.save_every more dice values than -- #\n # -- expected (epochs) are in there --> see plot_progress function in network_trainer.py -- #\n iou = np.mean(global_iou_per_class, dtype=\"float64\")\n dice = np.mean(global_dc_per_class, dtype=\"float64\")\n\n # -- Update the log file -- #\n self.print_to_log_file(\"Average global foreground IoU for task {}: {}\".format(task, str(global_iou_per_class)))\n self.print_to_log_file(\"(interpret this as an estimate for the IoU of the different classes. This is not \"\n \"exact.)\")\n self.print_to_log_file(\"Average global foreground Dice for task {}: {}\".format(task, str(global_dc_per_class)))\n self.print_to_log_file(\"(interpret this as an estimate for the Dice of the different classes. This is not \"\n \"exact.)\")\n\n # -- Add the results to self.validation_results based on task and epoch -- #\n if self.validation_results.get('epoch_'+str(self.epoch), None) is None:\n self.validation_results['epoch_'+str(self.epoch)] = { task: {\n 'IoU': iou,\n 'Dice': dice\n }\n }\n else: # Epoch entry does already exist in self.validation_results, so only add the task with the corresponding values\n self.validation_results['epoch_'+str(self.epoch)][task] = { 'IoU': iou,\n 'Dice': dice\n }\n \n # -- Empty the variables for next iteration -- #\n self.online_eval_foreground_dc = []\n self.online_eval_tp = []\n self.online_eval_fp = []\n self.online_eval_fn = []", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def evaluate_model(model, X_test_input, y_test_input):\r\n pred_class = [model.classes_[i] for i in model.predict_proba(X_test_input).argmax(axis=-1)]\r\n pred_accuracy = np.sum(np.array(y_test_input)==np.array(pred_class))/len(pred_class)\r\n return pred_class, pred_accuracy", "def eval(self):\n target_truth_labels = self.get_target_labels()\n for key in self.id_uncertainty_measures.keys():\n # deep copy needed as we mutate confidence values later on\n decision_fn_value = np.concatenate((copy.deepcopy(self.id_uncertainty_measures[key]),\n copy.deepcopy(self.ood_uncertainty_measures[key])),\n axis=0)\n # negation needed for confidence, as confidence is indicator of label=0 samples\n # i.e for correct classified samples.\n # But we need scores for label=1 samples i.e misclassified samples\n # to be higher, so we negate.\n if key == UncertaintyMeasuresEnum.CONFIDENCE or key == UncertaintyMeasuresEnum.PRECISION:\n decision_fn_value *= -1.0\n\n aupr, auroc = ClassifierPredictionEvaluator.compute_pr_roc_curves(\n decision_fn_value, target_truth_labels, self.result_dir, key._value_)\n\n with open(os.path.join(self.result_dir, 'results.txt'), 'a') as f:\n f.write('AUPR using ' + key._value_ + \": \" +\n str(np.round(aupr * 100.0, 1)) + '\\n')\n f.write('AUROC using ' + key._value_ + \": \" +\n str(np.round(auroc * 100.0, 1)) + '\\n')", "def eval_perf_train(model, X_train=None, y_train=None):\n\n # if X_train != None and y_train != 
None:\n\n y_hat_train = model.predict(X_train)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f'Train Mean Absolute Error: {train_mae:,.2f}')\n print(f'Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n # if X_test != None and y_test != None:\n\n # y_hat_test = model.predict(X_test)\n\n # test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n # test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n # test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n # test_r = metrics.r2_score(y_test, y_hat_test)\n\n # print('Evaluating Performance on Testing Data:\\n')\n # print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n # print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n # print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n # print(f'Test R-Square Value: {round(test_r,2)}')", "def evaluate(trainy, yTrue):\n\n #calculates the accuracy using MFS\n counterTrain = Counter(trainy)\n maxCount = max(counterTrain.values())\n for key in list(counterTrain.keys()):\n if counterTrain[key] == maxCount:\n MFS = key\n counterTest = Counter(yTrue)\n print(\"\\nAccuracy using MFS\")\n accuracy = counterTest[MFS] / len(yTrue)\n print(accuracy)\n\n #creates the list of predicted values\n yPred = [MFS] * len(yTrue)\n\n #prints the confusion matrix\n print(\"\\nConfusion Matrix\")\n print(confusion_matrix(yTrue,yPred))\n\n return accuracy", "def evaluate(cfg: DictConfig):\n\n experiments = cfg.get('experiment_type', f'{cfg.model.name}_only')\n fixed_t0 = cfg.get('fixed_t0', False)\n ext = '_fixedT0' if fixed_t0 else ''\n\n base_dir = cfg.device.root\n datasource = cfg.datasource.name\n\n if experiments == 'ablations':\n models = {\n 'FluxRGNN': ['final',\n 'final_without_encoder',\n 'final_without_boundary'],\n 'LocalLSTM': ['final']\n }\n elif experiments == 'final':\n models = {\n 'FluxRGNN': ['final'],\n 'GAM': ['final'],\n 'HA': ['final'],\n 'GBT': ['final']\n }\n else:\n m = cfg.model.name\n year = cfg.datasource.test_year\n\n # find all experiments available for this model, datasource and test year\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{year}')\n models = {\n m : [ f.name for f in os.scandir(result_dir) if f.is_dir() ]\n }\n\n\n # thresholds for binary classification metrics\n if cfg.datasource.name == 'abm':\n thresholds = [0.0019, 0.0207]\n else:\n thresholds = [0, 10, 20]\n\n rmse_per_hour = []\n mae_per_hour = []\n pcc_per_hour = []\n bin_per_hour = []\n\n rmse_per_night = []\n mae_per_night = []\n\n output_dir = osp.join(base_dir, 'results', datasource, f'performance_evaluation{ext}', experiments)\n os.makedirs(output_dir, exist_ok=True)\n\n counter = 0\n\n for m, dirs in models.items():\n print(f'evaluate {m}')\n\n for d in dirs:\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{cfg.datasource.test_year}', d)\n\n # check if directory exists\n if os.path.isdir(result_dir):\n results, model_cfg = load_cv_results(result_dir, trials=cfg.task.repeats, ext=ext)\n\n df_prep = pd.read_csv(osp.join(base_dir, 'data', 'preprocessed',\n 
f'{model_cfg[\"t_unit\"]}_{model_cfg[\"model\"][\"edge_type\"]}_ndummy={model_cfg[\"datasource\"][\"n_dummy_radars\"]}',\n datasource, cfg.season, str(cfg.datasource.test_year), 'dynamic_features.csv'))\n tidx2night = dict(zip(df_prep.tidx, df_prep.nightID))\n\n rmse_per_hour.append(compute_rmse(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n mae_per_hour.append(compute_mae(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n pcc_per_hour.append(compute_pcc(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n\n if fixed_t0:\n rmse_per_night.append(compute_rmse_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n mae_per_night.append(compute_mae_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n\n # compute binary classification measures\n for thr in thresholds:\n bin_per_hour.append(compute_bin(m, d, results, groupby=['horizon', 'trial'], threshold=thr, km2=True))\n\n counter += 1\n\n else:\n print(f'Experiment \"{d}\" for model \"{m}\" and datasource \"{datasource}\" is not available. '\n f'Use \"run_experiments.py model={m} datasource={datasource} +experiment={d}\" to run this experiment.')\n\n if counter > 0:\n rmse_per_hour = pd.concat(rmse_per_hour)\n rmse_per_hour.to_csv(osp.join(output_dir, f'rmse_per_hour.csv'))\n\n mae_per_hour = pd.concat(mae_per_hour)\n mae_per_hour.to_csv(osp.join(output_dir, f'mae_per_hour.csv'))\n\n pcc_per_hour = pd.concat(pcc_per_hour)\n pcc_per_hour.to_csv(osp.join(output_dir, f'pcc_per_hour.csv'))\n\n bin_per_hour = pd.concat(bin_per_hour)\n bin_per_hour.to_csv(osp.join(output_dir, f'bin_per_hour.csv'))\n\n if fixed_t0:\n rmse_per_night = pd.concat(rmse_per_night)\n rmse_per_night.to_csv(osp.join(output_dir, f'rmse_per_night.csv'))\n\n mae_per_night = pd.concat(mae_per_night)\n mae_per_night.to_csv(osp.join(output_dir, f'mae_per_night.csv'))", "def fit(self):\n for i in range(self.current_epoch, self.max_epoch):\n self.current_epoch += 1\n # train\n train_dataloader = self.data_module.get_train_dataloader(\n batch_size=self.train_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers,\n pin_memory=True\n )\n neptune.log_metric(\"learning_rate_vs_epoch\", self.optimizer.param_groups[0]['lr'])\n self.train_one_epoch(train_dataloader)\n\n # validate \n if self.validate_after == 'epoch' and self.train_on_all_data == False and self.run_lr_range_test == False:\n validation_dataloader = self.data_module.get_valid_dataloader(\n batch_size=self.valid_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers, \n pin_memory=True\n )\n self.validate_one_epoch(validation_dataloader)\n\n if self.scheduler:\n if self.step_scheduler_after == 'epoch': \n if self.step_scheduler_metric == 'val_auc':\n self.scheduler.step(self.metrics['valid'][-1]['auc_score'])\n else:\n self.scheduler.step()\n\n if self.run_lr_range_test:\n neptune.log_metric('validation_epoch_end_AUC_vs_LR', \n self.scheduler.get_last_lr()[0], y=self.metrics['valid'][-1]['auc_score'])\n\n # checkpoint model for resuming model\n if (self.current_epoch % self.checkpoint_epochs) == 0:\n self.save_checkpoint()\n\n # sleep the training process\n if self.current_epoch % self.sleep_in_epochs == 0:\n print(f\"SLEEPING FOR {self.sleep_time} at epoch={self.current_epoch}\")\n for i in range(int(self.sleep_time/30)):\n time.sleep(i)\n 
neptune.log_metric(\"sleeping_status\", y=1)\n\n stop_training = self.stopping_criteria()\n if stop_training:\n if self.fp16:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer.zero_grad()\n else:\n self.optimizer.step()\n self.optimizer.zero_grad()\n # backward all the accumulate gradients\n print(f\"stopped training at {self.current_epoch} epoch\")\n break", "def evaluate(self, test_x, test_y):\n score = self._model.evaluate(test_x, test_y, verbose=self._verbose)\n print(\"Test score: \", score[0])\n print(\"Test accuracy: \", score[1])", "def one_shot_test(self, model, support_set_size, number_of_tasks_per_alphabet,\n is_validation):\n\n # Set some variables that depend on dataset\n if is_validation:\n alphabets = self._validation_alphabets\n print('\\nMaking One Shot Task on validation alphabets:')\n else:\n alphabets = self._evaluation_alphabets\n print('\\nMaking One Shot Task on evaluation alphabets:')\n\n mean_global_accuracy = 0\n\n for alphabet in alphabets:\n mean_alphabet_accuracy = 0\n for _ in range(number_of_tasks_per_alphabet):\n images, _ = self.get_one_shot_batch(\n support_set_size, is_validation=is_validation)\n probabilities = model.predict_on_batch(images)\n\n # Added this condition because noticed that sometimes the outputs\n # of the classifier was almost the same in all images, meaning that\n # the argmax would be always by defenition 0.\n if np.argmax(probabilities) == 0 and probabilities.std()>0.01:\n accuracy = 1.0\n else:\n accuracy = 0.0\n\n mean_alphabet_accuracy += accuracy\n mean_global_accuracy += accuracy\n\n mean_alphabet_accuracy /= number_of_tasks_per_alphabet\n\n print(alphabet + ' alphabet' + ', accuracy: ' +\n str(mean_alphabet_accuracy))\n if is_validation:\n self._current_validation_alphabet_index += 1\n else:\n self._current_evaluation_alphabet_index += 1\n\n mean_global_accuracy /= (len(alphabets) *\n number_of_tasks_per_alphabet)\n\n print('\\nMean global accuracy: ' + str(mean_global_accuracy))\n\n # reset counter\n if is_validation:\n self._current_validation_alphabet_index = 0\n else:\n self._current_evaluation_alphabet_index = 0\n\n return mean_global_accuracy", "def test(self, inputs, labels):\n n = inputs.shape[0]\n\n error = 0.0\n for idx in range(n):\n result = self.forward(inputs[idx:idx+1, :])\n error += abs(result - labels[idx:idx+1, :])\n\n error /= n\n accuracy = np.round((1 - error)*100, 3)\n self.accuracy_box.append(accuracy[0][0])\n print('accuracy: %.2f' % accuracy + '%')\n print('')", "def compute_accuracy(self):\n if not self.is_training:\n logits = self.test_logits\n labels = self.data.test_labels\n else:\n logits = self.train_logits\n labels = self.data.labels\n\n predictions = tf.cast(tf.argmax(logits, 1), tf.int32)\n correct = tf.equal(labels, predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def test(self):\n self.eval()\n test_mask = self.data.test_mask\n labels = self.data.y\n output = self.forward(self.data)\n # output = self.output\n loss_test = F.nll_loss(output[test_mask], labels[test_mask])\n acc_test = utils.accuracy(output[test_mask], labels[test_mask])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n return acc_test.item()", "def _eval_classifier(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = self.df_sample[self.label_column]\n\n 
precision_baseline = precision_score(y_label_baseline, y_pred_baseline)\n recall_baseline = recall_score(y_label_baseline, y_pred_baseline)\n acc_baseline = accuracy_score(y_label_baseline, y_pred_baseline)\n f1_baseline = f1_score(y_label_baseline, y_pred_baseline)\n try:\n auc_baseline = roc_auc_score(y_label_baseline, y_pred_baseline)\n except ValueError:\n auc_baseline = \"NA\"\n\n precision_sample = precision_score(y_label_sample, y_pred_sample)\n recall_sample = recall_score(y_label_sample, y_pred_sample)\n acc_sample = accuracy_score(y_label_sample, y_pred_sample)\n f1_sample = f1_score(y_label_sample, y_pred_sample)\n try:\n auc_sample = roc_auc_score(y_label_sample, y_pred_sample)\n except ValueError:\n auc_sample = \"NA\"\n\n metrics_df = pd.DataFrame(\n {\n \"Accuracy\": [acc_baseline, acc_sample],\n \"Precision\": [precision_baseline, precision_sample],\n \"Recall\": [recall_baseline, recall_sample],\n \"F1\": [f1_baseline, f1_sample],\n \"AUC\": [auc_baseline, auc_sample],\n },\n index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df", "def model_run(self, model, estimators):\n model.fit(self.X_train, self.y_train)\n y_score = model.predict(self.X_test)\n accu_train = np.sum(model.predict(self.X_train) == self.y_train) / self.y_train.size\n accu_test = np.sum(y_score == self.y_test) / self.y_test.size\n\n self.results.write(\"Model Results\\n\")\n self.results.write(\"Number of Estimators: \" + str(estimators) + \"\\n\")\n self.results.write(\"Accuracy on Train: \" + str(accu_train) + \"\\n\")\n self.results.write(\"Accuracy on Test: \" + str(accu_test) + \"\\n\")\n return model", "def run_epoch(self, sess, epoch_num, validate=True):\n total_loss = 0\n accuracies = []\n for i in range(self.batches_per_epoch):\n batch = self.loader.get_batch()\n if self.config.print_every and i % self.config.print_every == 0:\n if validate:\n val_accuracy = self.eval_validation_accuracy()\n print(\"step {}, validation accuracy {:.3f}\".format(i, val_accuracy))\n accuracies.append((i + epoch_num * self.batches_per_epoch, val_accuracy))\n else:\n if self.include_coverage and self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2], batch[3])\n elif self.include_coverage:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n elif self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n else:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1])\n print(\"step {}, training accuracy {:.3f}\".format(i, train_accuracy))\n \n if self.include_coverage and self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.e: batch[2], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_coverage:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.e: batch[1], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n else:\n attention, _, loss_val = sess.run([self.attention, self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.y_: batch[1],\n self.keep_prob: 1-self.config.dropout_prob})\n\t\tpdb.set_trace()\n\t\tnp.savetxt(\"a.csv\", attention[0], delimiter=\",\")\n total_loss += loss_val\n\n return total_loss 
/ self.batches_per_epoch, accuracies", "def test(self, inputs, labels):\n n = inputs.shape[0]\n\n error = 0.0\n for idx in range(n):\n result = self.forward(inputs[idx:idx+1, :])\n error += abs(result - labels[idx:idx+1, :])\n\n print(\"error: \", error)\n error /= n\n print('accuracy: %.2f' % ((1 - error)*100) + '%')\n print('')", "def evaluate_model(sess, model, data_set):\n total_cost = 0.0\n total_r_cost = 0.0\n total_kl_cost = 0.0\n for batch in range(data_set.num_batches):\n unused_orig_x, x, s = data_set.get_batch(batch)\n feed = {model.input_data: x, model.sequence_lengths: s}\n (cost, r_cost,\n kl_cost) = sess.run([model.cost, model.r_cost, model.kl_cost], feed)\n total_cost += cost\n total_r_cost += r_cost\n total_kl_cost += kl_cost\n\n total_cost /= (data_set.num_batches)\n total_r_cost /= (data_set.num_batches)\n total_kl_cost /= (data_set.num_batches)\n return (total_cost, total_r_cost, total_kl_cost)", "def evaluate(self, inputs: ByteTensor, targets: IntTensor, unused) -> float:\n assert isinstance(inputs, ByteTensor)\n assert inputs.shape[1] == self.feature_count\n assert isinstance(targets, IntTensor)\n assert targets.shape == (inputs.shape[0], )\n\n errors = 0\n examples = targets.shape[0]\n for i in range(examples):\n input = inputs[i]\n prediction = self.predict(input)\n if prediction[0] != targets[i].long():\n errors += 1\n accuracy = (examples - errors) / examples\n return accuracy", "def evaluate(func, dset_path, model_path):\n dset = load_dataset(dset_path, 'trva', False)\n\n \"\"\"\n average class-based zero-shot accuracy\n \"\"\"\n scores = func(dset['Xte_unseen'], dset['Ste_unseen_gt'], model_path)\n preds = np.argmax(scores, 1)\n preds = dset['Cte_unseen'][preds]\n acc_zsl = compute_acc(dset['Lte_unseen'], preds)\n\n \"\"\"\n average class-based generalized zsl accuracy on seen test classes\n \"\"\"\n scores = func(dset['Xte_seen'], dset['Sall_gt'], model_path)\n preds = np.argmax(scores, 1)\n preds = dset['Call'][preds]\n acc_gzsl_seen = compute_acc(dset['Lte_seen'], preds)\n\n \"\"\"\n average class-based generalized zsl accuracy on unseen test classes\n \"\"\"\n scores = func(dset['Xte_unseen'], dset['Sall_gt'], model_path)\n preds = np.argmax(scores, 1)\n preds = dset['Call'][preds]\n acc_gzsl_unseen = compute_acc(dset['Lte_unseen'], preds)\n\n print 'ZSL accuracy: ', acc_zsl\n print 'Generalized ZSL accuracy on seen classes: ', acc_gzsl_seen\n print 'Generalized ZSL accuracy on unseen classes: ', acc_gzsl_unseen", "def eval_on_dataset(sess, G, iterator, dataset_name=\"validation\") :\n print(\">>> Evaluating model on %s\" % (dataset_name))\n step = 0\n current_epoch = iterator.epoch\n \n # Evaluate against validation before training to get baseline performance! 
\n step = 0\n cumulative_loss = 0.0\n all_probs = np.array([], dtype=np.float32)\n all_targets = np.array([], dtype=np.float32)\n while current_epoch == iterator.epoch : \n step += 1\n this_x, this_y, this_seqlen, this_mask = iterator.next()\n feed_dict = {G['input_placeholder']: this_x, \n G['target_placeholder']: this_y, \n G['seqlen_placeholder']: this_seqlen, \n G['loss_mask_placeholder']: this_mask}\n loss_value, probs = sess.run([G['loss'], G['output_probs']], feed_dict=feed_dict)\n cumulative_loss += loss_value\n all_probs = np.append(all_probs, probs)\n all_targets = np.append(all_targets, this_y)\n val_loss = cumulative_loss / float(step)\n auroc = roc_auc_score(all_targets, all_probs)\n auprc = average_precision_score(all_targets, all_probs)\n print(\">>> (%s) After epoch %d, loss = %.4f, auroc = %.4f, auprc = %.4f \" % (dataset_name, current_epoch, val_loss, auroc, auprc))\n iterator.epoch = current_epoch", "def update(self, y_true: list[Number], y_pred: list[Number]) -> ForecastingMetric:", "def auto_evaluation(model,x_train,y_train,x_test,y_test):\n\n y_train_prediction=model.predict(x_train)\n y_test_prediction=model.predict(x_test)\n\n plt.scatter(y_train,y_train_prediction,c=\"b\",s=1,alpha=0.5)\n plt.scatter(y_test,y_test_prediction,c=\"r\",s=2,alpha=0.5)\n plt.xlabel(\"actual\")\n plt.ylabel(\"predicted\")\n\n print(\"tr R2: {:.2f}\".format(r2_score(y_train_prediction,y_train)))\n print(\"te R2: {:.2f}\".format(r2_score(y_test_prediction,y_test))) \n \n return y_train_prediction,y_test_prediction", "def get_evaluations(self, pred_Y, Y):\n \n tp, fp, tn, fn = self._get_evaluations(pred_Y, Y)\n\n # calculate F1\n try:\n precision = tp / (tp+fp)\n except ZeroDivisionError:\n precision = tp\n try:\n recall = tp / (tp+fn)\n except ZeroDivisionError:\n recall = tp\n try:\n f1 = 2.0 * ((precision*recall) / (precision+recall))\n except ZeroDivisionError:\n f1 = 0.0\n # calculate accuracy\n accuracy = (tp+tn) / (tp+fp+tn+fn)\n\n return accuracy, f1, precision, recall", "def evaluate(train: pd.DataFrame, test: pd.DataFrame, algorithm):\n\n model = algorithm(train)\n\n test_labels = test['Labels']\n\n predictions = predict_data(test, model)\n\n error = mean_square_error(predictions, test_labels)\n\n acc = accuracy(predictions, test_labels)\n\n return acc, error", "def model_accuracy(model, X, y):\n acc = None\n ### YOUR CODE HERE 1-2 lines\n predictions = model.predict(X)\n acc = np.mean([1 if predict == y[target] else 0 for target, predict in enumerate(predictions)])\n ### END CODE\n return acc", "def eval_model(net, val_iter):\n correct = 0\n total = 0\n cm = conf.ConfusionMatrix([0, 1])\n net.eval()\n with torch.no_grad():\n for batch in val_iter:\n total += batch.correct.size(0)\n prediction = predict_batch(net, batch)\n cm.add_entry(batch.correct.tolist(), prediction.tolist())\n correct += (prediction == batch.correct).sum().item()\n\n return correct/total, cm.get_f1()", "def evaluate():\n model.eval()\n with torch.no_grad():\n loss, n = 0, 0\n for xb, yb in valid_dl:\n n += len(xb)\n loss += loss_func(model(xb), yb) * len(xb)\n\n return loss/n", "def run_evaluation(forecast_probabilities, observed_labels, output_dir_name):\n\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name)\n\n # TODO(thunderhoser): Make binarization threshold an input argument to this\n # method.\n (binarization_threshold, best_csi\n ) = model_eval.find_best_binarization_threshold(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels,\n 
threshold_arg=model_eval.THRESHOLD_ARG_FOR_UNIQUE_FORECASTS,\n criterion_function=model_eval.get_csi,\n optimization_direction=model_eval.MAX_OPTIMIZATION_DIRECTION,\n unique_forecast_precision=FORECAST_PRECISION_FOR_THRESHOLDS)\n\n print (\n 'Best binarization threshold = {0:.4f} ... corresponding CSI = {1:.4f}'\n ).format(binarization_threshold, best_csi)\n\n print 'Binarizing forecast probabilities...'\n forecast_labels = model_eval.binarize_forecast_probs(\n forecast_probabilities=forecast_probabilities,\n binarization_threshold=binarization_threshold)\n\n print 'Creating contingency table...'\n contingency_table_as_dict = model_eval.get_contingency_table(\n forecast_labels=forecast_labels, observed_labels=observed_labels)\n print '{0:s}\\n'.format(str(contingency_table_as_dict))\n\n print 'Computing performance metrics...'\n pod = model_eval.get_pod(contingency_table_as_dict)\n pofd = model_eval.get_pofd(contingency_table_as_dict)\n success_ratio = model_eval.get_success_ratio(contingency_table_as_dict)\n focn = model_eval.get_focn(contingency_table_as_dict)\n accuracy = model_eval.get_accuracy(contingency_table_as_dict)\n csi = model_eval.get_csi(contingency_table_as_dict)\n frequency_bias = model_eval.get_frequency_bias(contingency_table_as_dict)\n peirce_score = model_eval.get_peirce_score(contingency_table_as_dict)\n heidke_score = model_eval.get_heidke_score(contingency_table_as_dict)\n\n print (\n 'POD = {0:.4f} ... POFD = {1:.4f} ... success ratio = {2:.4f} ... '\n 'FOCN = {3:.4f} ... accuracy = {4:.4f} ... CSI = {5:.4f} ... frequency '\n 'bias = {6:.4f} ... Peirce score = {7:.4f} ... Heidke score = {8:.4f}\\n'\n ).format(pod, pofd, success_ratio, focn, accuracy, csi, frequency_bias,\n peirce_score, heidke_score)\n\n auc, scikit_learn_auc = _create_roc_curve(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n bss_dict = _create_attributes_diagram(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n aupd = _create_performance_diagram(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n evaluation_file_name = '{0:s}/model_evaluation.p'.format(output_dir_name)\n print 'Writing results to: \"{0:s}\"...'.format(evaluation_file_name)\n model_eval.write_results(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels,\n binarization_threshold=binarization_threshold, pod=pod, pofd=pofd,\n success_ratio=success_ratio, focn=focn, accuracy=accuracy, csi=csi,\n frequency_bias=frequency_bias, peirce_score=peirce_score,\n heidke_score=heidke_score, auc=auc, scikit_learn_auc=scikit_learn_auc,\n aupd=aupd, bss_dict=bss_dict, pickle_file_name=evaluation_file_name)" ]
[ "0.7057116", "0.69941413", "0.6860783", "0.6805387", "0.67818636", "0.67300326", "0.67083573", "0.6631807", "0.6592348", "0.6574836", "0.6571399", "0.65261126", "0.65081304", "0.6490171", "0.64706427", "0.64673305", "0.64662796", "0.64637923", "0.64597434", "0.6442338", "0.6421062", "0.6407823", "0.64071673", "0.6396262", "0.63904077", "0.63855964", "0.6380326", "0.63605714", "0.6336778", "0.63359106", "0.6324227", "0.63235265", "0.632225", "0.6318548", "0.6315752", "0.631198", "0.6311702", "0.63011616", "0.6300528", "0.6285659", "0.6284402", "0.62776965", "0.6274365", "0.6273465", "0.62647283", "0.62633175", "0.62598073", "0.62489265", "0.6244657", "0.62412995", "0.6239634", "0.6233116", "0.6232539", "0.6226703", "0.62252927", "0.6224887", "0.62238616", "0.62205374", "0.62094045", "0.6202329", "0.6197706", "0.619441", "0.61938846", "0.6189262", "0.61892354", "0.61885244", "0.6186161", "0.6184982", "0.61776483", "0.6176105", "0.6169964", "0.61676055", "0.61666423", "0.6161537", "0.6160322", "0.61600393", "0.6157245", "0.61478466", "0.6142647", "0.61402154", "0.61375564", "0.61321986", "0.61308014", "0.6129083", "0.61263853", "0.6122506", "0.61220044", "0.61176383", "0.61155564", "0.61136115", "0.611052", "0.61082435", "0.61080265", "0.61043453", "0.6101992", "0.60943824", "0.6092417", "0.6089286", "0.6088268", "0.6086728", "0.6084376" ]
0.0
-1
Evaluates the accuracy of the model for each past task.
def evaluate_nlp(model: ContinualModel, dataset: ContinualDataset, last=False) -> Tuple[list, list]: status = model.net.training model.net.eval() accs, accs_mask_classes = [], [] # todo: change the mask recorder for k, test_loader in enumerate(dataset.test_loaders): if last and k < len(dataset.test_loaders) - 1: continue correct, correct_mask_classes, total = 0.0, 0.0, 0.0 for data in test_loader: xs, ys, x_token_idxs, x_token_masks, y_token_idxs, y_token_masks, y_idxs = data x_token_idxs = x_token_idxs.to(model.device) x_token_masks = x_token_masks.to(model.device) y_token_idxs = y_token_idxs.to(model.device) y_token_masks = y_token_masks.to(model.device) y_idxs = y_idxs.to(model.device) task_id = torch.tensor(k, dtype=torch.int64) task_id = task_id.to(model.device) # todo: change the label recorder if 'class-il' not in model.COMPATIBILITY: outputs = model(x_token_idxs, x_token_masks, task_id) else: outputs = model.forward_nlp(x_token_idxs, x_token_masks, task_id) _, pred = torch.max(outputs.data, 1) correct += torch.sum(pred == y_idxs).item() total += y_idxs.shape[0] if dataset.SETTING == 'class-il': mask_classes(outputs, dataset, k) _, pred = torch.max(outputs.data, 1) correct_mask_classes += torch.sum(pred == y_idxs).item() accs.append(correct / total * 100 if 'class-il' in model.COMPATIBILITY else 0) accs_mask_classes.append(correct_mask_classes / total * 100) model.net.train(status) return accs, accs_mask_classes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(eval_ds, model, task):\n\n print('==========EVAL==========')\n # Testing contrastive accuracy\n if task['name'] == 'contrastive_accuracy':\n ds = eval_ds.map(data_utils.pretrain_preprocess)\n ds = ds.batch(128)\n test_contrast_acc = tf.keras.metrics.Accuracy(name='test_constrastive_accuracy')\n for x in ds:\n image = x['image']\n image = tf.transpose(image, [1, 0, 2, 3, 4])\n image = tf.reshape(\n image, \n (image.shape[0]*image.shape[1], image.shape[2], image.shape[3], image.shape[4])\n )\n out = model(image, mode='unsupervised', training=False)\n metrics.update_contrastive_accuracy2(test_contrast_acc, out, TEMP)\n print('test contrastive accuracy')\n print(test_contrast_acc.result())\n return \n\n # Testing classification accuracy \n ds = eval_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.eval_preprocess)\n ds = ds.batch(FLAGS.eval_bs)\n test_class_acc = tf.keras.metrics.Accuracy(name='test_class_accuracy')\n for x in ds:\n image = x['image']\n labels = x[task['name']]\n if task['name'] == 'extr':\n out = model(image, mode='eval', sup_layers=2, training=False)\n else:\n out = model(image, mode='eval', sup_layers=1, training=False)\n metrics.update_supervised_accuracy(test_class_acc, labels, out)\n \n if FLAGS.debug:\n print(tf.math.argmax(out, axis=-1))\n print('test classification accuracy')\n print(test_class_acc.result())", "def eval_model(self, model):\n evaluation = model.evaluate(x=self.xt_test, y=self.yt_test)\n print(\"loss : \" + str(round(evaluation[0]*100, 2)) + \"%\")\n print(\"accuracy: \" + str(round(evaluation[1]*100, 2)) + \"%\")", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def get_accuracy(model, task, batchmanager, test_set=False):\n\n model.eval()\n count, num = 0., 0\n batchmanager = batchmanager if isinstance(batchmanager, BatchManager) else batchmanager.batchmanagers[task]\n\n iter = batchmanager.test_iter if test_set else batchmanager.dev_iter\n\n with torch.no_grad():\n for batch in iter: \n data, targets = batch\n out = model(data, task)\n predicted = out.argmax(dim=1)\n count += (predicted == targets).sum().item()\n num += len(targets)\n\n model.train()\n return count / num", "def train(self):\n y_list = []\n y_hat_list = []\n for ex_dict in ut.EXAMPLES_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return acc", "def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = (output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + self.confusion['true_negative']) \\\n / 
sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)", "def test(self):\n y_list = []\n y_hat_list = []\n for ex_dict in ut.TEST_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return y_hat_list, acc", "def accuracy(self):\n if not self.run:\n self._run()\n return self.model_acc", "def test(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n \r\n dataset.set_split('test')\r\n batch_generator = generate_nmt_batches(dataset, \r\n batch_size=len(dataset), \r\n device=args.device)\r\n\r\n acc_sum = 0.0\r\n model.eval()\r\n \r\n for batch_index, batch_dict in enumerate(batch_generator):\r\n # step 1. compute the output\r\n if isinstance(model,NMTModelWithMLTM):\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_mltm_vector'],\r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n else:\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n\r\n acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)\r\n acc_sum += acc_t\r\n \r\n return acc_sum / (batch_index+1)", "def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.episode.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def evaluate(data_loader, model, device):\n\n\tmodel.eval()\n\ttotal_num_examples = 0\n\ttotal_error = 0\n\tfor idx, batch in enumerate(data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = 
batch['labels'].to(device)\n\n\t\t####Your code here ---\n\n\t\t# get the output from the model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# get error, num_examples using accuracy_fn defined previously\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# update total_error and total_num_examples\n\t\ttotal_error += error\n\t\ttotal_num_examples += num_examples\n\n\taccuracy = 1 - total_error / total_num_examples\n\treturn accuracy", "def get_accuracy(model, iter):\n\n model.eval()\n count, num = 0., 0\n with torch.no_grad():\n for i, batch in enumerate(iter):\n data, targets = batch\n out = model(data)\n predicted = out.argmax(dim=1)\n count += (predicted == targets).sum().item()\n num += len(targets)\n\n model.train()\n return count / num", "def evaluate_test(model, history, class_labels, train_X, test_X, train_y, test_y):\n train_loss, train_acc = model.evaluate(train_X, train_y, verbose=0)\n test_loss, test_acc = model.evaluate(test_X, test_y, verbose=0)\n print('Accuracy \\n Train: %.3f, Test: %.3f' % (train_acc, test_acc))\n print('Loss \\n Train: %.3f, Test: %.3f \\n' % (train_loss, test_loss))\n # plot loss during training\n plt.subplots_adjust(hspace = .5, wspace = 0.5)\n plt.subplot(211)\n plt.title('Loss', weight='bold')\n plt.plot(history.history['loss'], label='train')\n plt.plot(history.history['val_loss'], label='val')\n plt.legend()\n # plot accuracy during training\n plt.subplot(212)\n plt.title('Accuracy', weight='bold')\n plt.plot(history.history['acc'], label='train')\n plt.plot(history.history['val_acc'], label='val')\n plt.legend()\n plt.show()\n print('\\n')\n # predict probabilities for test set\n yhat_probs = model.predict(test_X, verbose=0)\n # predict classes for test set\n yhat_classes = model.predict_classes(test_X, verbose=0)\n # reduce to 1d array\n yhat_probs = yhat_probs[:, 0]\n yhat_classes = yhat_classes[:, 0]\n # calculate metrics\n report = metrics.classification_report(test_y, yhat_classes, target_names=class_labels)\n confusion_matrix = metrics.confusion_matrix(test_y, yhat_classes)\n plot_confusion_matrix(confusion_matrix, class_labels)\n print('\\n')\n return report", "def evaluate(self, input_fn, eval_times, checkpoint_path=None):\n if not checkpoint_path:\n checkpoint_path = self._estimator.latest_checkpoint()\n\n if self._params.eval.type == 'customized':\n metrics = self._estimator.evaluate(\n input_fn, steps=eval_times, checkpoint_path=checkpoint_path)\n else:\n if not self._evaluator:\n self.prepare_evaluation()\n if checkpoint_path:\n current_step = int(os.path.basename(checkpoint_path).split('-')[1])\n else:\n current_step = 0\n predictor = self._estimator.predict(\n input_fn=input_fn,\n checkpoint_path=checkpoint_path,\n yield_single_examples=False)\n losses = collections.defaultdict(lambda: 0.0)\n\n counter = 0\n try:\n while eval_times is None or counter < eval_times:\n outputs = six.next(predictor)\n predictions = {}\n groundtruths = {}\n for key, val in outputs.items():\n if key[0:5] == 'pred_':\n predictions[key[5::]] = val\n if key[0:3] == 'gt_':\n groundtruths[key[3::]] = val\n if key[0:5] == 'loss_':\n losses[key[5::]] += np.mean(val)\n self._evaluator.update(\n predictions,\n groundtruths=(None if self._params.eval.use_json_file\n else groundtruths))\n counter = counter + 1\n tf.logging.info(\n f'Finish eval step {counter} out of total {eval_times} steps.')\n except (tf.errors.OutOfRangeError, StopIteration):\n logging.info(\n 'Evaluation reaches the end after running %d times.', counter)\n\n for key, 
val in outputs.items():\n if key[0:5] == 'loss_':\n losses[key[5::]] /= counter\n metrics = self._evaluator.evaluate()\n\n # Summary writer writes out eval metrics.\n output_dir = os.path.join(self._model_dir,\n 'eval' + self._params.eval.suffix)\n tf.gfile.MakeDirs(output_dir)\n summary_writer = tf.summary.FileWriter(output_dir)\n write_summary(metrics, summary_writer, current_step)\n write_summary(losses, summary_writer, current_step)\n summary_writer.close()\n\n logging.info('Eval result: %s', metrics)\n return metrics", "def eval(self):\n\n # How many questions we get right at precision@1.\n correct = 0\n\n total = self._analogy_questions.shape[0]\n start = 0\n while start < total:\n limit = start + 2500\n sub = self._analogy_questions[start:limit, :]\n idx = self._predict(sub)\n start = limit\n for question in xrange(sub.shape[0]):\n if sub[question, 3] in idx[question]:\n # print(sub[question, 3], idx[question])\n correct += 1\n\n print()\n print(\"Eval %4d/%d accuracy @ top5= %4.1f%%\" % (correct, total,\n correct * 100. / total)\n )", "def eval(self, test_docs, test_labels):\n assert len(test_docs)==len(test_labels)\n preds = [] # predicted labels\n for doc,y_gold in zip(test_docs,test_labels):\n y_pred = self.predict(doc)\n preds.append(y_pred)\n ev = Eval(test_labels, preds)\n return ev.accuracy()", "def evaluate_model(model, ds_valid):\n print(\"-- Evaluate Model:\")\n for features, labels in ds_valid:\n valid_step(model, features, labels)\n logs = \"\\nValid Loss: {}, Valid Accuracy: {}\"\n tf.print(tf.strings.format(logs, (valid_loss.result(), valid_metric.result())))\n valid_loss.reset_states()\n train_metric.reset_states()\n valid_metric.reset_states()", "def evaluate(self, ts_loader=None):\n # start evaluation of the model\n self.tr_model.eval()\n samples, correct = 0, 0\n \n # check if a dataloader was provided for evaluation\n loader = self.ts_loader if not ts_loader else ts_loader\n \n with torch.no_grad():\n for x, y in loader:\n \n x, y = x.to(device), y.to(device)\n \n y_ = self.tr_model(x)\n _, predicted = torch.max(y_.detach(), 1)\n \n samples += y.shape[0]\n correct += (predicted == y).sum().item()\n \n # return evaluation statistics\n return {\"accuracy\" : correct/samples}", "def _compute_final_accuracies(self, meval):\n valid_accuracy = self.eval_child_model(meval, self.data_loader, 'val')\n if self.hparams.eval_test:\n test_accuracy = self.eval_child_model(meval, self.data_loader, 'test')\n else:\n test_accuracy = 0\n tf.logging.info('Test Accuracy: {}'.format(test_accuracy))\n return valid_accuracy, test_accuracy", "def test_eval(model, test_set):\n num_test_batch = len(test_set)\n test_loss = np.zeros((num_test_batch, 1), dtype=float)\n test_acc = np.zeros((num_test_batch, 1), dtype=float)\n for ibatch, batch in enumerate(test_set):\n result = model.test_on_batch({'input':batch[0]}, {'fp1':batch[1], 'fp2':batch[1], 'fp3':batch[1], 'ave':batch[1]})\n test_loss[ibatch] = result[0]\n test_acc[ibatch] = result[-1]\n return np.mean(test_loss), np.mean(test_acc)", "def evaluate(self, y_pred, y_test):\n for i in range(len(y_pred)):\n if y_pred[i] == y_test.iloc[i]:\n self.accuracy += 1\n self.accuracy = (self.accuracy/len(y_pred))", "def do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n data_set):\n # And run one epoch of eval.\n true_count = 0 # Counts the number of correct predictions.\n steps_per_epoch = data_set.num_examples // FLAGS.batch_size\n num_examples = steps_per_epoch * FLAGS.batch_size\n for step in range(steps_per_epoch):\n 
feed_dict = fill_feed_dict(data_set,\n images_placeholder,\n labels_placeholder)\n true_count += sess.run(eval_correct, feed_dict=feed_dict)\n precision = true_count / num_examples\n print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %\n (num_examples, true_count, precision))", "def train_accuracy(self):\n # Train accuarcy\n add = np.ones(len(self.X_train))\n X_add1 = np.c_[add, self.X_train]\n pred_train = np.dot(X_add1, self.w_result.T)\n pred_train[pred_train > 0] = 1\n pred_train[pred_train < 0] = 0\n print(pred_train)\n train_check_lable = np.isclose(pred_train, self.y_train)\n num_true_lable = np.sum(train_check_lable)\n num_all_lable = np.size(train_check_lable)\n train_accuracy = num_true_lable / num_all_lable\n print(\"train_accuracy is: %f\" %train_accuracy)\n return train_accuracy", "def test(self):\n with torch.no_grad():\n self.model.eval()\n p10_forecast, p10_forecast, p90_forecast, target = None, None, None, None\n\n t = time()\n for step, sample in enumerate(self.test_loader):\n\n # Hide future predictions from input vector, set to 0 (or 1) values where timestep > encoder_steps\n steps = self.cnf.all_params['num_encoder_steps']\n pred_len = sample['outputs'].shape[1]\n x = sample['inputs'].float().to(self.cnf.device)\n x[:, steps:, 0] = 1\n\n # Feed input to the model\n if self.cnf.all_params[\"model\"] == \"transformer\" or self.cnf.all_params[\"model\"] == \"grn_transformer\":\n\n # Auto-regressive prediction\n for i in range(pred_len):\n output = self.model.forward(x)\n x[:, steps + i, 0] = output[:, i, 1]\n output = self.model.forward(x)\n\n elif self.cnf.all_params[\"model\"] == \"tf_transformer\":\n output, _, _ = self.model.forward(x)\n else:\n raise NameError\n\n output = output.squeeze()\n y, y_pred = sample['outputs'].squeeze().float().to(self.cnf.device), output\n\n # Compute loss\n loss, _ = self.loss(y_pred, y)\n smape = symmetric_mean_absolute_percentage_error(output[:, :, 1].detach().cpu().numpy(),\n sample['outputs'][:, :, 0].detach().cpu().numpy())\n\n # De-Normalize to compute metrics\n target = unnormalize_tensor(self.data_formatter, y, sample['identifier'][0][0])\n p10_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 0], sample['identifier'][0][0])\n p50_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 1], sample['identifier'][0][0])\n p90_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 2], sample['identifier'][0][0])\n\n # Compute metrics\n self.test_losses['p10'].append(self.loss.numpy_normalised_quantile_loss(p10_forecast, target, 0.1))\n self.test_losses['p50'].append(self.loss.numpy_normalised_quantile_loss(p50_forecast, target, 0.5))\n self.test_losses['p90'].append(self.loss.numpy_normalised_quantile_loss(p90_forecast, target, 0.9))\n\n self.test_loss.append(loss.item())\n self.test_smape.append(smape)\n\n # Plot serie prediction\n p1, p2, p3, target = np.expand_dims(p10_forecast, axis=-1), np.expand_dims(p50_forecast, axis=-1), \\\n np.expand_dims(p90_forecast, axis=-1), np.expand_dims(target, axis=-1)\n p = np.concatenate((p1, p2, p3), axis=-1)\n plot_temporal_serie(p, target)\n\n # Log stuff\n for k in self.test_losses.keys():\n mean_test_loss = np.mean(self.test_losses[k])\n print(f'\\t● AVG {k} Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n\n # log log log\n mean_test_loss = np.mean(self.test_loss)\n mean_smape = np.mean(self.test_smape)\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n print(f'\\t● AVG SMAPE on TEST-set: 
{mean_smape:.6f} │ T: {time() - t:.2f} s')", "def evaluate_epoch(\n axes,\n tr_loader,\n val_loader,\n te_loader,\n model,\n criterion,\n epoch,\n stats,\n include_test=False,\n update_plot=True,\n multiclass=False,\n):\n\n def _get_metrics(loader):\n y_true, y_pred = [], []\n # correct, total = 0, 0\n\n running_loss = []\n\n # for X, y in loader:\n for i, batch in enumerate(loader):\n with torch.no_grad():\n print(\"evaluating... batch number\", i)\n # X = torch.Tensor(batch[\"image\"]).to(\"cuda\")\n # y = torch.Tensor(batch[\"depth\"]).to(device=\"cuda\")\n X = torch.Tensor(batch[\"image\"])\n y = torch.Tensor(batch[\"depth\"])\n\n output = model(X)\n predicted = predictions(output.data)\n y_true.append(y)\n y_pred.append(predicted)\n\n # total += y.size(0)\n\n # correct += (predicted == y).sum().item()\n\n # Calculate the net loss of this batch\n loss = criterion(predicted, y)\n # gradient_loss = gradient_criterion(predicted, y, device=\"cuda\")\n # running_loss.append(criterion(output, y).item())\n running_loss.append(loss)\n\n y_true = torch.cat(y_true)\n y_pred = torch.cat(y_pred)\n\n loss = np.mean(running_loss)\n\n acc = 0.9\n auroc = 0.9\n\n return acc, loss, auroc\n\n print(\"evaluating training set...\")\n train_acc, train_loss, train_auc = _get_metrics(tr_loader)\n print(\"evaluating validation set\")\n val_acc, val_loss, val_auc = _get_metrics(val_loader)\n\n stats_at_epoch = [\n val_acc,\n val_loss,\n val_auc,\n train_acc,\n train_loss,\n train_auc,\n ]\n if include_test:\n print(\"evaluating testing set...\")\n stats_at_epoch += list(_get_metrics(te_loader))\n\n stats.append(stats_at_epoch)\n # utils.log_training(epoch, stats)\n # if update_plot:\n # utils.update_training_plot(axes, epoch, stats)", "def run_validation(self):\n stats = {'loss': 0,\n 'accuracy': 0,\n 'time_error': 0,\n 'location_error': np.zeros(3),\n 'total_pts': 0,\n 'scatter': []}\n run_variables = [self.loss, self.conf, self.occ, self.occ_t, self.occ_loc, self.output['time'],\n self.output['location']]\n\n self.session.run(self.val_init)\n while True:\n try:\n (loss, confidences, occurrences, times,\n locations, time_output, location_output) = self.session.run(run_variables)\n n_pts = confidences.shape[0]\n stats['loss'] += loss * n_pts\n stats['accuracy'] += np.sum(np.round(confidences) == occurrences)\n stats['time_error'] += np.sum(np.abs(times-time_output))\n stats['location_error'] += np.sum(np.abs(times - time_output), axis=0)\n stats['scatter'].append(location_output - locations)\n stats['total_pts'] += n_pts\n except tf.errors.OutOfRangeError:\n stats['loss'] /= stats['total_pts']\n stats['accuracy'] /= stats['total_pts']\n stats['time_error'] /= stats['total_pts']\n stats['location_error'] /= stats['total_pts']\n stats['scatter'] = np.concatenate(stats['scatter'], axis=0)\n return stats", "def evaluate_acc(\n model,\n ds\n):\n n = 0\n correct = 0\n for batch_x, batch_y in ds:\n batch_pred = get_model_prediction(model, batch_x)\n correct += tf.math.reduce_sum(\n tf.cast(batch_pred == batch_y, dtype=tf.int32)\n )\n n += batch_y.shape[0]\n return correct / n", "def do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n inputs, targets):\n # And run one epoch of eval.\n true_count = 0 # Counts the number of correct predictions.\n steps_per_epoch = len(inputs) // batch_size\n num_examples = steps_per_epoch * batch_size\n start_index = 0;\n for step in xrange(steps_per_epoch):\n feed_dict = fill_feed_dict(images_placeholder, labels_placeholder, inputs, targets, start_index, 
batch_size)\n start_index = (start_index + batch_size)\n\n true_count += sess.run(eval_correct, feed_dict=feed_dict)\n precision = float(true_count) / num_examples\n print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %\n (num_examples, true_count, precision))", "def validate(self):\n self.set_model_mode('eval')\n self.evaluator.reset()\n losses = MetricMeter()\n\n print('Do evaluation on {} set'.format('valid set'))\n data_loader = self.val_loader\n assert data_loader is not None\n for batch_idx, batch in enumerate(data_loader):\n input, label = self.parse_batch_test(batch)\n loss = self.forward_backward(batch, backprob=False)\n losses.update(loss)\n # total_loss += loss['loss']\n output = self.model_inference(input)\n self.evaluator.process(output, label)\n\n results = self.evaluator.evaluate()\n total_loss = losses.meters['loss_x'].avg\n\n for k, v in results.items():\n tag = '{}/{}'.format('validation', k)\n self.write_scalar(tag, v, self.epoch)\n # if full_results:\n return [total_loss,losses.dict_results(),results]\n # return total_loss", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def check_test_accuracy(self):\n print('\\n# Evaluate on test data')\n results = self.model.evaluate(self.data.test_dataset)\n print('\\ntest loss, test acc:', results)", "def run_evaluate(self, test):\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n for words, labels in minibatches(test, self.config.batch_size):\n labels_pred, sequence_lengths = self.predict_batch(words)\n\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n accs += [a==b for (a, b) in zip(lab, lab_pred)]\n\n lab_chunks = set(get_chunks(lab, self.config.vocab_tags))\n lab_pred_chunks = set(get_chunks(lab_pred,\n self.config.vocab_tags))\n\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n print('*'*20, '\\n')\n print('precision:', p, 'recall:', r, 'f1:', f1, '\\n')\n print('*'*20)\n\n return {\"acc\": 100*acc, \"f1\": 100*f1}", "def run_analyses(y_predict_train, y_train, y_predict, y_test):\n # calculate metrics\n _, training_error = output_error(y_predict_train, y_train)\n (precision, recall, f1, _), testing_error = output_error(y_predict, y_test)\n \n # print out metrics\n print 'Average Precision:', np.average(precision)\n print 'Average Recall:', np.average(recall)\n print 'Average F1:', np.average(f1)\n print 'Training Error:', training_error\n print 'Testing Error:', testing_error", "def evaluate(sess, images_ph, labels_ph, softmax, mnist, config, task):\n\n print 'Evaluating on {} task ({}x{}, {} distractors) using {} glimpses (at {} scales)'.format(\n task, config.new_size, config.new_size, config.n_distractors,\n config.num_glimpses, config.n_patches)\n\n # Evaluation\n 
test_acc = []\n val_acc = []\n\n for k, dataset in enumerate([mnist.validation, mnist.test]):\n\n steps_per_epoch = dataset.num_examples // config.eval_batch_size\n correct_cnt = 0\n num_samples = steps_per_epoch * config.batch_size\n # loc_net.sampling = True\n\n for test_step in tqdm(xrange(steps_per_epoch)):\n\n images, labels = dataset.next_batch(config.batch_size)\n images = images.reshape((-1, config.original_size, config.original_size, 1))\n labels_bak = labels\n\n if task == 'translated':\n images = translate(images, width=config.new_size, height=config.new_size)\n elif task == 'cluttered':\n images = clutter(images,\n dataset.images.reshape((-1, config.original_size, config.original_size, 1)),\n width=config.new_size, height=config.new_size, n_patches=config.n_distractors\n )\n elif task == 'cluttered_var':\n images, _, _, _ = clutter_rnd(images,\n train_data=dataset.images.reshape(\n (-1, config.original_size, config.original_size, 1)),\n lim=config.distractor_range,\n color_digits=config.color_digits,\n color_noise=config.color_noise,\n width=config.new_size, height=config.new_size, norm=True)\n\n # else:\n # print 'original mnist data ({}x{}).'.format(config.original_size,config.original_size)\n\n # Duplicate M times (average prediction over M repeats)\n images = np.tile(images, [config.M, 1, 1, 1])\n labels = np.tile(labels, [config.M])\n\n softmax_val = sess.run(softmax,\n feed_dict={\n images_ph: images,\n labels_ph: labels\n })\n softmax_val = np.reshape(softmax_val,\n [config.M, -1, config.num_classes])\n softmax_val = np.mean(softmax_val, 0)\n\n pred_labels_val = np.argmax(softmax_val, 1)\n correct_cnt += np.sum(pred_labels_val == labels_bak)\n acc = correct_cnt / float(num_samples)\n\n if k == 0:\n print '\\nVal accuracy\\t{:4.4f} ({:4.4f} error)'.format(100 * acc, 100 - 100 * acc)\n val_acc = acc\n else:\n print 'Test accuracy\\t{:4.4f} ({:4.4f} error)\\n'.format(100 * acc, 100 - 100 * acc)\n test_acc = acc\n\n return test_acc, val_acc", "def fit(self, sess, save=True):\n losses = []\n val_accuracies = []\n for epoch in range(self.config.num_epochs):\n start_time = time.time()\n average_loss, epoch_accuracies = self.run_epoch(sess, epoch)\n val_accuracies.extend(epoch_accuracies)\n duration = time.time() - start_time\n print('Epoch {:}: loss = {:.2f} ({:.3f} sec)'.format(epoch, average_loss, duration))\n losses.append(average_loss)\n if save: self.val_accuracies = val_accuracies\n return losses, val_accuracies", "def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def do_eval(sess,model,valid,batch_size):\n valid_X,valid_y,valid_p=valid\n number_examples=valid_X.shape[0]\n if number_examples>10000:\n number_examples=validation_size\n print(\"do_eval.valid.number_examples:\",number_examples)\n if number_examples>validation_size: valid_X,valid_y,valid_p=valid_X[0:validation_size],valid_y[0:validation_size],valid_p[0:validation_size]\n eval_loss,eval_counter,eval_acc=0.0,0,0.0\n for start,end in zip(range(0,number_examples,batch_size),range(batch_size,number_examples,batch_size)):\n feed_dict = {model.x_mask_lm: valid_X[start:end],model.y_mask_lm: valid_y[start:end],model.p_mask_lm:valid_p[start:end],\n model.dropout_keep_prob: 1.0} # FLAGS.dropout_keep_prob\n curr_eval_loss, logits_lm, accuracy_lm= sess.run([model.loss_val_lm,model.logits_lm,model.accuracy_lm],feed_dict) # logits:[batch_size,label_size]\n eval_loss=eval_loss+curr_eval_loss\n 
eval_acc=eval_acc+accuracy_lm\n eval_counter=eval_counter+1\n return eval_loss/float(eval_counter+small_value), eval_acc/float(eval_counter+small_value)", "def test(self):\n for data_tier in self.data_tiers:\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.floor(tot*0.2))\n test_features = np.array(self.preprocessed_data[data_tier]['features'][p:])\n trend_test_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][p:])\n avg_test_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][p:])\n accuracy_trend = self.clf_trend[data_tier].score(test_features, trend_test_classifications)\n accuracy_avg = self.clf_avg[data_tier].score(test_features, avg_test_classifications)\n self.logger.info('The accuracy of %s trend classifier for data tier %s is %.3f', self.name, data_tier, accuracy_trend)\n self.logger.info('The accuracy of %s avg regressor for data tier %s is %.3f', self.name, data_tier, accuracy_avg)", "def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)", "def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy", "def run_evaluation(self, epoch=0, global_step=0, verbose=True):\n\n # step-1, compute predictions on test set\n while True:\n try:\n preds_all = helper_utils.compute_predictions(\n self.session, self.meval, global_step, self.test_files, self.comet_exp\n )\n # If epoch trained without raising the below errors, break from loop.\n break\n except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:\n tf.logging.info('Retryable error caught: {}. 
Retrying.'.format(e))\n\n # step-2 evaluate on predictions\n results = helper_utils.eval_predictions(\n self.gt_depths, preds_all, global_step, min_depth=self.hparams.min_depth,\n max_depth=self.hparams.max_depth, verbose=verbose, comet_exp=self.comet_exp\n )\n return results, preds_all", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.data.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def run_evaluate(self, test):\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n for words, labels in minibatches(test, self.config.batch_size):\n labels_pred, sequence_lengths = self.predict_batch(words)\n\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n accs += [a==b for (a, b) in zip(lab, lab_pred)]\n\n lab_chunks = set(get_chunks(lab, self.config.vocab_tags))\n lab_pred_chunks = set(get_chunks(lab_pred,\n self.config.vocab_tags))\n\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n\n return {\"acc\": 100*acc, \"f1\": 100*f1}", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def evaluate(self, data, labels, batch_size=32, max_seq_len=128):\n test_dataloader = setup_dataloader(data, labels, max_seq_len, batch_size)\n accuracy = 0\n \n for batch in tqdm(test_dataloader, desc=\"Iteration\"):\n with torch.no_grad():\n labels = batch[\"labels\"]\n batch = {k: t.to(self.device) for k, t in batch.items() if k != \"labels\"}\n outputs = self.model(**batch)\n logits = outputs[0]\n accuracy += calculate_accuracy(logits, labels)\n \n batch = {k: t.detach().cpu() for k, t in batch.items()}\n del batch\n torch.cuda.empty_cache()\n\n accuracy = accuracy / len(test_dataloader)\n return accuracy", "def evaluate(model, optimizer, loss_function, loader, device, labels, log_every_n=10):\n\n model.eval()\n\n batch_wise_true_labels = []\n batch_wise_predictions = []\n\n loss_history = []\n running_loss = 0.\n running_loss_history = []\n\n with torch.no_grad(): # Disable gradient computation - required only during training\n for i, batch in tqdm(enumerate(loader)):\n\n 
logits = model(batch[0].to(device), batch[1]).squeeze()\n loss = loss_function(logits, batch[2].to(device))\n loss_history.append(loss.item())\n\n running_loss += (loss_history[-1] - running_loss) / (i + 1) # Compute rolling average\n\n running_loss_history.append(running_loss)\n\n predictions = torch.sigmoid(logits)\n\n batch_wise_true_labels.append(batch[2].view(-1).tolist())\n batch_wise_predictions.append(predictions.view(-1).tolist())\n\n # flatten the list of predictions using itertools\n all_true_labels = list(chain.from_iterable(batch_wise_true_labels))\n all_predictions = list(chain.from_iterable(batch_wise_predictions))\n all_predictions = [1 if p > 0.5 else 0 for p in all_predictions]\n\n\n print(\"Evaluation Loss: \", running_loss)\n # Now we can generate a classification report\n print(\"Classification report after epoch:\")\n print(f1_score(all_true_labels, all_predictions, average='micro'))\n print(classification_report(all_true_labels, all_predictions, labels=labels))\n\n return loss_history, running_loss_history", "def train_and_evaluate(self) -> None:\n with tf.Session() as self.sess:\n # Initialize computation graph.\n self.create_model()\n\n # Initialize variables.\n tf.global_variables_initializer().run()\n\n # Initialize summary writer.\n self.writer = tf.summary.FileWriter(logdir='conv_vis')\n\n for epoch_no in range(self.nb_epochs):\n # Train model on next batch\n batch_x, batch_y = self.mnist.train.next_batch(self.mb_size)\n results = self.train_on_batch(batch_x, batch_y, global_step=epoch_no)\n\n if epoch_no > 0 and epoch_no % self.lr_decay_time == 0:\n # Test on all samples.\n self.test_on_all()\n # Perform learning rate decay.\n self.learning_rate /= 2\n if epoch_no % 100 == 0:\n self.logger.info(\"Epoch {0}: Loss: {1[0]}, accuracy: {1[1]}\".format(epoch_no, results))\n batch_x_t, batch_y_t = self.mnist.test.next_batch(self.mb_size)\n test_results = self.test_on_batch(batch_x_t, batch_y_t)\n self.logger.info(\"(Test(batch): Loss: {0[0]}, accuracy: {0[1]}\".format(test_results))\n self.test_on_all()\n\n # Save the trained model with all valuable variables.\n saver = tf.train.Saver()\n saver.save(sess=self.sess, save_path='./saved_model', global_step=epoch_no)", "def train(self, sess): \n\n logging.info(\"////////////////////////////\")\n logging.info(\"///// BEGIN TRAINING /////\")\n logging.info(\"////////////////////////////\")\n\n # for TensorBoard\n summaryWriter = tf.summary.FileWriter(\n \"./checkpoints/\", \n sess.graph)\n\n # Initialize iterator\n sess.run(self.train_iter.initializer)\n\n # Print initial model predictions\n emaTrainLoss = self.get_loss(sess, dSet=\"train\")\n emaTrainAccr = self.get_accuracy(sess, dSet=\"train\")\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n logging.info(\"Initial training Loss / Accuracy: %f / %f)\" % (emaTrainLoss, emaTrainAccr))\n logging.info(\"Initial validation Loss / Accuracy: %f / %f)\" % (valLoss, valAccr))\n\n randomRatio = 1.0\n epoch = 0\n best_val_loss = None\n best_val_acc = None\n\n\n ###### Loop over epochs #####\n while (self.FLAGS.Nepochs is 0) or (epoch <= self.FLAGS.Nepochs):\n epoch += 1\n epoch_tic = time.time()\n\n # Evaluate test and validation data\n trnLoss = self.get_loss(sess, dSet=\"train\")\n trnAccr = self.get_accuracy(sess, dSet=\"train\")\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n\n print_info = \"Full Sets\\tTraining %.5f / %.5f \\tValidation %.5f / %.5f\" %\\\n (trnLoss, trnAccr, 
valLoss, valAccr)\n logging.info(\"\\n\\n///// Begin Epoch {} /////\\n\".format(epoch)\n + print_info)\n\n\n # Initialize iterator\n sess.run(self.train_iter.initializer)\n\n ##### Loop over mini batches #####\n while True:\n\n # Perform training step\n try :\n tstep_tic = time.time()\n curLoss, curAccr, global_step = self.run_train_step(sess, summaryWriter)\n tstep_toc = time.time()\n tstep_time = tstep_toc - tstep_tic\n except tf.errors.OutOfRangeError:\n break\n\n # Update training history parameters\n emaTrainLoss = curLoss*(1-self.FLAGS.train_variable_decay)\\\n + emaTrainLoss*self.FLAGS.train_variable_decay \n emaTrainAccr = curAccr*(1-self.FLAGS.train_variable_decay)\\\n + emaTrainAccr*self.FLAGS.train_variable_decay \n\n ### Evaluate model ###\n if global_step % self.FLAGS.eval_every == 0:\n\n # Save training data measurements\n self.writeSummary(emaTrainLoss, \"train/loss\", summaryWriter, global_step)\n self.writeSummary(emaTrainAccr, \"train/acc\", summaryWriter, global_step)\n self.history[\"step\"].append(global_step)\n self.history[\"trainLoss\"].append(emaTrainLoss)\n self.history[\"trainAccr\"].append(emaTrainAccr)\n\n # Evaluate validation data\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccr = self.get_accuracy(sess, dSet=\"val\")\n\n self.writeSummary(valLoss, \"val/loss\", summaryWriter, global_step)\n self.writeSummary(valAccr, \"val/acc\", summaryWriter, global_step)\n self.history[\"validLoss\"].append(valLoss)\n self.history[\"validAccr\"].append(valAccr)\n\n # Logging results\n print_info = \"%i\\tTraining %.5f / %.5f \\tValidation %.5f / %.5f\" %\\\n (global_step, emaTrainLoss, emaTrainAccr, valLoss, valAccr)\n logging.info(print_info)\n\n # plot training progress\n self.plot_results()\n\n\n # Save model\n if global_step % self.FLAGS.save_every == 0:\n logging.info(\"Saving model at iteration {} to {}\".format(\n global_step, self.FLAGS.checkpoint_path))\n self.saver.save(sess, \n self.FLAGS.checkpoint_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.checkpoint_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n\n\n # Evaluate validation data\n valLoss = self.get_loss(sess, dSet=\"val\")\n valAccs = self.get_accuracy(sess, dSet=\"val\")\n\n # Save best models\n if (best_val_loss is None) or (valLoss < best_val_loss):\n logging.info(\"Saving best loss model at iteration {} in {}\".format(\n global_step, self.FLAGS.bestModel_loss_ckpt_path))\n best_val_loss = valLoss\n self.bestLossSaver.save(sess, \n self.FLAGS.bestModel_loss_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.bestModel_loss_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n if (best_val_acc is None) or (valAccs > best_val_acc):\n logging.info(\"Saving best accuracy model at iteration {} in {}\".format(\n global_step, self.FLAGS.bestModel_acc_ckpt_path))\n best_val_acc = valAccs\n self.bestAccSaver.save(sess, \n self.FLAGS.bestModel_acc_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n self.saveTrainingHistory(\n fileName=self.FLAGS.bestModel_acc_ckpt_path + \"/\" + self.FLAGS.experiment_name, \n global_step=global_step)\n\n\n loss_train = self.get_loss(sess, dSet=\"train\")\n acc_train = self.get_accuracy(sess, dSet=\"train\")\n\n loss_val = self.get_loss(sess, dSet=\"val\")\n acc_val = self.get_accuracy(sess, dSet=\"val\")\n\n print(loss_train, acc_train)\n if 
self.FLAGS.verbose:\n print(\"\\n\\n\")\n print(\"###########################\")\n print(\"##### Final Results #####\")\n print(\"###########################\")\n print(\"\\nTraining [ Loss: %f\\t Accuracy: %f]\" \\\n % (loss_train, acc_train))\n print(\"Validation [ Loss: %f\\t Accuracy: %f]\" \\\n % (loss_val, acc_val))\n \n self.hasTrained = True", "def check_accuracy(validation_iterator, model, criterion):\n val_losses = []\n val_accuracies = []\n with torch.no_grad():\n for val_batch_idx, val_batch in enumerate(validation_iterator):\n val_hyp, val_hyp_length = val_batch.hypothesis\n val_prem, val_prem_length = val_batch.premise\n val_target = val_batch.label - 1\n scores = model(val_prem, val_hyp, val_prem_length, val_hyp_length)\n loss = criterion(scores, val_target)\n # return the indices of each prediction\n _, predictions = scores.max(1)\n num_correct = float((predictions == val_target).sum())\n num_sample = float(predictions.size(0))\n val_losses.append(loss.item())\n val_accuracies.append(num_correct / num_sample)\n return val_losses, val_accuracies", "def train_eval(model, train_set):\n num_train_batch = len(train_set)\n train_loss = np.zeros((num_train_batch, 1), dtype=float)\n train_acc = np.zeros((num_train_batch, 1), dtype=float)\n shuffle(train_set)\n for ibatch, batch in enumerate(train_set):\n result = model.train_on_batch({'input':batch[0]}, {'fp1':batch[1], 'fp2':batch[1], 'fp3':batch[1], 'ave':batch[1]})\n train_loss[ibatch] = result[0]\n train_acc[ibatch] = result[-1]\n return np.mean(train_loss), np.mean(train_acc)", "def test_nn_predicts_accurate_results(self):\n self.nn.train_nn(self.X_train, self.y_train, 6, 10, 0.06)\n accuracy = 0\n X_test, y_test = load_data(\"../data/testdata.mat.tar.gz\")\n for i in range(len(X_test[:100])):\n out = self.nn.forward_prop(X_test[i])[0][-1]\n if np.argmax(out) == np.where(y_test[i])[0][0]:\n accuracy += 1\n else:\n print(\"Incorrect\", np.argmax(out))\n print(\"accuracy: \", accuracy)\n self.assertGreaterEqual(accuracy, 70)", "def evaluate_individual(predictions, test_files, models):\n\n print(\"\\nAccuracy for individual models\\n\")\n \n # Fix Location\n correct_predictions = [0, 0, 0]\n total_predictions = [0, 0, 0]\n num_failed_predictions = 0\n\n for prediction in predictions:\n if prediction[\"correct_data\"][\"correct_location\"] == prediction[\"predicted_location\"]:\n correct_predictions[FixType[prediction[\"correct_data\"][\"correct_type\"]].value] = correct_predictions[FixType[\n prediction[\"correct_data\"][\"correct_type\"]].value] + 1\n if prediction[\"predicted_location\"] is None:\n num_failed_predictions = num_failed_predictions + 1\n total_predictions[FixType[prediction[\"correct_data\"][\"correct_type\"]].value] = total_predictions[FixType[\n prediction[\"correct_data\"][\"correct_type\"]].value] + 1\n\n for i in range(3):\n if total_predictions[i] == 0: # If the type was never predicted\n accuracy = 0\n else:\n accuracy = correct_predictions[i] / total_predictions[i]\n print(f\"Fix Location accuracy for class {FixType(i).name}: {accuracy * 100} %\")\n\n accuracy = sum(correct_predictions) / (len(predictions) - num_failed_predictions)\n print(f\"Fix Location accuracy overall is {accuracy * 100} %\")\n \n # Fix type\n correct_predictions = [0, 0, 0]\n total_predictions = [0, 0, 0]\n num_failed_predictions = 0\n\n for prediction in predictions:\n if prediction[\"correct_data\"][\"correct_type\"] == prediction[\"predicted_type\"]:\n correct_predictions[FixType[prediction[\"predicted_type\"]].value] = 
correct_predictions[FixType[\n prediction[\"predicted_type\"]].value] + 1\n if prediction[\"predicted_type\"] is None:\n num_failed_predictions = num_failed_predictions + 1\n total_predictions[FixType[prediction[\"predicted_type\"]].value] = total_predictions[FixType[\n prediction[\"predicted_type\"]].value] + 1\n\n for i in range(3):\n if total_predictions[i] == 0: # If the type was never predicted\n accuracy = 0\n else:\n accuracy = correct_predictions[i] / total_predictions[i]\n print(f\"Fix Type accuracy for class {FixType(i).name}: {accuracy * 100} %\")\n\n accuracy = sum(correct_predictions) / (len(predictions) - num_failed_predictions)\n print(f\"Fix Type accuracy overall is {accuracy * 100} %\")\n \n # We repeat the predictions to evaluate the insert and modify models individually, regardless of the predicted fix type \n\n raw_training_samples = []\n\n if test_files.endswith(\".json\"): # Single JSON file\n with open(test_files) as file:\n logging.info(\"Source ending in .json. Predicting on single JSON file.\")\n raw_training_samples = json.load(file)\n else: # Folder path\n for filename in listdir(test_files):\n with open(test_files + filename) as file:\n raw_training_samples.extend(json.load(file))\n \n correct_predictions_insert = 0\n total_predictions_insert = 0\n correct_predictions_modify = 0\n total_predictions_modify = 0\n insert_tokens = []\n modify_tokens = []\n\n for sample in raw_training_samples:\n # Insert\n if sample[\"metadata\"][\"fix_type\"] == \"insert\":\n actual_sample, tokens = IOProcessor.preprocess(sample[\"wrong_code\"])\n pred = predict_single(actual_sample, models[2])\n token = IOProcessor.postprocess(pred, 2)\n if token == sample[\"metadata\"][\"fix_token\"]: # Correct Prediction\n correct_predictions_insert = correct_predictions_insert + 1\n else: # Incorrect prediction\n insert_tokens.append([token, sample[\"metadata\"][\"fix_token\"]])\n total_predictions_insert = total_predictions_insert + 1\n # Modify\n if sample[\"metadata\"][\"fix_type\"] == \"modify\":\n actual_sample, tokens = IOProcessor.preprocess(sample[\"wrong_code\"])\n pred = predict_single(actual_sample, models[3])\n token = IOProcessor.postprocess(pred, 3)\n if token == sample[\"metadata\"][\"fix_token\"]: # Correct Prediction\n correct_predictions_modify = correct_predictions_modify + 1\n else: # Incorrect prediction\n modify_tokens.append([token, sample[\"metadata\"][\"fix_token\"]])\n total_predictions_modify = total_predictions_modify + 1\n\n insert_accuracy = correct_predictions_insert / total_predictions_insert\n modify_accuracy = correct_predictions_modify / total_predictions_modify\n print(f\"Fix Token accuracy for insert is {insert_accuracy * 100} %\")\n print(f\"Fix Token accuracy for modify is {modify_accuracy * 100} %\")\n\n # The following code may be used to create a swarm plot of the erroneous predictions for fix locations\n # This does, however, require the installation of the pandas, seaborn, and matplotlib libraries.\n \n # import seaborn as sns\n # import matplotlib.pyplot as plt\n # import pandas as pd\n # location_distance_array = []\n # for prediction in predictions:\n # actual_sample, tokens = IOProcessor.preprocess(prediction[\"correct_data\"][\"wrong_code\"])\n # label = get_token_index(prediction[\"correct_data\"][\"wrong_code\"], tokens, prediction[\"correct_data\"][\"correct_location\"])\n # if prediction[\"predicted_token_location\"] - label == 0:\n # pass\n # else:\n # location_distance_array.append([prediction[\"predicted_token_location\"] - label, 
prediction[\"correct_data\"][\"correct_type\"]])\n \n # df = pd.DataFrame(data=location_distance_array)\n # sns.set_theme(style=\"whitegrid\")\n # f, ax = plt.subplots(figsize=(6, 4))\n # sns.despine(bottom=True, left=True)\n # sns.swarmplot(y=0, x=1, data=df, palette=\"dark\", size=6)\n # ax.set_xlabel('')\n # ax.set_ylabel('')\n # plt.ylim([-15, 16])\n \n # plt.savefig('line_plot.pdf', bbox_inches='tight', pad_inches=0)", "def do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n data_set):\n # And run one epoch of eval.\n true_count = 0 # Counts the number of correct predictions.\n steps_per_epoch = data_set.num_examples // FLAGS.batch_size\n num_examples = steps_per_epoch * FLAGS.batch_size\n for step in xrange(steps_per_epoch):\n feed_dict = fill_feed_dict(data_set,\n images_placeholder,\n labels_placeholder, phase_pl, False)\n true_count += sess.run(eval_correct, feed_dict=feed_dict)\n precision = float(true_count) / num_examples\n \n \n print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %\n (num_examples, true_count, precision))\n return precision", "def compute_model_score(model,train_X,train_Y,test_X,test_Y, id_run):\n\t##\n\t## IN PROGRESS\n\t##\n\n\t## parameters\n\tbatch_size = 32\n\tepochs = 2\n\tnum_classes = 2\n\n\t## data pre-processing\n\ttrain_X = train_X.astype('float32')\n\ttest_X = test_X.astype('float32')\n\ttrain_X = train_X / 255.\n\ttest_X = test_X / 255.\n\n\t# Change the labels from categorical to one-hot encoding\n\ttrain_Y_one_hot = to_categorical(train_Y)\n\ttest_Y_one_hot = to_categorical(test_Y)\n\n\t## split dtaa\n\ttrain_X,valid_X,train_label,valid_label = train_test_split(train_X, train_Y_one_hot, test_size=0.2, random_state=13)\n\n\t## prepare the model\n\tmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(),metrics=['accuracy'])\n\n\tprint model.summary()\n\n\t## train the model\n\tfashion_train = model.fit(train_X, train_label, batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(valid_X, valid_label))\n\n\t# evaluate the model\n\tlog_file = open(\"log/\"+str(id_run)+\".log\", \"w\")\n\ttest_eval = model.evaluate(test_X, test_Y_one_hot, verbose=0)\n\tlog_file.write('Test loss:'+str(test_eval[0]))\n\tlog_file.write('Test accuracy:'+str(test_eval[1]))\n\tlog_file.close()\n\n\t## create a few figures\n\taccuracy = fashion_train.history['acc']\n\tval_accuracy = fashion_train.history['val_acc']\n\tloss = fashion_train.history['loss']\n\tval_loss = fashion_train.history['val_loss']\n\tepochs = range(len(accuracy))\n\tplt.figure()\n\tplt.plot(epochs, accuracy, 'bo', label='Training accuracy')\n\tplt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')\n\tplt.title('Training and validation accuracy')\n\tplt.legend()\n\tplt.savefig(\"log/\"+str(id_run)+\"_acc.png\")\n\tplt.close()\n\tplt.figure()\n\tplt.plot(epochs, loss, 'bo', label='Training loss')\n\tplt.plot(epochs, val_loss, 'b', label='Validation loss')\n\tplt.title('Training and validation loss')\n\tplt.legend()\n\tplt.savefig(\"log/\"+str(id_run)+\"_loss.png\")\n\tplt.close()\n\n\t## return accuracy as a score\n\treturn test_eval[1]", "def model_testing(X_train,y_train):\n\n # for testing amount of layers, each layer has 32 neurons\n # layers = [[32, 32], [32, 32, 32], [32, 32, 32, 32], [32, 32, 32, 32],\\\n # [32, 32, 32, 32, 32], [32, 32, 32, 32, 32, 32]]\n layers = [[8], [16], [32], [64], [128], [256]]\n\n # activation = [\"linear\", \"sigmoid\", \"relu\", \"softmax\"]\n activation = [\"relu\"]\n 
runs = 1\n for i, act in enumerate(activation):\n val_accs = []\n for layer in layers:\n acc_avg = []\n for run in range(runs):\n model = create_model_testing(layer, act)\n\n # train model on full train set, with 80/20 CV split\n training = model.fit(X_train, y_train, epochs=100, validation_split=0.2, verbose=0)\n val_acc = np.mean(training.history['val_accuracy'])\n print(\"Run \", run, \" - \", act + \" activation - layer \" + str(layer))\n acc_avg.append(val_acc)\n\n # save average accuracy of runs\n val_accs.append(round(np.mean(acc_avg)*100, 2))\n print(\"accuracy: \" + str(np.mean(acc_avg)))\n\n # plot line for each activation method\n plt.plot([1,2,4,8,16,32,64,128,256], val_accs, label=act)\n # plt.plot(val_accs, label=act)\n\n # plotting\n plt.title(\"Accuracy of neural network model with different layers (N=\" +\\\n str(len(layers)) + \")\", fontsize=22)\n plt.xlabel(\"Layers\", fontsize=20)\n # plt.xticks(np.arange(1, len(val_accs) + 1, 1), fontsize=18)\n plt.ylabel(\"Accuracy (%)\", fontsize=20)\n plt.legend()\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/linear-relu-\" + str(runs) + \"runs.png\")\n plt.show()", "def accuracy(self, X_train, X_test):\n loss, accuracy = self.estimator.evaluate(X_test, X_train)\n return accuracy", "def validate(self, inputs, labels):\n # Set the phase to test.\n tf.keras.backend.set_learning_phase(0)\n accuracy = self.sess.run([self.accuracy_eval],\n feed_dict={\n self.inputs_eval: inputs,\n self.labels_eval: labels\n })\n costs = self.sess.run(self.cost_eval,\n feed_dict={\n self.inputs_eval: inputs,\n self.labels_eval: labels\n })\n return accuracy, costs", "def evaluate_accuracy(net, data_iter): #@save\n if isinstance(net, torch.nn.Module):\n net.eval() # Set the model to evaluation mode\n metric = Accumulator(2) # No. of correct predictions, no. 
of predictions\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), len(y))\n return metric[0] / metric[1]", "def validation_epoch(self):\n self.model.eval()\n\n # Compute for training set\n train_loss, train_acc = compute_loss_and_accuracy(\n self.dataloader_train, self.model, self.loss_criterion\n )\n self.TRAIN_ACC.append(train_acc)\n self.TRAIN_LOSS.append(train_loss)\n\n # Compute for validation set\n validation_loss, validation_acc = compute_loss_and_accuracy(\n self.dataloader_val, self.model, self.loss_criterion\n )\n self.VALIDATION_ACC.append(validation_acc)\n self.VALIDATION_LOSS.append(validation_loss)\n print(\"Current validation loss:\", validation_loss, \" Accuracy:\", validation_acc)\n # Compute for testing set\n test_loss, test_acc = compute_loss_and_accuracy(\n self.dataloader_test, self.model, self.loss_criterion\n )\n self.TEST_ACC.append(test_acc)\n self.TEST_LOSS.append(test_loss)\n\n self.model.train()", "def _evaluate_during_fit(self, test_loader, epoch):", "def evaluate(model, iterations, use_cuda=False):\n\n logger.debug(\"Allocating input and target tensors on GPU : %r\", use_cuda)\n\n # create the instance of data loader\n data_loader = DataLoaderMnist(cuda=use_cuda, seed=1, shuffle=False, train_batch_size=64, test_batch_size=100)\n\n model.eval()\n total = 0\n correct = 0\n current_iterations = 0\n\n with torch.no_grad():\n for inputs, labels in data_loader.test_loader:\n inputs, labels = inputs.to(data_loader.device), labels.to(data_loader.device)\n output = model(inputs)\n current_iterations += 1\n _, predicted = torch.max(output.data, dim=1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if iterations is not None:\n if current_iterations >= iterations:\n break\n\n accuracy = correct / total\n return accuracy", "def accuracy(self):\r\n # Load tarined model using intent id.\r\n clf = joblib.load(filename=self.intention_id+'.pkl')\r\n # Compute accuracy for hole training data and return.\r\n return clf.score(X=self.training_data, y=self.target_data)", "def valid_one_epoch(self):\n prog_bar = tqdm(enumerate(self.valid_data), total=len(self.valid_data))\n self.model.eval()\n all_targets = []\n all_predictions = []\n with torch.no_grad():\n for idx, inputs in prog_bar:\n ids = inputs['inputs'].to(self.device, dtype=torch.long)\n mask = inputs['attention_mask'].to(self.device, dtype=torch.long)\n targets = inputs['targets'].to(self.device, dtype=torch.float)\n\n outputs = self.model(input_ids=ids, attention_mask=mask)\n all_targets.extend(targets.cpu().detach().numpy().tolist())\n all_predictions.extend(outputs.cpu().detach().numpy().tolist())\n\n val_rmse_loss = np.sqrt(mean_squared_error(all_targets, all_predictions))\n print('Validation RMSE: {:.2f}'.format(val_rmse_loss))\n \n return val_rmse_loss", "def evaluate(self):\n self.training = False", "def _running_eval_acc(y_hat: torch.Tensor, y_truth: torch.Tensor, label_mask,\n n_total: defaultdict = None, n_correct: defaultdict = None,\n soft_to_hard_fn: Callable = None,\n soft_to_hard_fn_kwargs: dict = None):\n\n y_hat_preds = torch.argmax(y_hat, dim=2)\n\n if not n_total:\n n_total = defaultdict(int)\n\n if not n_correct:\n n_correct = defaultdict(int)\n\n for batch in range(label_mask.shape[0]):\n mask = label_mask.data[batch]\n preds = y_hat_preds.data[batch]\n labels = y_truth.data[batch]\n\n for i, m in enumerate(mask):\n if m == 1:\n n_total[labels[i].item()] += 1\n n_correct[labels[i].item()] += preds[i].item() == labels[i].item()\n\n acc = 0.\n weight = 1. 
/ len(n_total.keys())\n # 1.0 - n_total[class] / sum(n_total) = weight\n # Apply per class weight, sum to 1.0\n\n for k in n_total.keys():\n val = 0\n if n_total[k] > 0:\n val = float(n_correct[k]) / float(n_total[k])\n acc += val\n\n acc *= 100. * weight\n\n return acc, n_total, n_correct", "def evaluate_ensemble_acc(\n models,\n ds\n):\n n = 0\n correct = 0\n for batch_x, batch_y in ds:\n batch_pred = get_ensemble_model_prediction(\n models,\n batch_x,\n ensemble_method='soft',\n )\n correct += tf.math.reduce_sum(\n tf.cast(batch_pred == batch_y, dtype=tf.int32)\n )\n n += batch_y.shape[0]\n return correct / n", "def evaluate(X, yt, cls, name='data'):\n yp = cls.predict(X)\n acc = metrics.accuracy_score(yt, yp)\n return acc", "def _evaluate(self, train_x, train_y, test_x, test_y, n_targets, name):\n r_temp = {}\n for metric_name in self.metrics:\n r_temp.update({f\"{metric_name}_Model\": name, f\"{metric_name}_Sum\": 0,\n f\"{metric_name}_Min\": 1000000, f\"{metric_name}_Max\": 0})\n\n for i in range(self.repetitions):\n is_nan = True\n while (is_nan):\n model = self.get_model(train_x.shape[1], n_targets)\n model.fit(train_x, train_y, **self.fit_kwargs)\n result = model.predict(test_x)\n is_nan = np.any(np.isnan(result))\n del model\n\n for metric_name in self.metrics:\n metric = self.get_metrics(metric_name)\n value = metric(result, test_y)\n r_temp[f\"{metric_name}_Sum\"] += value\n if r_temp[f\"{metric_name}_Min\"] > value:\n r_temp[f\"{metric_name}_Min\"] = value\n if r_temp[f\"{metric_name}_Max\"] < value:\n r_temp[f\"{metric_name}_Max\"] = value\n keras.backend.clear_session()\n for metric_name in self.metrics:\n r_temp[f\"{metric_name}_Mean\"] = r_temp[f\"{metric_name}_Sum\"] / self.repetitions\n return r_temp", "def accuracy(self):\n total_predictions = self.tp + self.fp + self.tn + self.fn;\n return float(self.tp + self.tn) / total_predictions if total_predictions != 0 else 1", "def evaluate(model, data):\n n_targets = 0\n n_correct_predictions = 0\n\n # Set the model on evaluatio mode.\n model.eval()\n\n # Create progress bar.\n progress_bar = tqdm.tqdm(total=len(data),\n unit='batch',\n desc='[evaluate] batch accuracy: 0.000',\n leave=False)\n\n # Loop through validation batches.\n for inputs, targets in data:\n\n # Send data to GPU if CUDA is enabled.\n if next(model.parameters()).is_cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Feed forward.\n with torch.set_grad_enabled(False):\n outputs = model(inputs)\n\n # Choose the class with maximum probability.\n _, predictions = torch.max(outputs, 1)\n\n accuracy = (predictions == targets).sum().item() / len(targets)\n progress_bar.update(1)\n progress_bar.set_description(\n '[evaluate] batch accuracy: {accuracy:.3f}'.format(\n accuracy=accuracy))\n\n # Accumulate targets and correct predictions count.\n n_targets += len(targets)\n n_correct_predictions += (predictions == targets).sum().item()\n\n # Close progress bar.\n progress_bar.close()\n\n return n_correct_predictions / n_targets", "def finish_online_evaluation_extended(self, task):\n # -- Get current True-Positive, False-Positive and False-Negative -- #\n self.online_eval_tp = np.sum(self.online_eval_tp, 0)\n self.online_eval_fp = np.sum(self.online_eval_fp, 0)\n self.online_eval_fn = np.sum(self.online_eval_fn, 0)\n\n # -- Calculate the IoU -- #\n global_iou_per_class = [i for i in [i / (i + j + k) for i, j, k in\n zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]\n if not np.isnan(i)]\n\n # -- Calculate the Dice -- #\n global_dc_per_class 
= [i for i in [2 * i / (2 * i + j + k) for i, j, k in\n zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]\n if not np.isnan(i)]\n\n # -- Store IoU and Dice values. Ensure it is float64 so its JSON serializable -- #\n # -- Do not use self.all_val_eval_metrics since this is used for plotting and then the -- #\n # -- plots do not build correctly because based on self.save_every more dice values than -- #\n # -- expected (epochs) are in there --> see plot_progress function in network_trainer.py -- #\n iou = np.mean(global_iou_per_class, dtype=\"float64\")\n dice = np.mean(global_dc_per_class, dtype=\"float64\")\n\n # -- Update the log file -- #\n self.print_to_log_file(\"Average global foreground IoU for task {}: {}\".format(task, str(global_iou_per_class)))\n self.print_to_log_file(\"(interpret this as an estimate for the IoU of the different classes. This is not \"\n \"exact.)\")\n self.print_to_log_file(\"Average global foreground Dice for task {}: {}\".format(task, str(global_dc_per_class)))\n self.print_to_log_file(\"(interpret this as an estimate for the Dice of the different classes. This is not \"\n \"exact.)\")\n\n # -- Add the results to self.validation_results based on task and epoch -- #\n if self.validation_results.get('epoch_'+str(self.epoch), None) is None:\n self.validation_results['epoch_'+str(self.epoch)] = { task: {\n 'IoU': iou,\n 'Dice': dice\n }\n }\n else: # Epoch entry does already exist in self.validation_results, so only add the task with the corresponding values\n self.validation_results['epoch_'+str(self.epoch)][task] = { 'IoU': iou,\n 'Dice': dice\n }\n \n # -- Empty the variables for next iteration -- #\n self.online_eval_foreground_dc = []\n self.online_eval_tp = []\n self.online_eval_fp = []\n self.online_eval_fn = []", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def evaluate_model(model, X_test_input, y_test_input):\r\n pred_class = [model.classes_[i] for i in model.predict_proba(X_test_input).argmax(axis=-1)]\r\n pred_accuracy = np.sum(np.array(y_test_input)==np.array(pred_class))/len(pred_class)\r\n return pred_class, pred_accuracy", "def eval(self):\n target_truth_labels = self.get_target_labels()\n for key in self.id_uncertainty_measures.keys():\n # deep copy needed as we mutate confidence values later on\n decision_fn_value = np.concatenate((copy.deepcopy(self.id_uncertainty_measures[key]),\n copy.deepcopy(self.ood_uncertainty_measures[key])),\n axis=0)\n # negation needed for confidence, as confidence is indicator of label=0 samples\n # i.e for correct classified samples.\n # But we need scores for label=1 samples i.e misclassified samples\n # to be higher, so we negate.\n if key == UncertaintyMeasuresEnum.CONFIDENCE or key == UncertaintyMeasuresEnum.PRECISION:\n decision_fn_value *= -1.0\n\n aupr, auroc = ClassifierPredictionEvaluator.compute_pr_roc_curves(\n decision_fn_value, target_truth_labels, self.result_dir, key._value_)\n\n with open(os.path.join(self.result_dir, 'results.txt'), 'a') as f:\n f.write('AUPR using ' + key._value_ + \": \" +\n str(np.round(aupr * 100.0, 1)) + '\\n')\n f.write('AUROC using ' + key._value_ + \": \" +\n str(np.round(auroc * 100.0, 1)) + '\\n')", "def eval_perf_train(model, X_train=None, y_train=None):\n\n # if X_train != None and y_train != 
None:\n\n y_hat_train = model.predict(X_train)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f'Train Mean Absolute Error: {train_mae:,.2f}')\n print(f'Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n # if X_test != None and y_test != None:\n\n # y_hat_test = model.predict(X_test)\n\n # test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n # test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n # test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n # test_r = metrics.r2_score(y_test, y_hat_test)\n\n # print('Evaluating Performance on Testing Data:\\n')\n # print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n # print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n # print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n # print(f'Test R-Square Value: {round(test_r,2)}')", "def evaluate(trainy, yTrue):\n\n #calculates the accuracy using MFS\n counterTrain = Counter(trainy)\n maxCount = max(counterTrain.values())\n for key in list(counterTrain.keys()):\n if counterTrain[key] == maxCount:\n MFS = key\n counterTest = Counter(yTrue)\n print(\"\\nAccuracy using MFS\")\n accuracy = counterTest[MFS] / len(yTrue)\n print(accuracy)\n\n #creates the list of predicted values\n yPred = [MFS] * len(yTrue)\n\n #prints the confusion matrix\n print(\"\\nConfusion Matrix\")\n print(confusion_matrix(yTrue,yPred))\n\n return accuracy", "def evaluate(cfg: DictConfig):\n\n experiments = cfg.get('experiment_type', f'{cfg.model.name}_only')\n fixed_t0 = cfg.get('fixed_t0', False)\n ext = '_fixedT0' if fixed_t0 else ''\n\n base_dir = cfg.device.root\n datasource = cfg.datasource.name\n\n if experiments == 'ablations':\n models = {\n 'FluxRGNN': ['final',\n 'final_without_encoder',\n 'final_without_boundary'],\n 'LocalLSTM': ['final']\n }\n elif experiments == 'final':\n models = {\n 'FluxRGNN': ['final'],\n 'GAM': ['final'],\n 'HA': ['final'],\n 'GBT': ['final']\n }\n else:\n m = cfg.model.name\n year = cfg.datasource.test_year\n\n # find all experiments available for this model, datasource and test year\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{year}')\n models = {\n m : [ f.name for f in os.scandir(result_dir) if f.is_dir() ]\n }\n\n\n # thresholds for binary classification metrics\n if cfg.datasource.name == 'abm':\n thresholds = [0.0019, 0.0207]\n else:\n thresholds = [0, 10, 20]\n\n rmse_per_hour = []\n mae_per_hour = []\n pcc_per_hour = []\n bin_per_hour = []\n\n rmse_per_night = []\n mae_per_night = []\n\n output_dir = osp.join(base_dir, 'results', datasource, f'performance_evaluation{ext}', experiments)\n os.makedirs(output_dir, exist_ok=True)\n\n counter = 0\n\n for m, dirs in models.items():\n print(f'evaluate {m}')\n\n for d in dirs:\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{cfg.datasource.test_year}', d)\n\n # check if directory exists\n if os.path.isdir(result_dir):\n results, model_cfg = load_cv_results(result_dir, trials=cfg.task.repeats, ext=ext)\n\n df_prep = pd.read_csv(osp.join(base_dir, 'data', 'preprocessed',\n 
f'{model_cfg[\"t_unit\"]}_{model_cfg[\"model\"][\"edge_type\"]}_ndummy={model_cfg[\"datasource\"][\"n_dummy_radars\"]}',\n datasource, cfg.season, str(cfg.datasource.test_year), 'dynamic_features.csv'))\n tidx2night = dict(zip(df_prep.tidx, df_prep.nightID))\n\n rmse_per_hour.append(compute_rmse(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n mae_per_hour.append(compute_mae(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n pcc_per_hour.append(compute_pcc(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n\n if fixed_t0:\n rmse_per_night.append(compute_rmse_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n mae_per_night.append(compute_mae_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n\n # compute binary classification measures\n for thr in thresholds:\n bin_per_hour.append(compute_bin(m, d, results, groupby=['horizon', 'trial'], threshold=thr, km2=True))\n\n counter += 1\n\n else:\n print(f'Experiment \"{d}\" for model \"{m}\" and datasource \"{datasource}\" is not available. '\n f'Use \"run_experiments.py model={m} datasource={datasource} +experiment={d}\" to run this experiment.')\n\n if counter > 0:\n rmse_per_hour = pd.concat(rmse_per_hour)\n rmse_per_hour.to_csv(osp.join(output_dir, f'rmse_per_hour.csv'))\n\n mae_per_hour = pd.concat(mae_per_hour)\n mae_per_hour.to_csv(osp.join(output_dir, f'mae_per_hour.csv'))\n\n pcc_per_hour = pd.concat(pcc_per_hour)\n pcc_per_hour.to_csv(osp.join(output_dir, f'pcc_per_hour.csv'))\n\n bin_per_hour = pd.concat(bin_per_hour)\n bin_per_hour.to_csv(osp.join(output_dir, f'bin_per_hour.csv'))\n\n if fixed_t0:\n rmse_per_night = pd.concat(rmse_per_night)\n rmse_per_night.to_csv(osp.join(output_dir, f'rmse_per_night.csv'))\n\n mae_per_night = pd.concat(mae_per_night)\n mae_per_night.to_csv(osp.join(output_dir, f'mae_per_night.csv'))", "def fit(self):\n for i in range(self.current_epoch, self.max_epoch):\n self.current_epoch += 1\n # train\n train_dataloader = self.data_module.get_train_dataloader(\n batch_size=self.train_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers,\n pin_memory=True\n )\n neptune.log_metric(\"learning_rate_vs_epoch\", self.optimizer.param_groups[0]['lr'])\n self.train_one_epoch(train_dataloader)\n\n # validate \n if self.validate_after == 'epoch' and self.train_on_all_data == False and self.run_lr_range_test == False:\n validation_dataloader = self.data_module.get_valid_dataloader(\n batch_size=self.valid_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers, \n pin_memory=True\n )\n self.validate_one_epoch(validation_dataloader)\n\n if self.scheduler:\n if self.step_scheduler_after == 'epoch': \n if self.step_scheduler_metric == 'val_auc':\n self.scheduler.step(self.metrics['valid'][-1]['auc_score'])\n else:\n self.scheduler.step()\n\n if self.run_lr_range_test:\n neptune.log_metric('validation_epoch_end_AUC_vs_LR', \n self.scheduler.get_last_lr()[0], y=self.metrics['valid'][-1]['auc_score'])\n\n # checkpoint model for resuming model\n if (self.current_epoch % self.checkpoint_epochs) == 0:\n self.save_checkpoint()\n\n # sleep the training process\n if self.current_epoch % self.sleep_in_epochs == 0:\n print(f\"SLEEPING FOR {self.sleep_time} at epoch={self.current_epoch}\")\n for i in range(int(self.sleep_time/30)):\n time.sleep(i)\n 
neptune.log_metric(\"sleeping_status\", y=1)\n\n stop_training = self.stopping_criteria()\n if stop_training:\n if self.fp16:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer.zero_grad()\n else:\n self.optimizer.step()\n self.optimizer.zero_grad()\n # backward all the accumulate gradients\n print(f\"stopped training at {self.current_epoch} epoch\")\n break", "def evaluate(self, test_x, test_y):\n score = self._model.evaluate(test_x, test_y, verbose=self._verbose)\n print(\"Test score: \", score[0])\n print(\"Test accuracy: \", score[1])", "def one_shot_test(self, model, support_set_size, number_of_tasks_per_alphabet,\n is_validation):\n\n # Set some variables that depend on dataset\n if is_validation:\n alphabets = self._validation_alphabets\n print('\\nMaking One Shot Task on validation alphabets:')\n else:\n alphabets = self._evaluation_alphabets\n print('\\nMaking One Shot Task on evaluation alphabets:')\n\n mean_global_accuracy = 0\n\n for alphabet in alphabets:\n mean_alphabet_accuracy = 0\n for _ in range(number_of_tasks_per_alphabet):\n images, _ = self.get_one_shot_batch(\n support_set_size, is_validation=is_validation)\n probabilities = model.predict_on_batch(images)\n\n # Added this condition because noticed that sometimes the outputs\n # of the classifier was almost the same in all images, meaning that\n # the argmax would be always by defenition 0.\n if np.argmax(probabilities) == 0 and probabilities.std()>0.01:\n accuracy = 1.0\n else:\n accuracy = 0.0\n\n mean_alphabet_accuracy += accuracy\n mean_global_accuracy += accuracy\n\n mean_alphabet_accuracy /= number_of_tasks_per_alphabet\n\n print(alphabet + ' alphabet' + ', accuracy: ' +\n str(mean_alphabet_accuracy))\n if is_validation:\n self._current_validation_alphabet_index += 1\n else:\n self._current_evaluation_alphabet_index += 1\n\n mean_global_accuracy /= (len(alphabets) *\n number_of_tasks_per_alphabet)\n\n print('\\nMean global accuracy: ' + str(mean_global_accuracy))\n\n # reset counter\n if is_validation:\n self._current_validation_alphabet_index = 0\n else:\n self._current_evaluation_alphabet_index = 0\n\n return mean_global_accuracy", "def test(self, inputs, labels):\n n = inputs.shape[0]\n\n error = 0.0\n for idx in range(n):\n result = self.forward(inputs[idx:idx+1, :])\n error += abs(result - labels[idx:idx+1, :])\n\n error /= n\n accuracy = np.round((1 - error)*100, 3)\n self.accuracy_box.append(accuracy[0][0])\n print('accuracy: %.2f' % accuracy + '%')\n print('')", "def compute_accuracy(self):\n if not self.is_training:\n logits = self.test_logits\n labels = self.data.test_labels\n else:\n logits = self.train_logits\n labels = self.data.labels\n\n predictions = tf.cast(tf.argmax(logits, 1), tf.int32)\n correct = tf.equal(labels, predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def test(self):\n self.eval()\n test_mask = self.data.test_mask\n labels = self.data.y\n output = self.forward(self.data)\n # output = self.output\n loss_test = F.nll_loss(output[test_mask], labels[test_mask])\n acc_test = utils.accuracy(output[test_mask], labels[test_mask])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n return acc_test.item()", "def _eval_classifier(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = self.df_sample[self.label_column]\n\n 
precision_baseline = precision_score(y_label_baseline, y_pred_baseline)\n recall_baseline = recall_score(y_label_baseline, y_pred_baseline)\n acc_baseline = accuracy_score(y_label_baseline, y_pred_baseline)\n f1_baseline = f1_score(y_label_baseline, y_pred_baseline)\n try:\n auc_baseline = roc_auc_score(y_label_baseline, y_pred_baseline)\n except ValueError:\n auc_baseline = \"NA\"\n\n precision_sample = precision_score(y_label_sample, y_pred_sample)\n recall_sample = recall_score(y_label_sample, y_pred_sample)\n acc_sample = accuracy_score(y_label_sample, y_pred_sample)\n f1_sample = f1_score(y_label_sample, y_pred_sample)\n try:\n auc_sample = roc_auc_score(y_label_sample, y_pred_sample)\n except ValueError:\n auc_sample = \"NA\"\n\n metrics_df = pd.DataFrame(\n {\n \"Accuracy\": [acc_baseline, acc_sample],\n \"Precision\": [precision_baseline, precision_sample],\n \"Recall\": [recall_baseline, recall_sample],\n \"F1\": [f1_baseline, f1_sample],\n \"AUC\": [auc_baseline, auc_sample],\n },\n index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df", "def model_run(self, model, estimators):\n model.fit(self.X_train, self.y_train)\n y_score = model.predict(self.X_test)\n accu_train = np.sum(model.predict(self.X_train) == self.y_train) / self.y_train.size\n accu_test = np.sum(y_score == self.y_test) / self.y_test.size\n\n self.results.write(\"Model Results\\n\")\n self.results.write(\"Number of Estimators: \" + str(estimators) + \"\\n\")\n self.results.write(\"Accuracy on Train: \" + str(accu_train) + \"\\n\")\n self.results.write(\"Accuracy on Test: \" + str(accu_test) + \"\\n\")\n return model", "def run_epoch(self, sess, epoch_num, validate=True):\n total_loss = 0\n accuracies = []\n for i in range(self.batches_per_epoch):\n batch = self.loader.get_batch()\n if self.config.print_every and i % self.config.print_every == 0:\n if validate:\n val_accuracy = self.eval_validation_accuracy()\n print(\"step {}, validation accuracy {:.3f}\".format(i, val_accuracy))\n accuracies.append((i + epoch_num * self.batches_per_epoch, val_accuracy))\n else:\n if self.include_coverage and self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2], batch[3])\n elif self.include_coverage:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n elif self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n else:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1])\n print(\"step {}, training accuracy {:.3f}\".format(i, train_accuracy))\n \n if self.include_coverage and self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.e: batch[2], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_coverage:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.e: batch[1], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n else:\n attention, _, loss_val = sess.run([self.attention, self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.y_: batch[1],\n self.keep_prob: 1-self.config.dropout_prob})\n\t\tpdb.set_trace()\n\t\tnp.savetxt(\"a.csv\", attention[0], delimiter=\",\")\n total_loss += loss_val\n\n return total_loss 
/ self.batches_per_epoch, accuracies", "def test(self, inputs, labels):\n n = inputs.shape[0]\n\n error = 0.0\n for idx in range(n):\n result = self.forward(inputs[idx:idx+1, :])\n error += abs(result - labels[idx:idx+1, :])\n\n print(\"error: \", error)\n error /= n\n print('accuracy: %.2f' % ((1 - error)*100) + '%')\n print('')", "def evaluate_model(sess, model, data_set):\n total_cost = 0.0\n total_r_cost = 0.0\n total_kl_cost = 0.0\n for batch in range(data_set.num_batches):\n unused_orig_x, x, s = data_set.get_batch(batch)\n feed = {model.input_data: x, model.sequence_lengths: s}\n (cost, r_cost,\n kl_cost) = sess.run([model.cost, model.r_cost, model.kl_cost], feed)\n total_cost += cost\n total_r_cost += r_cost\n total_kl_cost += kl_cost\n\n total_cost /= (data_set.num_batches)\n total_r_cost /= (data_set.num_batches)\n total_kl_cost /= (data_set.num_batches)\n return (total_cost, total_r_cost, total_kl_cost)", "def evaluate(self, inputs: ByteTensor, targets: IntTensor, unused) -> float:\n assert isinstance(inputs, ByteTensor)\n assert inputs.shape[1] == self.feature_count\n assert isinstance(targets, IntTensor)\n assert targets.shape == (inputs.shape[0], )\n\n errors = 0\n examples = targets.shape[0]\n for i in range(examples):\n input = inputs[i]\n prediction = self.predict(input)\n if prediction[0] != targets[i].long():\n errors += 1\n accuracy = (examples - errors) / examples\n return accuracy", "def evaluate(func, dset_path, model_path):\n dset = load_dataset(dset_path, 'trva', False)\n\n \"\"\"\n average class-based zero-shot accuracy\n \"\"\"\n scores = func(dset['Xte_unseen'], dset['Ste_unseen_gt'], model_path)\n preds = np.argmax(scores, 1)\n preds = dset['Cte_unseen'][preds]\n acc_zsl = compute_acc(dset['Lte_unseen'], preds)\n\n \"\"\"\n average class-based generalized zsl accuracy on seen test classes\n \"\"\"\n scores = func(dset['Xte_seen'], dset['Sall_gt'], model_path)\n preds = np.argmax(scores, 1)\n preds = dset['Call'][preds]\n acc_gzsl_seen = compute_acc(dset['Lte_seen'], preds)\n\n \"\"\"\n average class-based generalized zsl accuracy on unseen test classes\n \"\"\"\n scores = func(dset['Xte_unseen'], dset['Sall_gt'], model_path)\n preds = np.argmax(scores, 1)\n preds = dset['Call'][preds]\n acc_gzsl_unseen = compute_acc(dset['Lte_unseen'], preds)\n\n print 'ZSL accuracy: ', acc_zsl\n print 'Generalized ZSL accuracy on seen classes: ', acc_gzsl_seen\n print 'Generalized ZSL accuracy on unseen classes: ', acc_gzsl_unseen", "def eval_on_dataset(sess, G, iterator, dataset_name=\"validation\") :\n print(\">>> Evaluating model on %s\" % (dataset_name))\n step = 0\n current_epoch = iterator.epoch\n \n # Evaluate against validation before training to get baseline performance! 
\n step = 0\n cumulative_loss = 0.0\n all_probs = np.array([], dtype=np.float32)\n all_targets = np.array([], dtype=np.float32)\n while current_epoch == iterator.epoch : \n step += 1\n this_x, this_y, this_seqlen, this_mask = iterator.next()\n feed_dict = {G['input_placeholder']: this_x, \n G['target_placeholder']: this_y, \n G['seqlen_placeholder']: this_seqlen, \n G['loss_mask_placeholder']: this_mask}\n loss_value, probs = sess.run([G['loss'], G['output_probs']], feed_dict=feed_dict)\n cumulative_loss += loss_value\n all_probs = np.append(all_probs, probs)\n all_targets = np.append(all_targets, this_y)\n val_loss = cumulative_loss / float(step)\n auroc = roc_auc_score(all_targets, all_probs)\n auprc = average_precision_score(all_targets, all_probs)\n print(\">>> (%s) After epoch %d, loss = %.4f, auroc = %.4f, auprc = %.4f \" % (dataset_name, current_epoch, val_loss, auroc, auprc))\n iterator.epoch = current_epoch", "def update(self, y_true: list[Number], y_pred: list[Number]) -> ForecastingMetric:", "def auto_evaluation(model,x_train,y_train,x_test,y_test):\n\n y_train_prediction=model.predict(x_train)\n y_test_prediction=model.predict(x_test)\n\n plt.scatter(y_train,y_train_prediction,c=\"b\",s=1,alpha=0.5)\n plt.scatter(y_test,y_test_prediction,c=\"r\",s=2,alpha=0.5)\n plt.xlabel(\"actual\")\n plt.ylabel(\"predicted\")\n\n print(\"tr R2: {:.2f}\".format(r2_score(y_train_prediction,y_train)))\n print(\"te R2: {:.2f}\".format(r2_score(y_test_prediction,y_test))) \n \n return y_train_prediction,y_test_prediction", "def get_evaluations(self, pred_Y, Y):\n \n tp, fp, tn, fn = self._get_evaluations(pred_Y, Y)\n\n # calculate F1\n try:\n precision = tp / (tp+fp)\n except ZeroDivisionError:\n precision = tp\n try:\n recall = tp / (tp+fn)\n except ZeroDivisionError:\n recall = tp\n try:\n f1 = 2.0 * ((precision*recall) / (precision+recall))\n except ZeroDivisionError:\n f1 = 0.0\n # calculate accuracy\n accuracy = (tp+tn) / (tp+fp+tn+fn)\n\n return accuracy, f1, precision, recall", "def evaluate(train: pd.DataFrame, test: pd.DataFrame, algorithm):\n\n model = algorithm(train)\n\n test_labels = test['Labels']\n\n predictions = predict_data(test, model)\n\n error = mean_square_error(predictions, test_labels)\n\n acc = accuracy(predictions, test_labels)\n\n return acc, error", "def model_accuracy(model, X, y):\n acc = None\n ### YOUR CODE HERE 1-2 lines\n predictions = model.predict(X)\n acc = np.mean([1 if predict == y[target] else 0 for target, predict in enumerate(predictions)])\n ### END CODE\n return acc", "def eval_model(net, val_iter):\n correct = 0\n total = 0\n cm = conf.ConfusionMatrix([0, 1])\n net.eval()\n with torch.no_grad():\n for batch in val_iter:\n total += batch.correct.size(0)\n prediction = predict_batch(net, batch)\n cm.add_entry(batch.correct.tolist(), prediction.tolist())\n correct += (prediction == batch.correct).sum().item()\n\n return correct/total, cm.get_f1()", "def evaluate():\n model.eval()\n with torch.no_grad():\n loss, n = 0, 0\n for xb, yb in valid_dl:\n n += len(xb)\n loss += loss_func(model(xb), yb) * len(xb)\n\n return loss/n", "def run_evaluation(forecast_probabilities, observed_labels, output_dir_name):\n\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name)\n\n # TODO(thunderhoser): Make binarization threshold an input argument to this\n # method.\n (binarization_threshold, best_csi\n ) = model_eval.find_best_binarization_threshold(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels,\n 
threshold_arg=model_eval.THRESHOLD_ARG_FOR_UNIQUE_FORECASTS,\n criterion_function=model_eval.get_csi,\n optimization_direction=model_eval.MAX_OPTIMIZATION_DIRECTION,\n unique_forecast_precision=FORECAST_PRECISION_FOR_THRESHOLDS)\n\n print (\n 'Best binarization threshold = {0:.4f} ... corresponding CSI = {1:.4f}'\n ).format(binarization_threshold, best_csi)\n\n print 'Binarizing forecast probabilities...'\n forecast_labels = model_eval.binarize_forecast_probs(\n forecast_probabilities=forecast_probabilities,\n binarization_threshold=binarization_threshold)\n\n print 'Creating contingency table...'\n contingency_table_as_dict = model_eval.get_contingency_table(\n forecast_labels=forecast_labels, observed_labels=observed_labels)\n print '{0:s}\\n'.format(str(contingency_table_as_dict))\n\n print 'Computing performance metrics...'\n pod = model_eval.get_pod(contingency_table_as_dict)\n pofd = model_eval.get_pofd(contingency_table_as_dict)\n success_ratio = model_eval.get_success_ratio(contingency_table_as_dict)\n focn = model_eval.get_focn(contingency_table_as_dict)\n accuracy = model_eval.get_accuracy(contingency_table_as_dict)\n csi = model_eval.get_csi(contingency_table_as_dict)\n frequency_bias = model_eval.get_frequency_bias(contingency_table_as_dict)\n peirce_score = model_eval.get_peirce_score(contingency_table_as_dict)\n heidke_score = model_eval.get_heidke_score(contingency_table_as_dict)\n\n print (\n 'POD = {0:.4f} ... POFD = {1:.4f} ... success ratio = {2:.4f} ... '\n 'FOCN = {3:.4f} ... accuracy = {4:.4f} ... CSI = {5:.4f} ... frequency '\n 'bias = {6:.4f} ... Peirce score = {7:.4f} ... Heidke score = {8:.4f}\\n'\n ).format(pod, pofd, success_ratio, focn, accuracy, csi, frequency_bias,\n peirce_score, heidke_score)\n\n auc, scikit_learn_auc = _create_roc_curve(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n bss_dict = _create_attributes_diagram(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n aupd = _create_performance_diagram(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n evaluation_file_name = '{0:s}/model_evaluation.p'.format(output_dir_name)\n print 'Writing results to: \"{0:s}\"...'.format(evaluation_file_name)\n model_eval.write_results(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels,\n binarization_threshold=binarization_threshold, pod=pod, pofd=pofd,\n success_ratio=success_ratio, focn=focn, accuracy=accuracy, csi=csi,\n frequency_bias=frequency_bias, peirce_score=peirce_score,\n heidke_score=heidke_score, auc=auc, scikit_learn_auc=scikit_learn_auc,\n aupd=aupd, bss_dict=bss_dict, pickle_file_name=evaluation_file_name)" ]
[ "0.7057116", "0.69941413", "0.6860783", "0.6805387", "0.67818636", "0.67300326", "0.67083573", "0.6631807", "0.6592348", "0.6574836", "0.6571399", "0.65261126", "0.65081304", "0.6490171", "0.64706427", "0.64673305", "0.64662796", "0.64637923", "0.64597434", "0.6442338", "0.6421062", "0.6407823", "0.64071673", "0.6396262", "0.63904077", "0.63855964", "0.6380326", "0.63605714", "0.6336778", "0.63359106", "0.6324227", "0.63235265", "0.632225", "0.6318548", "0.6315752", "0.631198", "0.6311702", "0.63011616", "0.6300528", "0.6285659", "0.6284402", "0.62776965", "0.6274365", "0.6273465", "0.62647283", "0.62633175", "0.62598073", "0.62489265", "0.6244657", "0.62412995", "0.6239634", "0.6233116", "0.6232539", "0.6226703", "0.62252927", "0.6224887", "0.62238616", "0.62205374", "0.62094045", "0.6202329", "0.6197706", "0.619441", "0.61938846", "0.6189262", "0.61892354", "0.61885244", "0.6186161", "0.6184982", "0.61776483", "0.6176105", "0.6169964", "0.61676055", "0.61666423", "0.6161537", "0.6160322", "0.61600393", "0.6157245", "0.61478466", "0.6142647", "0.61402154", "0.61375564", "0.61321986", "0.61308014", "0.6129083", "0.61263853", "0.6122506", "0.61220044", "0.61176383", "0.61155564", "0.61136115", "0.611052", "0.61082435", "0.61080265", "0.61043453", "0.6101992", "0.60943824", "0.6092417", "0.6089286", "0.6088268", "0.6086728", "0.6084376" ]
0.0
-1
The training process, including evaluations and loggers.
def train(model: ContinualModel, dataset: ContinualDataset, args: Namespace) -> None:
    model.net.to(model.device)
    results, results_mask_classes = [], []

    model_stash = create_stash(model, args, dataset)

    if args.csv_log:
        csv_logger = CsvLogger(dataset.SETTING, dataset.NAME, model.NAME)
    if args.tensorboard:
        tb_logger = TensorboardLogger(args, dataset.SETTING, model_stash)
        model_stash['tensorboard_name'] = tb_logger.get_name()

    dataset_copy = get_dataset(args)
    for t in range(dataset.N_TASKS):
        model.net.train()
        _, _ = dataset_copy.get_data_loaders()
    if model.NAME != 'icarl' and model.NAME != 'pnn':
        random_results_class, random_results_task = evaluate(model, dataset_copy)

    print(file=sys.stderr)
    for t in range(dataset.N_TASKS):
        model.net.train()
        train_loader, test_loader = dataset.get_data_loaders()
        if hasattr(model, 'begin_task'):
            model.begin_task(dataset)
        if t:
            accs = evaluate(model, dataset, last=True)
            results[t - 1] = results[t - 1] + accs[0]
            if dataset.SETTING == 'class-il':
                results_mask_classes[t - 1] = results_mask_classes[t - 1] + accs[1]
        for epoch in range(args.n_epochs):
            for i, data in enumerate(train_loader):
                if hasattr(dataset.train_loader.dataset, 'logits'):
                    inputs, labels, not_aug_inputs, logits = data
                    inputs = inputs.to(model.device)
                    labels = labels.to(model.device)
                    not_aug_inputs = not_aug_inputs.to(model.device)
                    logits = logits.to(model.device)
                    loss = model.observe(inputs, labels, not_aug_inputs, logits)
                else:
                    inputs, labels, not_aug_inputs = data
                    inputs, labels = inputs.to(model.device), labels.to(
                        model.device)
                    not_aug_inputs = not_aug_inputs.to(model.device)
                    loss = model.observe(inputs, labels, not_aug_inputs)

                progress_bar(i, len(train_loader), epoch, t, loss)

                if args.tensorboard:
                    tb_logger.log_loss(loss, args, epoch, t, i)

                model_stash['batch_idx'] = i + 1
            model_stash['epoch_idx'] = epoch + 1
            model_stash['batch_idx'] = 0
        model_stash['task_idx'] = t + 1
        model_stash['epoch_idx'] = 0

        if hasattr(model, 'end_task'):
            model.end_task(dataset)

        accs = evaluate(model, dataset)
        results.append(accs[0])
        results_mask_classes.append(accs[1])

        mean_acc = np.mean(accs, axis=1)
        print_mean_accuracy(mean_acc, t + 1, dataset.SETTING)

        model_stash['mean_accs'].append(mean_acc)
        if args.csv_log:
            csv_logger.log(mean_acc)
        if args.tensorboard:
            tb_logger.log_accuracy(np.array(accs), mean_acc, args, t)

    if args.csv_log:
        csv_logger.add_bwt(results, results_mask_classes)
        csv_logger.add_forgetting(results, results_mask_classes)
        if model.NAME != 'icarl' and model.NAME != 'pnn':
            csv_logger.add_fwt(results, random_results_class,
                               results_mask_classes, random_results_task)

    if args.tensorboard:
        tb_logger.close()
    if args.csv_log:
        csv_logger.write(vars(args))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def _train(self):\n training_environment = self._training_environment\n evaluation_environment = self._evaluation_environment\n policy = self._policy\n pool = self._pool\n\n if not self._training_started:\n self._init_training()\n\n self._initial_exploration_hook(\n training_environment, self._initial_exploration_policy, pool)\n\n self.sampler.initialize(training_environment, policy, pool)\n\n gt.reset_root()\n gt.rename_root('RLAlgorithm')\n gt.set_def_unique(False)\n\n self._training_before_hook()\n\n for self._epoch in gt.timed_for(range(self._epoch, self._n_epochs)):\n self._epoch_before_hook()\n gt.stamp('epoch_before_hook')\n\n start_samples = self.sampler._total_samples\n for i in count():\n samples_now = self.sampler._total_samples\n self._timestep = samples_now - start_samples\n\n if (samples_now >= start_samples + self._epoch_length\n and self.ready_to_train):\n break\n\n self._timestep_before_hook()\n gt.stamp('timestep_before_hook')\n\n self._do_sampling(timestep=self._total_timestep)\n gt.stamp('sample')\n\n if self.ready_to_train:\n self._do_training_repeats(timestep=self._total_timestep)\n gt.stamp('train')\n\n self._timestep_after_hook()\n gt.stamp('timestep_after_hook')\n\n training_paths = self.sampler.get_last_n_paths(math.ceil(self._epoch_length / self.sampler._max_path_length))\n gt.stamp('training_paths')\n evaluation_paths = self._evaluation_paths(policy, evaluation_environment)\n gt.stamp('evaluation_paths')\n\n training_metrics = self._evaluate_rollouts(training_paths, training_environment)\n gt.stamp('training_metrics')\n if evaluation_paths:\n evaluation_metrics = self._evaluate_rollouts(\n evaluation_paths, evaluation_environment)\n gt.stamp('evaluation_metrics')\n else:\n evaluation_metrics = {}\n\n self._epoch_after_hook(training_paths)\n gt.stamp('epoch_after_hook')\n\n sampler_diagnostics = self.sampler.get_diagnostics()\n\n diagnostics = self.get_diagnostics(\n iteration=self._total_timestep,\n batch=self._evaluation_batch(),\n training_paths=training_paths,\n evaluation_paths=evaluation_paths)\n\n time_diagnostics = gt.get_times().stamps.itrs\n\n diagnostics.update(OrderedDict((\n *(\n (f'evaluation/{key}', evaluation_metrics[key])\n for key in sorted(evaluation_metrics.keys())\n ),\n *(\n (f'training/{key}', training_metrics[key])\n for key in sorted(training_metrics.keys())\n ),\n *(\n (f'times/{key}', time_diagnostics[key][-1])\n for key in sorted(time_diagnostics.keys())\n ),\n *(\n (f'sampler/{key}', sampler_diagnostics[key])\n for key in 
sorted(sampler_diagnostics.keys())\n ),\n ('epoch', self._epoch),\n ('timestep', self._timestep),\n ('timesteps_total', self._total_timestep),\n ('train-steps', self._num_train_steps),\n )))\n\n if self._eval_render_kwargs and hasattr(\n evaluation_environment, 'render_rollouts'):\n # TODO(hartikainen): Make this consistent such that there's no\n # need for the hasattr check.\n training_environment.render_rollouts(evaluation_paths)\n\n yield diagnostics\n\n self.sampler.terminate()\n\n self._training_after_hook()\n\n yield {'done': True, **diagnostics}", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def start_training(self):\n self.training = True", "def train(self):\n return", "def _train(self):\n step = 0\n for epoch in range(self.opts.num_epochs):\n self.hub.publish(Topic.EPOCH, epoch)\n for i, data in enumerate(self.loader):\n # Compute loss ...\n # NOTE(ycho): if one of the callbacks require training loss,\n # e.g. 
for logging, simply register a hook to the loss module\n # rather than trying to extract them here.\n loss = self.loss_fn(self.model, data)\n self.hub.publish(Topic.TRAIN_LOSS, loss)\n\n # Backprop + Optimize ...\n self.optim.zero_grad()\n loss[\"total\"].backward()\n self.optim.step()\n\n # Emit `step` event.\n # == logging, saving, evaluation\n self.hub.publish(Topic.STEP, step)\n step += 1\n\n if step >= self.opts.train_steps:\n return", "def train(self):\n self.training = True", "def training(self):\n self.training = True", "def train_step(self):\n pass", "def train(self, training_data):\n pass", "def train():\n pass", "def start_training(self, logdir: str, **info):\n pass", "def start_training(self):\n if self.task_env is None:\n rospy.logfatal(\"No task environment found for training.\")\n if self.agent is None:\n rospy.logfatal(\"No agent found for training.\")\n self.agent.start_training()", "def train(self):\n raise NotImplementedError", "def train(self, current_hyper_params):\n train_loss = 0\n train_n_iter = 0\n # Set model to train mode\n self.model.train()\n # Iterate over train data\n print(\"Iterating over training data...\")\n for i, batch in enumerate(tqdm(self.train_loader)):\n loss = self._train_batch(batch)\n # Statistics\n train_loss += loss.item()\n train_n_iter += 1\n self.stats.train_loss_history.append(train_loss / train_n_iter)", "def run_step(self):\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If your want to do something with the data, you can wrap the dataloader.\n \"\"\"\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n \"\"\"\n If your want to do something with the losses, you can wrap the model.\n \"\"\"\n loss_dict = self.model(data)\n losses = sum(loss for loss in loss_dict.values())\n self._detect_anomaly(losses, loss_dict)\n\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n self._write_metrics(metrics_dict)\n \n validation_data = next(self.validation_data_loader_iter)\n val_losses_dict = self.model(validation_data)\n val_losses = sum(loss for loss in val_losses_dict.values())\n self._detect_anomaly(val_losses, val_losses_dict)\n\n val_metrics_dict = val_losses_dict\n val_metrics_dict[\"data_time\"] = data_time\n self._write_validation_metrics(val_metrics_dict)\n\n \"\"\"\n If you need accumulate gradients or something similar, you can\n wrap the optimizer with your custom `zero_grad()` method.\n \"\"\"\n self.optimizer.zero_grad()\n losses.backward()\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method.\n \"\"\"\n self.optimizer.step()", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def training(self):\n \n best_valid_loss = np.inf\n c = 0\n \n self.train_loader, self.test_loader = self.get_train_test_loaders()\n \n print('Training the {} model with the following architecture:'.format(self.model_name))\n print(summary(self.model, (3, self.image_width, self.image_height)))\n print('*'*100)\n print('Starting the training...')\n print('*'*100)\n \n # Create the model save dir if it already doesn't exist\n if not os.path.exists(self.model_save_dir):\n os.makedirs(self.model_save_dir)\n \n for epoch in range(self.n_epochs):\n\n print(f'Epoch: {epoch+1:02}')\n\n start_time = time.time()\n\n train_loss = self.train(self.train_loader)\n valid_loss = self.evaluate(self.test_loader)\n\n epoch_mins, epoch_secs = 
self.epoch_time(start_time, time.time())\n\n c+=1\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(self.model.state_dict(), os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)))\n c=0\n\n if c>4:\n #decrease lr if loss does not decrease after 5 steps\n self.scheduler.step()\n c=0\n\n print(f'Time: {epoch_mins}m {epoch_secs}s') \n print(f'Train Loss: {train_loss:.3f}')\n print(f'Val Loss: {valid_loss:.3f}')\n print('-'*60)\n print('The best validation loss is', best_valid_loss)\n print('*'*100)", "def start_training(self):\n self.training()\n \n images, true_labels, pred_labels, pred_probs = self.evaluate_model(proba=True)\n \n metrics = Metrics(images, true_labels, pred_labels, pred_probs, self.classes)\n\n cm = metrics.get_confusion_matrix()\n print('The confusion matrix is:\\n', cm)\n print('*'*100)\n \n cr = metrics.get_classification_report()\n print('The classification report is:\\n', cr)\n print('*'*100)", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train(self):\n p = self._params\n if self.train_data != None:\n tens_to_log = self.params.tens_to_log\n logging_hook = tf.train.LoggingTensorHook(tensors = tens_to_log,\n every_n_iter = p.logging_step,\n )\n t_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.train_data[\"x\"]},\n y = self.train_data[\"y\"],\n batch_size = p.batch_size,\n num_epochs = None,\n shuffle = True,\n )\n self._model.train(input_fn = t_fn,\n steps = self.params.training_steps,\n hooks = [logging_hook],\n )\n \n if self.eval_data != None:\n e_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.eval_data[\"x\"]},\n y = self.eval_data[\"y\"],\n num_epochs = 1,\n shuffle = False,\n )\n eval_results = self.model.evaluate(input_fn = e_fn,\n checkpoint_path = self.model_dir,\n )\n print(eval_results)", "def train(self, ):\n raise NotImplementedError", "def train(self):\n # setup model\n self.createModel()\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n \n # train model\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)\n # clear save paths to avoid overwriting accidentaly\n self.saveName = None", "def train(self):\n\t\traise NotImplementedError", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = 
best_params\n self.network.load_state_dict(best_params)", "def train(self):\n raise NotImplementedError()", "def setup_training(self):\n print('setup training called')\n self.steps_done = 0\n self.current_episode_num = 1\n self.total_reward = 0\n\n # self.optimizer = optim.RMSprop(policy_net.parameters())\n self.memory = ReplayMemory(300000)\n self.total_reward_history = []\n # self.loss_history = []\n self.positions = []\n self.n_destroyed_crates = 0\n self.is_in_bomb_range = False", "def on_train_begin(self, logs=None):", "def on_train_begin(self, logs=None):", "def start_training(self):\n i = 0\n for _ in range(self.train_steps):\n print(f\"Start Training Step {i + 1}\")\n self.model.learn(total_timesteps=self.total_time_steps)\n self.model.save(self.save_path)\n print(f\"Finished Training Step {i + 1}\")\n i += 1", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def Train(self):\n self.init_epoch = self.epoch\n if self.epoch >= self.params.num_epoch:\n WARNING('Num_epoch should be smaller than current epoch. Skip training......\\n')\n else:\n for _ in range(self.epoch, self.params.num_epoch):\n self.epoch += 1\n print('-' * 20 + 'Epoch.' + str(self.epoch) + '-' * 20)\n\n # train one epoch\n self.train_one_epoch()\n\n # should display\n if self.epoch % self.params.display == 0:\n print('\\tTrain loss: %.4f' % self.train_loss[-1])\n\n # should save\n if self.params.should_save:\n if self.epoch % self.params.save_every == 0:\n self.save_checkpoint()\n\n # test every params.test_every epoch\n if self.params.should_val:\n if self.epoch % self.params.val_every == 0:\n self.val_one_epoch()\n print('\\tVal loss: %.4f' % self.val_loss[-1])\n\n # adjust learning rate\n self.adjust_lr()\n self.train_one_epoch_Image_display() \n \n # save the last network state\n if self.params.should_save:\n self.save_checkpoint()\n\n # train visualization\n self.plot_curve()", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def on_train_begin(self, logs):\n self.train_start = timeit.default_timer()\n self.metrics_names = self.model.metrics_names\n print('Training for {} steps ...'.format(self.params['nb_steps']))", "def train(self, training_steps=10):", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, 
os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def on_train_begin(self, logs=None):\n self.start_time = datetime.datetime.now()\n print(f\"Starting training at {self.start_time}\")", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def on_train_begin(self, state: FuseManagerState) -> None:\n # File writer imports are done here in order to workaround the GPU issues -\n # when importing torch.tensorboard cuda gets occupied - do that only AFTER CUDA_VISIBLE_DEVICES is set\n try:\n # available only from torch 1.2\n from torch.utils.tensorboard import SummaryWriter\n self.writer_class = SummaryWriter\n self.use_summary_tf = False\n except ModuleNotFoundError:\n # fallback, use tensorflow file writer\n from tensorflow.summary import FileWriter\n import tensorflow as tf\n self.writer_class = FileWriter\n self.tf_summary = tf.Summary\n self.use_summary_tf = True\n\n tensorboard_train_dir = os.path.join(self.model_dir, 'train')\n tensorboard_validation_dir = os.path.join(self.model_dir, 'validation')\n\n # make sure we have these folders\n file.create_dir(tensorboard_train_dir, error_if_exist=False)\n file.create_dir(tensorboard_validation_dir, error_if_exist=False)\n\n # Get TensorBoard loggers\n self.tensorboard_logger_train = self.writer_class(tensorboard_train_dir)\n self.tensorboard_logger_validation = self.writer_class(tensorboard_validation_dir)\n pass", "def train(self, training_data, cfg, **kwargs):\n pass", "def on_train_begin(self, logs=None):\n pass", "def run(self):\n # This should do nothing if the user has already configured\n # logging, and will it least enable error messages otherwise.\n logging.basicConfig()\n\n # If this is resumption from a checkpoint, it is crucial to\n # reset `profile.current`. 
Otherwise, it simply does not hurt.\n self.profile.current = []\n\n # Sanity check for the most common case\n if (self._model and isinstance(self._model, Model) and\n isinstance(self.algorithm, GradientDescent)):\n if not (set(self._model.get_parameter_dict().values()) ==\n set(self.algorithm.parameters)):\n logger.warning(\"different parameters for model and algorithm\")\n\n with change_recursion_limit(config.recursion_limit):\n self.original_sigint_handler = signal.signal(\n signal.SIGINT, self._handle_epoch_interrupt)\n self.original_sigterm_handler = signal.signal(\n signal.SIGTERM, self._handle_batch_interrupt)\n try:\n logger.info(\"Entered the main loop\")\n if not self.status['training_started']:\n for extension in self.extensions:\n extension.main_loop = self\n self._run_extensions('before_training')\n with Timer('initialization', self.profile):\n self.algorithm.initialize()\n self.status['training_started'] = True\n # We can not write \"else:\" here because extensions\n # called \"before_training\" could have changed the status\n # of the main loop.\n if self.log.status['iterations_done'] > 0:\n self.log.resume()\n self._run_extensions('on_resumption')\n self.status['epoch_interrupt_received'] = False\n self.status['batch_interrupt_received'] = False\n with Timer('training', self.profile):\n while self._run_epoch():\n pass\n except TrainingFinish:\n self.log.current_row['training_finished'] = True\n except Exception as e:\n self._restore_signal_handlers()\n self.log.current_row['got_exception'] = traceback.format_exc()\n logger.error(\"Error occured during training.\" + error_message)\n try:\n self._run_extensions('on_error')\n except Exception:\n logger.error(traceback.format_exc())\n logger.error(\"Error occured when running extensions.\" +\n error_in_error_handling_message)\n reraise_as(e)\n finally:\n self._restore_signal_handlers()\n if self.log.current_row.get('training_finished', False):\n self._run_extensions('after_training')\n if config.profile:\n self.profile.report()", "def run_epoch(self):\n self.model_lr_scheduler.step()\n\n print(\"Training\")\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = self.step < 2000\n late_phase = self.step % 2000 == 0\n\n if early_phase or late_phase:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1", "def run_pretraining(self):\n if self.is_main_process:\n logging.info(\"*********************************\")\n logging.info(\"*** Starting pre-training ***\")\n logging.info(\"*********************************\")\n logging.info(\"Training on GPU: %s\", torch.cuda.get_device_name(0))\n logging.info(\"Target batch size: %s\", self.target_batch_size)\n logging.info(\"Number of accumulation steps: %s\", self.num_accumulation_steps)\n logging.info(\"Actual batch size: %s\", self.batch_size)\n\n self.model.train()\n self.most_recent_ckpts_paths = []\n average_loss = 0.0 # averaged loss every self.log_freq steps\n epoch = 0\n training_steps = 0\n pool = ProcessPoolExecutor(1)\n if self.is_main_process:\n tensorboard_log_fpath = 
os.path.join(\n WORKDIR,\n '.tensorboard_logs',\n self.tensorboard_id,\n self.start_datetime.strftime(\"%d-%m-%Y_%H-%M-%S\")\n )\n logging.info(\n \"Writing TensorBoard logs in: %s\",\n tensorboard_log_fpath.replace(WORKDIR, '$WORKDIR'))\n self.tensorboard_writer = SummaryWriter(tensorboard_log_fpath)\n\n # NOTE: Infinite loop over epochs, termination is handled via iteration count\n while True:\n\n # If beginning of pre-training: read files from hdf5_directory and shuffle\n if (not self.resume_pretraining) or (epoch > 0) \\\n or (self.phase2 and self.global_step < 1) or self.init_checkpoint:\n files = []\n for fname in os.listdir(self.hdf5_directory):\n fpath = os.path.join(self.hdf5_directory, fname)\n if os.path.isfile(fpath) and fname.startswith('training.') and fname.endswith('.hdf5'):\n files.append(fpath)\n f_start_id = 0\n files.sort()\n random.Random(self.random_seed + epoch).shuffle(files)\n # Else: get id of next file\n else:\n f_start_id = self.checkpoint['files'][0]\n files = self.checkpoint['files'][1:]\n self.resume_pretraining = False\n num_files = len(files)\n\n # Get the current process hdf5 file\n # and handle case where there are more processes than files left:\n if \\\n torch.distributed.is_initialized() \\\n and torch.distributed.get_world_size() > num_files:\n\n remainder = torch.distributed.get_world_size() % num_files\n hdf5_fpath = files[\n (\n f_start_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n + remainder * f_start_id\n ) % num_files\n ]\n else:\n hdf5_fpath = files[\n (\n f_start_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n ) % num_files\n ]\n\n # Set previous_file variable for next iteration\n previous_file = hdf5_fpath\n\n # Load the pre-training data from the .hdf5 file\n pretraining_data = PretrainingDataset(\n hdf5_fpath=hdf5_fpath,\n max_masked_tokens_per_input=self.max_masked_tokens_per_input\n )\n train_sampler = RandomSampler(pretraining_data)\n train_dataloader = DataLoader(\n pretraining_data,\n sampler=train_sampler,\n batch_size=self.batch_size * self.n_gpu,\n num_workers=4, pin_memory=True\n )\n overflow_buf = None\n if self.allreduce_post_accumulation:\n overflow_buf = torch.cuda.IntTensor([0])\n\n # Loop over the rest of pre-training data files\n if len(files) == 1:\n f_start_id = -1\n for f_id in range(f_start_id + 1, len(files)):\n\n # Submit creation of next DataLoader\n if torch.distributed.get_world_size() > num_files:\n hdf5_fpath = files[\n (\n f_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n + remainder * f_id\n ) % num_files\n ]\n else:\n hdf5_fpath = files[\n (\n f_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n ) % num_files\n ]\n if self.is_main_process:\n logging.info(\n \"Local rank: %s | File n° %s: %s\",\n self.local_rank, f_id, os.path.basename(previous_file)\n )\n previous_file = hdf5_fpath\n dataset_future = pool.submit(\n create_pretraining_dataloader,\n hdf5_fpath,\n self.max_masked_tokens_per_input,\n self.batch_size * self.n_gpu,\n )\n\n # Iterate over batches (w/ progress bar for main process)\n training_batches = tqdm(\n train_dataloader,\n desc=\"Pre-training...\"\n ) if self.is_main_process else train_dataloader\n for batch in training_batches:\n training_steps += 1\n (\n input_ids,\n segment_ids,\n input_mask,\n masked_lm_labels,\n next_sentence_labels\n ) = [tensor.to(self.device) for tensor in batch]\n\n # Forward Pass\n model_output = self.model(\n input_ids=input_ids,\n token_type_ids=segment_ids,\n 
attention_mask=input_mask,\n labels=masked_lm_labels,\n next_sentence_label=next_sentence_labels)\n loss = model_output['loss']\n if self.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n\n divisor = self.num_accumulation_steps\n if self.num_accumulation_steps > 1:\n if not self.allreduce_post_accumulation:\n # this division was merged into predivision\n loss = loss / self.num_accumulation_steps\n divisor = 1.0\n\n # Compute gradients\n if self.fp16:\n with amp.scale_loss(\n loss, self.optimizer,\n delay_overflow_check=self.allreduce_post_accumulation) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n average_loss += loss.item()\n\n # Take optimizer/scheduler step every (gradient_acc_steps) steps\n # This is the model parameter update:\n if training_steps % self.num_accumulation_steps == 0:\n self.lr_scheduler.step() # learning rate warmup\n self.take_optimizer_step(overflow_buf)\n\n # If reached max steps save everything and log final loss:\n if self.global_step >= self.total_steps:\n last_num_steps = int(\n training_steps / self.num_accumulation_steps\n ) % self.log_freq\n last_num_steps = self.log_freq if last_num_steps == 0 else last_num_steps\n average_loss = torch.tensor(average_loss, dtype=torch.float32).cuda()\n average_loss = average_loss / (last_num_steps * divisor)\n if torch.distributed.is_initialized():\n average_loss /= torch.distributed.get_world_size()\n torch.distributed.all_reduce(average_loss)\n if self.is_main_process:\n logging.info(\n \"Total Steps: %s | Final Loss = %.3f\",\n int(training_steps / self.num_accumulation_steps),\n average_loss.item()\n )\n self.tensorboard_writer.add_scalar(\n \"Avg. training loss\",\n average_loss.item(), global_step=self.global_step)\n\n # If at a logging step:\n elif training_steps % (self.log_freq * self.num_accumulation_steps) == 0:\n if self.is_main_process:\n logging_message = (\n f\"Global step: {self.global_step} | \"\n f\"Learning Rate: {self.optimizer.param_groups[0]['lr']:.2E} | \"\n f\"Step Loss: {loss.item() * self.num_accumulation_steps / divisor:.3f} | \"\n f\"Avg. Loss: {average_loss / (self.log_freq * divisor):.3f}\"\n )\n # Update the tqdm description\n training_batches.set_description(logging_message, refresh=True)\n # Log average training loss to TensorBoard:\n self.tensorboard_writer.add_scalar(\n \"Avg. 
training loss\",\n average_loss / (self.log_freq * divisor),\n global_step=self.global_step)\n average_loss = 0\n\n # If reached max steps at log step or reached checkpoint step:\n if \\\n self.global_step >= self.total_steps \\\n or training_steps % \\\n (self.checkpoint_interval * self.num_accumulation_steps) == 0:\n\n # Check if model has improved then save a checkpoint if so\n if self.do_validation:\n model_has_improved = self.run_validation()\n else:\n model_has_improved = True\n if self.is_main_process and model_has_improved:\n self.make_checkpoint(f_id, files)\n\n # End pre-training if reached max steps\n if self.global_step >= self.total_steps:\n del train_dataloader\n return # NOTE: breaks out of the training loop\n\n # Move to next file after using up all batches of current file\n del train_dataloader\n train_dataloader, hdf5_fpath = \\\n dataset_future.result(timeout=None)\n\n # Update epoch after going through all .hdf5 files\n epoch += 1", "def train():\n # YOUR TRAINING CODE GOES HERE", "def on_train_start(self, trainer, pl_module):\n self.client.set_tags(self.run_id, {\"Mode\": \"training\"})\n\n params = {\"epochs\": trainer.max_epochs}\n\n # TODO For logging optimizer params - Following scenarios are to revisited.\n # 1. In the current scenario, only the first optimizer details are logged.\n # Code to be enhanced to log params when multiple optimizers are used.\n # 2. mlflow.log_params is used to store optimizer default values into mlflow.\n # The keys in default dictionary are too short, Ex: (lr - learning_rate).\n # Efficient mapping technique needs to be introduced\n # to rename the optimizer parameters based on keys in default dictionary.\n\n if hasattr(trainer, \"optimizers\"):\n optimizer = trainer.optimizers[0]\n params[\"optimizer_name\"] = _get_optimizer_name(optimizer)\n\n if hasattr(optimizer, \"defaults\"):\n params.update(optimizer.defaults)\n\n self.client.log_params(self.run_id, params)\n self.client.flush(synchronous=True)", "def train(self) -> None:\n\n # Check if in the saved model path there is already a trained model\n if self.config.TRN_HYPERP[\"save_path\"]:\n if tf.saved_model.contains_saved_model(self.config.TRN_HYPERP[\"save_path\"]):\n print(\"INFO: An existing saved model will be used for inference\\n\")\n else:\n params = {**self.config.TRN_HYPERP, **self.config.DATASET_HYPERP}\n trainer = Trainer(**params)\n\n print(f\"INFO: Starting training ... \\n\")\n start_time = time.time()\n trainer.train()\n print(f\"\\nINFO: Training completed in {round((time.time() - start_time)/60, 2)} minutes.\\n\")\n\n # Instantiate the saved translator for inference\n saved_path = self.config.TRN_HYPERP[\"save_path\"]\n self.saved_translator = tf.saved_model.load(saved_path)\n else:\n print(\"INFO: Path to save model wasn't provided in config file. 
Can't train the model\\n\")", "def do(self):\n super().do()\n logger.info(\"TrainPipeStep started...\")\n records = self._get_current_step_records()\n logger.debug(\"load pipestep records: {}\".format(records))\n self.num_models = len(records)\n self.num_epochs = self.num_models * TrainerConfig.epochs\n self.update_status(Status.running)\n self.master = create_master()\n self._train_multi_models(records)\n self.master.join()\n ReportServer().output_step_all_records(step_name=self.task.step_name)\n self.master.close()\n ReportServer().backup_output_path()\n self.update_status(Status.finished)", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def train(self, train_loader):\n pass", "def train(self):\n logging.info(\"Training DINTModel.\")\n start = time.time()\n tr = self.classifier.train()\n return time.time() - start", "def train():\n import trace\n trace.train()", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train(self):\n if self.retrain:\n self.states = self.get_states()\n self.transitions = self.get_transitions()\n self.matrix = self.get_matrix()\n self.save_training()\n else:\n self.load_training()", "def training(self):\r\n self.model, self.voc = svm_clf_training('all', self.dataset)\r\n return 0", "def _training_before_hook(self):\n pass", "def train(self, render_env: bool = False) -> None:\n\n print(\":: Environment rendering autorised: {}\".format(render_env))\n\n consol_print_learning_stats = ConsolPrintLearningStats(self.exp_spec,\n self.exp_spec.print_metric_every_what_epoch)\n\n \"\"\" ---- Setup run dir name ---- \"\"\"\n self.this_run_dir = setup_commented_run_dir_str(self.exp_spec, self.agent_root_dir)\n\n \"\"\" ---- Create run dir & setup file writer for TensorBoard ---- \"\"\"\n self.writer = tf_cv1.summary.FileWriter(self.this_run_dir, tf_cv1.get_default_graph())\n\n \"\"\" ---- Log experiment spec in run directory ---- \"\"\"\n try:\n with open(\"{}/config.txt\".format(self.this_run_dir), \"w\") as f:\n f.write(self.exp_spec.__repr__())\n except IOError as e:\n raise IOError(\"The config file cannot be saved in the run directory!\") from e\n\n \"\"\" ---- Start training agent ---- \"\"\"\n for epoch in self._training_epoch_generator(consol_print_learning_stats, render_env):\n (epoch, epoch_loss, batch_average_trjs_return, batch_average_trjs_lenght) = epoch\n\n \"\"\" ---- Teardown ---- \"\"\"\n consol_print_learning_stats.print_experiment_stats(print_plot=self.exp_spec.show_plot)\n\n self.writer.close()\n return None", "def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = 
create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], training_config[\"LOGFILE\"])\n write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train(self):\n\n # Set the pretrain log\n trlog = {}\n trlog['args'] = vars(self.args)\n trlog['train_loss'] = []\n trlog['val_loss'] = []\n trlog['train_acc'] = []\n trlog['val_acc'] = []\n trlog['train_iou']=[]\n trlog['val_iou']=[]\n trlog['max_iou'] = 0.0\n trlog['max_iou_epoch'] = 0\n\n # Set the timer\n timer = Timer()\n # Set global count to zero\n global_count = 0\n # Set tensorboardX\n writer = SummaryWriter(comment=self.args.save_path)\n\n # Start pretrain\n for epoch in range(1, self.args.pre_max_epoch + 1):\n # Update learning rate\n self.lr_scheduler.step()\n # Set the model to train mode\n 
self.model.train()\n self.model.mode = 'train'\n # Set averager classes to record training losses and accuracies\n train_loss_averager = Averager()\n train_acc_averager = Averager()\n train_iou_averager = Averager()\n\n # Using tqdm to read samples from train loader\n tqdm_gen = tqdm.tqdm(self.train_loader)\n\n for i, batch in enumerate(tqdm_gen, 1):\n # Update global count number \n global_count = global_count + 1\n if torch.cuda.is_available():\n data, label = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n label = batch[1]\n\n # Output logits for model\n logits = self.model(data)\n # Calculate train loss\n # CD loss is modified in the whole project to incorporate ony Cross Entropy loss. Modify as per requirement.\n #loss = self.FL(logits, label) + self.CD(logits,label) + self.LS(logits,label)\n loss = self.CD(logits,label) \n \n # Calculate train accuracy\n self._reset_metrics()\n seg_metrics = eval_metrics(logits, label, self.args.num_classes)\n self._update_seg_metrics(*seg_metrics)\n pixAcc, mIoU, _ = self._get_seg_metrics(self.args.num_classes).values()\n\n # Add loss and accuracy for the averagers\n train_loss_averager.add(loss.item())\n train_acc_averager.add(pixAcc)\n train_iou_averager.add(mIoU)\n\n # Print loss and accuracy till this step\n tqdm_gen.set_description('Epoch {}, Loss={:.4f} Acc={:.4f} IOU={:.4f}'.format(epoch, train_loss_averager.item(),train_acc_averager.item()*100.0,train_iou_averager.item()))\n \n # Loss backwards and optimizer updates\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update the averagers\n train_loss_averager = train_loss_averager.item()\n train_acc_averager = train_acc_averager.item()\n train_iou_averager = train_iou_averager.item()\n\n writer.add_scalar('data/train_loss(Pre)', float(train_loss_averager), epoch)\n writer.add_scalar('data/train_acc(Pre)', float(train_acc_averager)*100.0, epoch) \n writer.add_scalar('data/train_iou (Pre)', float(train_iou_averager), epoch)\n \n print('Epoch {}, Train: Loss={:.4f}, Acc={:.4f}, IoU={:.4f}'.format(epoch, train_loss_averager, train_acc_averager*100.0,train_iou_averager)) \n \n # Start validation for this epoch, set model to eval mode\n self.model.eval()\n self.model.mode = 'val'\n\n # Set averager classes to record validation losses and accuracies\n val_loss_averager = Averager()\n val_acc_averager = Averager()\n val_iou_averager = Averager()\n\n # Print previous information \n if epoch % 1 == 0:\n print('Best Val Epoch {}, Best Val IoU={:.4f}'.format(trlog['max_iou_epoch'], trlog['max_iou']))\n\n # Run validation\n for i, batch in enumerate(self.val_loader, 1):\n if torch.cuda.is_available():\n data, labels,_ = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n label=labels[0]\n p = self.args.way*self.args.shot\n data_shot, data_query = data[:p], data[p:]\n label_shot,label=labels[:p],labels[p:]\n \n par=data_shot, label_shot, data_query\n logits = self.model(par)\n # Calculate preval loss\n \n #loss = self.FL(logits, label) + self.CD(logits,label) + self.LS(logits,label)\n loss = self.CD(logits,label) \n \n # Calculate val accuracy\n self._reset_metrics()\n seg_metrics = eval_metrics(logits, label, self.args.way)\n self._update_seg_metrics(*seg_metrics)\n pixAcc, mIoU, _ = self._get_seg_metrics(self.args.way).values()\n\n val_loss_averager.add(loss.item())\n val_acc_averager.add(pixAcc)\n val_iou_averager.add(mIoU) \n\n # Update validation averagers\n val_loss_averager = val_loss_averager.item()\n val_acc_averager = val_acc_averager.item()\n val_iou_averager = 
val_iou_averager.item()\n \n writer.add_scalar('data/val_loss(Pre)', float(val_loss_averager), epoch)\n writer.add_scalar('data/val_acc(Pre)', float(val_acc_averager)*100.0, epoch) \n writer.add_scalar('data/val_iou (Pre)', float(val_iou_averager), epoch) \n \n # Print loss and accuracy for this epoch\n print('Epoch {}, Val: Loss={:.4f} Acc={:.4f} IoU={:.4f}'.format(epoch, val_loss_averager, val_acc_averager*100.0,val_iou_averager))\n\n # Update best saved model\n if val_iou_averager > trlog['max_iou']:\n trlog['max_iou'] = val_iou_averager\n trlog['max_iou_epoch'] = epoch\n print(\"model saved in max_iou\")\n self.save_model('max_iou')\n\n # Save model every 10 epochs\n if epoch % 10 == 0:\n self.save_model('epoch'+str(epoch))\n\n # Update the logs\n trlog['train_loss'].append(train_loss_averager)\n trlog['train_acc'].append(train_acc_averager)\n trlog['val_loss'].append(val_loss_averager)\n trlog['val_acc'].append(val_acc_averager)\n trlog['train_iou'].append(train_iou_averager)\n trlog['val_iou'].append(val_iou_averager)\n\n # Save log\n torch.save(trlog, osp.join(self.args.save_path, 'trlog'))\n\n if epoch % 1 == 0:\n print('Running Time: {}, Estimated Time: {}'.format(timer.measure(), timer.measure(epoch / self.args.max_epoch)))\n writer.close()", "def train(self) -> None:\n for module in self.modules.values():\n module.train()\n return", "def main(_):\n utility.set_up_logging()\n if not FLAGS.config:\n raise KeyError('You must specify a configuration.')\n logdir = FLAGS.logdir and os.path.expanduser(os.path.join(\n FLAGS.logdir, '{}-{}'.format(FLAGS.timestamp, FLAGS.config)))\n try:\n config = utility.load_config(logdir)\n except IOError:\n config = tools.AttrDict(getattr(configs, FLAGS.config)())\n config = utility.save_config(config, logdir)\n for score in train(config, FLAGS.env_processes):\n tf.logging.info('Score {}.'.format(score))", "def train(self, epochs):\n print('Starting training...')\n print('\\n{:13} '\n '{:>17} '\n '{:^38}'\n ''.format('', '--- Training ---', '--- Validation ---'))\n print('{:4} {:>8} '\n '{:>8} {:>8} '\n '{:>8} {:>8} {:>8} {:>8}'\n ''.format('', '', 'Loss', 'Acc', 'Loss', 'Prc', 'Rec', 'Acc'))\n training_time = 0\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n trn_stats = self.__progress(self.training, self.__train_fn)\n val_stats = self.__progress(self.validation, self.__val_fn)\n elapsed_time = time.time() - start_time\n training_time += elapsed_time\n print('{:>4} {:>7.2f}s '\n '{:>8.3f} {:>8.1%} '\n '{:>8.3f} {:>8.1%} {:>8.1%} {:>8.1%}'\n ''.format(epoch, elapsed_time,\n trn_stats[0], trn_stats[-1],\n *val_stats))\n self.history.append([epoch] + list(trn_stats) + list(val_stats))\n self.report['epochs'] = epochs\n self.report['time_per_epoch'] = training_time / epochs", "def train(self, batch):\n pass", "def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", 
{})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )", "def train(self):\n # Restore models\n global_step = self._restore_models_and_step()\n \n if self.gold and global_step >= self.gold_step:\n self.netD.use_gold = True\n\n print(\"INFO: Starting training from global step {}...\".format(\n global_step))\n logit_save_num = 0\n\n self.logit_results = defaultdict(dict)\n\n try:\n start_time = time.time()\n\n # Mixed precision\n if self.amp:\n print(\"INFO: Using mixed precision training...\")\n scaler = torch.cuda.amp.GradScaler()\n else:\n scaler = None\n\n # Iterate through data\n iter_dataloader = iter(self.dataloader)\n if self.train_drs:\n iter_dataloader_drs = iter(self.dataloader_drs)\n while global_step < self.num_steps:\n log_data = metric_log.MetricLog() # log data for tensorboard\n\n if self.topk:\n self.netG.decay_topk_rate(global_step, epoch_steps=len(self.dataloader))\n\n if self.gold and global_step == self.gold_step:\n self.netD.use_gold = True\n # -------------------------\n # One Training Step\n # -------------------------\n # Update n_dis times for D\n for i in range(self.n_dis):\n iter_dataloader, real_batch = self._fetch_data(\n iter_dataloader=iter_dataloader)\n\n # ------------------------\n # Update D Network\n # -----------------------\n log_data = self.netD.train_step(\n real_batch=real_batch,\n netG=self.netG,\n optD=self.optD,\n log_data=log_data,\n global_step=global_step,\n device=self.device,\n scaler=scaler)\n\n # train netD2 for DRS\n if self.train_drs:\n iter_dataloader_drs, real_batch_drs = self._fetch_data(\n iter_dataloader=iter_dataloader_drs)\n log_data = self.netD_drs.train_step(\n real_batch=real_batch_drs,\n netG=self.netG,\n optD=self.optD_drs,\n log_data=log_data,\n global_step=global_step,\n device=self.device,\n scaler=scaler)\n\n # -----------------------\n # Update G Network\n # -----------------------\n # Update G, but only once.\n if i == (self.n_dis - 1):\n log_data = self.netG.train_step(\n real_batch=real_batch,\n netD=self.netD,\n optG=self.optG,\n global_step=global_step,\n log_data=log_data,\n device=self.device,\n scaler=scaler)\n\n # --------------------------------\n # Update Training Variables\n # -------------------------------\n global_step += 1\n\n log_data = self.scheduler.step(log_data=log_data,\n global_step=global_step)\n\n # -------------------------\n # Logging and Metrics\n # -------------------------\n if global_step % self.log_steps == 0:\n self.logger.write_summaries(log_data=log_data,\n global_step=global_step)\n\n if global_step % self.print_steps == 0:\n curr_time = time.time()\n topk_rate = self.netG.topk_rate if hasattr(self.netG, 'topk_rate') else 1\n log_data.add_metric(f'topk_rate', topk_rate, group='topk_rate', precision=6)\n self.logger.print_log(global_step=global_step,\n log_data=log_data,\n time_taken=(curr_time - start_time) /\n self.print_steps)\n start_time = curr_time\n\n if global_step % self.vis_steps == 0:\n if 'gaussian' in self.log_dir:\n 
plot_gaussian_samples(netG=self.netG,\n global_step=global_step,\n log_dir=self.log_dir,\n device=self.device)\n else:\n self.logger.vis_images(netG=self.netG,\n global_step=global_step)\n \n if self.save_logits and global_step % self.logit_save_steps == 0 and global_step >= self.save_logit_after and global_step <= self.stop_save_logit_after:\n if self.train_drs:\n netD = self.netD_drs\n netD_name = 'netD_drs'\n else:\n netD = self.netD\n netD_name = 'netD'\n mode = 'eval' if self.save_eval_logits else 'train'\n print(f\"INFO: logit saving {mode} netD: {netD_name}...\")\n logit_list = self._get_logit(netD=netD, eval_mode=mode=='eval')\n self.logit_results[f'{netD_name}_{mode}'][global_step] = logit_list\n\n logit_save_num += 1\n\n if global_step % self.save_steps == 0:\n print(\"INFO: Saving checkpoints...\")\n self._save_model_checkpoints(global_step)\n if self.save_logits and global_step >= self.save_logit_after:\n self._save_logit(self.logit_results)\n\n print(\"INFO: Saving final checkpoints...\")\n self._save_model_checkpoints(global_step)\n if self.save_logits and global_step >= self.save_logit_after:\n self._save_logit(self.logit_results)\n\n except KeyboardInterrupt:\n print(\"INFO: Saving checkpoints from keyboard interrupt...\")\n self._save_model_checkpoints(global_step)\n if self.save_logits and global_step >= self.save_logit_after:\n self._save_logit(self.logit_results)\n\n finally:\n self.logger.close_writers()\n\n print(\"INFO: Training Ended.\")", "def train(self, trainData):\n pass", "def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def setup(self) -> None:\n self.logger.info(\"ML Train task: setup method called.\")", "def run_training_loop():\n logging.info(\"Starting the training loop.\")\n\n trainer = trainer_class(\n output_dir=output_dir,\n train_env=train_env,\n eval_env=eval_env,\n trajectory_dump_dir=trajectory_dump_dir,\n )\n trainer.training_loop(n_epochs=n_epochs)", "def on_train_forward(self, runner):\n self.on_iter_forward(runner)", "def train(self) -> Any:\n pass", "def train_epoch(self):\n for batch, targets in self.training_dataloader:\n self.training_step(batch, targets)\n self.calculate_training_loss()\n self.epochs_trained += 1\n LOGGER.info(\n \"Training loss after {} epochs: {}\".format(str(self.epochs_trained), str(self.training_average_loss))\n )", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def start_training(params):\n\n\n\n # CREATE A FOLDER TO HOLD RESULTS\n\n\n exp_pref = \"../results/\" + params.EXPERIMENT_PREFIX\n time_str = time.strftime(\"_%m-%d-%H-%M_\", time.gmtime())\n exp_dir = exp_pref + time_str + \\\n \"{}\".format(params.LEARNING_RATE).replace(\".\", \"p\") + \"_\" \\\n + \"{}\".format(params.DISCOUNT).replace(\".\", \"p\")\n\n try:\n os.stat(exp_dir)\n except OSError:\n os.makedirs(exp_dir)\n\n logger = logging.getLogger(\"DeepLogger\")\n logger.setLevel(logging.INFO)\n\n # Logging filehandler\n #fh = logging.FileHandler(exp_dir + \"/log.log\")\n # Rotate file when filesize is 5 mb\n fh = RotatingFileHandler(exp_dir + \"/log.log\", maxBytes=5000000, backupCount=100)\n\n fh.setLevel(logging.INFO)\n\n # Console filehandler\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n logger.addHandler(fh)\n\n # Prevent 
nohup from producing large log file, logging to file is handled internally\n # logger.addHandler(ch)\n\n log_params(logger, params)\n\n #logging.basicConfig(level=logging.INFO, filename=exp_dir + \"/log.log\")\n\n\n if params.DETERMINISTIC:\n rng = np.random.RandomState(12345)\n else:\n rng = np.random.RandomState()\n\n if params.CUDNN_DETERMINISTIC:\n theano.config.dnn.conv.algo_bwd = 'deterministic'\n\n # Init ale\n ale = ale_python_interface.ALEInterface()\n ale.setInt('random_seed', 123)\n ale.setBool('display_screen', params.DISPLAY_SCREEN)\n ale.setFloat('repeat_action_probability', params.REPEAT_ACTION_PROBABILITY)\n full_rom_path = os.path.join(params.ROM_PATH, params.ROM_NAME)\n ale.loadROM(full_rom_path)\n num_actions = len(ale.getMinimalActionSet())\n\n print \"Legal actions: \", num_actions\n print ale.getMinimalActionSet()\n\n # Instantiate network\n logger.info(\"Setting up network...\")\n network = None # Be able to continue training from a network or watch a network play\n if (params.NETWORK_PICKLE_FILE is None):\n logger.info(\"Initializing a new random network...\")\n network = q_network.DeepQLearner(params.RESIZED_WIDTH,\n params.RESIZED_HEIGHT,\n num_actions,\n params.PHI_LENGTH,\n params.DISCOUNT,\n params.LEARNING_RATE,\n params.RMS_DECAY,\n params.RMS_EPSILON,\n params.MOMENTUM,\n params.CLIP_DELTA,\n params.FREEZE_INTERVAL,\n params.BATCH_SIZE,\n params.NETWORK_TYPE,\n params.UPDATE_RULE,\n params.BATCH_ACCUMULATOR,\n rng)\n else:\n logger.info(\"Loading network instance from file...\")\n handle = open(params.NETWORK_PICKLE_FILE, 'r')\n network = cPickle.load(handle)\n\n\n # Only used when getting a random network\n if params.RANDOM_NETWORK_PICKLE:\n import sys\n sys.setrecursionlimit(10000)\n result_net_file = open(params.EXPERIMENT_PREFIX + '.pkl', 'w')\n print \"File opened\"\n cPickle.dump(network, result_net_file, -1)\n print \"Pickle dumped\"\n result_net_file.close()\n sys.exit(0)\n\n\n # Instatiate agent\n logger.info(\"Setting up agent...\")\n agent = ale_agent.NeuralAgent(network,\n params.EPSILON_START,\n params.EPSILON_MIN,\n params.EPSILON_DECAY,\n params.REPLAY_MEMORY_SIZE,\n exp_dir,\n params.REPLAY_START_SIZE,\n params.UPDATE_FREQUENCY,\n rng)\n\n # Instantiate experient\n logger.info(\"Setting up experiment...\")\n experiment = ale_experiment.ALEExperiment(ale, agent,\n params.RESIZED_WIDTH,\n params.RESIZED_HEIGHT,\n params.RESIZE_METHOD,\n params.EPOCHS,\n params.STEPS_PER_EPOCH,\n params.STEPS_PER_TEST,\n params.FRAME_SKIP,\n params.DEATH_ENDS_EPISODE,\n params.MAX_START_NULLOPS,\n rng)\n\n\n # Run experiment\n logger.info(\"Running experiment...\")\n experiment.run()", "def test_training(self):\n self.classifier.train(\"test\", self.message)", "def test_train(self):\n print \"x=\",self.trainer.train()", "def before_train(self, logs=None):\n self.config = self.trainer.config\n self.unrolled = self.trainer.config.unrolled\n self.device = self.trainer.config.device\n self.model = self.trainer.model\n self.optimizer = self.trainer.optimizer\n self.lr_scheduler = self.trainer.lr_scheduler\n self.loss = self.trainer.loss\n self.search_alg = SearchAlgorithm(SearchSpace())\n self._set_algorithm_model(self.model)\n self.trainer.train_loader = self.trainer._init_dataloader(mode='train')\n self.trainer.valid_loader = self.trainer._init_dataloader(mode='val')\n normal_selected_idxs = torch.tensor(len(self.model.alphas_normal) * [-1],\n requires_grad=False, dtype=torch.int).cuda()\n reduce_selected_idxs = torch.tensor(len(self.model.alphas_reduce) * [-1],\n 
requires_grad=False, dtype=torch.int).cuda()\n normal_candidate_flags = torch.tensor(len(self.model.alphas_normal) * [True],\n requires_grad=False, dtype=torch.bool).cuda()\n reduce_candidate_flags = torch.tensor(len(self.model.alphas_reduce) * [True],\n requires_grad=False, dtype=torch.bool).cuda()\n logging.info('normal_selected_idxs: {}'.format(normal_selected_idxs))\n logging.info('reduce_selected_idxs: {}'.format(reduce_selected_idxs))\n logging.info('normal_candidate_flags: {}'.format(normal_candidate_flags))\n logging.info('reduce_candidate_flags: {}'.format(reduce_candidate_flags))\n self.model.normal_selected_idxs = normal_selected_idxs\n self.model.reduce_selected_idxs = reduce_selected_idxs\n self.model.normal_candidate_flags = normal_candidate_flags\n self.model.reduce_candidate_flags = reduce_candidate_flags\n logging.info(F.softmax(torch.stack(self.model.alphas_normal, dim=0), dim=-1).detach())\n logging.info(F.softmax(torch.stack(self.model.alphas_reduce, dim=0), dim=-1).detach())\n self.normal_probs_history = []\n self.reduce_probs_history = []", "def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()", "def train(self, log_in_tensorboard=True):\n if log_in_tensorboard or self.config.save_model:\n os.makedirs(self.config.results_path, exist_ok=True)\n\n # Manage GPUs\n if 0 < self.num_gpus:\n num_gpus_per_worker = self.num_gpus / (\n self.config.train_on_gpu\n + self.config.num_workers * self.config.selfplay_on_gpu\n + log_in_tensorboard * self.config.selfplay_on_gpu\n + self.config.use_last_model_value * self.config.reanalyse_on_gpu\n )\n if 1 < num_gpus_per_worker:\n num_gpus_per_worker = math.floor(num_gpus_per_worker)\n else:\n num_gpus_per_worker = 0\n\n # Initialize workers\n self.training_worker = trainer.Trainer.options(\n num_cpus=0, num_gpus=num_gpus_per_worker if self.config.train_on_gpu else 0,\n ).remote(self.checkpoint, self.config)\n\n self.shared_storage_worker = shared_storage.SharedStorage.remote(\n self.checkpoint, self.config,\n )\n self.shared_storage_worker.set_info.remote(\"terminate\", False)\n\n self.replay_buffer_worker = replay_buffer.ReplayBuffer.remote(\n self.checkpoint, self.replay_buffer, self.config\n )\n\n if self.config.use_last_model_value:\n self.reanalyse_worker = replay_buffer.Reanalyse.options(\n num_cpus=0,\n num_gpus=num_gpus_per_worker if self.config.reanalyse_on_gpu else 0,\n ).remote(self.checkpoint, self.config)\n\n self.self_play_workers = [\n self_play.SelfPlay.options(\n num_cpus=0,\n num_gpus=num_gpus_per_worker if self.config.selfplay_on_gpu else 0,\n ).remote(\n self.checkpoint, self.Game, self.config, self.config.seed + seed,\n )\n for seed in range(self.config.num_workers)\n ]\n\n # Launch workers\n [\n self_play_worker.continuous_self_play.remote(\n self.shared_storage_worker, self.replay_buffer_worker\n )\n for self_play_worker in self.self_play_workers\n ]\n self.training_worker.continuous_update_weights.remote(\n self.replay_buffer_worker, self.shared_storage_worker\n )\n if self.config.use_last_model_value:\n self.reanalyse_worker.reanalyse.remote(\n self.replay_buffer_worker, self.shared_storage_worker\n )\n\n if log_in_tensorboard:\n self.logging_loop(\n num_gpus_per_worker if self.config.selfplay_on_gpu else 0,\n )", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, 
self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def train(self, session, train_examples, dev_examples, train_dir):\n\n # some free code to print out number of parameters in your model\n # it's always good to check!\n # you will also want to save your model parameters in train_dir\n # so that you can use your trained model to make predictions, or\n # even continue training\n\n tic = time.time()\n params = tf.trainable_variables()\n num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n toc = time.time()\n logging.info(\"Number of params: %d (retreival took %f secs)\" % (num_params, toc - tic))\n\n if self.summary_flag:\n self.train_writer = tf.summary.FileWriter(self.summaries_dir + '/train', session.graph)\n\n logging.info(\"Train Loss File: {}\".format(self.train_loss_log))\n logging.info(\"Dev Loss File: {}\".format(self.dev_loss_log))\n best_score = 100000\n train_log = open(self.train_loss_log, \"w\")\n dev_log = open(self.dev_loss_log, \"w\")\n for epoch in range(self.n_epoch):\n print(\"Epoch {:} out of {:}\".format(epoch + 1, self.n_epoch))\n dev_score = self.run_epoch(session, train_examples, dev_examples, epoch, train_log)\n dev_log.write(\"{},{}\\n\".format(epoch + 1, dev_score))\n logging.info(\"Average Dev Cost: {}\".format(dev_score))\n logging.info(\"train F1 & EM\")\n f1, em = self.evaluate_answer(session, train_examples, self.rev_vocab, log = True)\n logging.info(\"Dev F1 & EM\")\n f1, em = self.evaluate_answer(session, dev_examples, self.rev_vocab, log = True)\n if dev_score < best_score:\n best_score = dev_score\n print(\"New best dev score! 
Saving model in {}\".format(train_dir + \"/\" + self.model_name))\n self.saver.save(session, train_dir + \"/\" + self.model_name)\n\n return best_score", "def run(self):\n def update_logs(summary_writer, episode, reward, loss, epsilon):\n summary_writer.add_scalar('Reward', reward, episode)\n summary_writer.add_scalar('Loss', loss, episode)\n summary_writer.add_scalar('Epsilon', epsilon, episode)\n \n # Print model and init summary_writer\n summary(self.policy_net, (1, self.num_inputs))\n summary_writer = SummaryWriter(log_dir=f'./logs/{self.name}/')\n\n sum_reward = 0\n\n # Run nb_games\n for n in range(self.nb_games):\n\n reward, loss = self._run_one_game()\n\n # Update values and logs\n episode = self.nb_iter_prev + n\n sum_reward += reward\n self.epsilon = max(self.min_epsilon, self.epsilon * self.decay)\n update_logs(summary_writer, episode, reward, loss, self.epsilon)\n \n # Each update_frequency print and update target_net\n if (episode + 1) % self.update_frequency == 0:\n print(f'Episode: {episode + 1}, Epsilon: {self.epsilon}, '\n f'Reward: {reward}, Loss: {loss}, '\n f'Mean reward: {sum_reward/self.update_frequency}.')\n sum_reward = 0\n self._update_target_net()\n\n # End of the training\n self.nb_iter_prev += self.nb_games\n self.save()", "def _on_training_start(self) -> None:\n if self.eval_freq > 0:\n self.solver.run_tests(0, draw=self.draw, verbose=self.verbose)", "def train(self): \n self.current_step = 0\n self.log = log_setup(self.args)\n self.current_gamma = self.args.initial_gamma\n with tf.Session(graph = self.computation_graph) as session:\n self.init.run()\n print(\"Model Initialized.\")\n for repetition in range(0, self.args.epochs):\n\n random.shuffle(self.nodes)\n self.optimization_time = 0 \n self.average_loss = 0\n\n epoch_printer(repetition)\n for i in tqdm(range(int(len(self.edges)/self.args.batch_size))):\n self.current_step = self.current_step + 1\n self.current_gamma = gamma_incrementer(self.current_step, self.args.initial_gamma, self.current_gamma, self.true_step_size)\n feed_dict = self.feed_dict_generator(self.edges[i*self.args.batch_size:(i+1)*self.args.batch_size], self.current_step, self.current_gamma)\n start = time.time()\n _, loss = session.run([self.train_op , self.loss], feed_dict=feed_dict)\n end = time.time()\n self.optimization_time = self.optimization_time + (end-start)\n self.average_loss = self.average_loss + loss\n\n print(\"\")\n self.average_loss = self.average_loss/self.vocab_size\n self.final_embeddings = self.factorization_layer.embedding_matrix.eval()\n if \"CODE\" in self.args.model: \n self.c_means = self.cluster_layer.cluster_means.eval()\n self.modularity_score, assignments = neural_modularity_calculator(self.graph, self.final_embeddings, self.c_means)\n else:\n self.modularity_score, assignments = classical_modularity_calculator(self.graph, self.final_embeddings, self.args)\n self.log = log_updater(self.log, repetition, self.average_loss, self.optimization_time, self.modularity_score)\n tab_printer(self.log)\n if \"CODE\" in self.args.model: \n initiate_dump_grafcode(self.log, assignments, self.args, self.final_embeddings, self.c_means)\n else:\n initiate_dump_graf(self.log, assignments, self.args, self.final_embeddings)", "def train(self):\n start_time = time()\n self.model.train()\n\n for step, sample in enumerate(self.train_loader):\n self.optimizer.zero_grad()\n\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n loss.backward()\n 
self.train_losses.append(loss.item())\n\n self.optimizer.step(None)\n\n # print an incredible progress bar\n print(f'\\r{self.progress_bar} │ Loss: {np.mean(self.train_losses):.6f}', end='')\n self.progress_bar.inc()\n\n # log average loss of this epoch\n mean_epoch_loss = np.mean(self.train_losses)\n self.sw.add_scalar(tag='train_loss', scalar_value=mean_epoch_loss, global_step=self.epoch)\n self.train_losses = []\n\n # log epoch duration\n print(f' │ T: {time() - start_time:.2f} s')", "def train(\n self, training_data: TrainingData, cfg: DazuConfig, **kwargs: Any\n ) -> None:", "def call_training_routine(self):\n training_command = \"th main.lua \"\\\n \"-GPU_id %(GPU_identifier)i \"\\\n \"-number_of_GPUs %(number_of_GPUs)i \"\\\n \"-training_dataset %(training_dataset)s \"\\\n \"-testing_dataset %(testing_dataset)s \"\\\n \"-modelFilePath %(modelFilePath)s \"\\\n \"-maxepoch %(maxepoch)i \"\\\n \"-savingDirectory %(savingDirectory)s \"\\\n \"-learningRate %(learningRate)f \"\\\n \"-batchSize %(batchSize)i \"\\\n \"-momentum %(momentum)f\" % self.training_parameters\n\n if self.training_parameters[\"presavedModelPath\"] != \"\":\n training_command += \" -presavedModelPath %s\" %\\\n self.training_parameters[\"presavedModelPath\"]\n\n # Call the training command\n subprocess.call(training_command, shell=True)", "def train(self, request):\n model = request.get(\"model\")\n if not model:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n \"Model is not provided for Training Task\",\n )\n\n task = self._trainers.get(model)\n if not task:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n f\"Train Task is not Initialized. There is no model '{model}' available\",\n )\n\n request = copy.deepcopy(request)\n result = task(request, self.datastore())\n\n # Run all scoring methods\n if self._auto_update_scoring:\n self.async_scoring(None)\n return result", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n \r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model" ]
[ "0.78187734", "0.766623", "0.7507915", "0.74226147", "0.7411162", "0.7411162", "0.7411162", "0.7411162", "0.7411162", "0.73725444", "0.7329948", "0.7323073", "0.73172176", "0.72957176", "0.7275571", "0.72495633", "0.7228919", "0.7227007", "0.72220093", "0.7217784", "0.7202225", "0.7198964", "0.7191397", "0.7179416", "0.71673715", "0.71509296", "0.7140122", "0.71321064", "0.7115376", "0.7102492", "0.7094984", "0.7081416", "0.70713323", "0.70447314", "0.7033591", "0.7033591", "0.70217884", "0.70092636", "0.699168", "0.69715476", "0.696812", "0.6967148", "0.694918", "0.69451183", "0.69449806", "0.69420004", "0.6936235", "0.6930559", "0.69267786", "0.6917924", "0.69102967", "0.6889197", "0.6880349", "0.68795496", "0.6876861", "0.6875265", "0.687089", "0.68702203", "0.6868504", "0.68683493", "0.68644226", "0.6860388", "0.6858473", "0.685073", "0.6845569", "0.6842699", "0.68292016", "0.6820533", "0.67902416", "0.6787302", "0.6783641", "0.67831063", "0.6779687", "0.67631805", "0.6753253", "0.67528796", "0.6751373", "0.6736025", "0.67342824", "0.67337", "0.6729016", "0.67272806", "0.67214614", "0.6718474", "0.6718474", "0.6714168", "0.670036", "0.6697921", "0.66952664", "0.66921985", "0.6691674", "0.6691341", "0.6675888", "0.6673282", "0.6672983", "0.6672351", "0.6665191", "0.66612506", "0.66529596", "0.66518956", "0.6651485" ]
0.0
-1
The training process, including evaluations and loggers.
def train_nlp(model: ContinualModel, dataset: ContinualDataset, args: Namespace) -> None: model.net.to(model.device) results, results_mask_classes = [], [] model_stash = create_stash(model, args, dataset) if args.csv_log: csv_logger = CsvLogger(dataset.SETTING, dataset.NAME, model.NAME) if args.tensorboard: tb_logger = TensorboardLogger(args, dataset.SETTING, model_stash) model_stash['tensorboard_name'] = tb_logger.get_name() dataset_copy = get_dataset(args) for t in range(dataset.N_TASKS): model.net.train() _, _ = dataset_copy.get_data_loaders() if model.NAME != 'icarl' and model.NAME != 'pnn': # for forward transfer calculation random_results_class, random_results_task = evaluate_nlp(model, dataset_copy) print(file=sys.stderr) # start time start_time = time.time() for t in range(dataset.N_TASKS): model.net.train() train_loader, test_loader = dataset.get_data_loaders() if hasattr(model, 'begin_task'): model.begin_task(dataset) if t: accs = evaluate_nlp(model, dataset, last=True) results[t - 1] = results[t - 1] + accs[0] if dataset.SETTING == 'class-il': results_mask_classes[t - 1] = results_mask_classes[t - 1] + accs[1] for epoch in range(args.n_epochs): for i, data in enumerate(train_loader): if hasattr(dataset.train_loader.dataset, 'logits'): # todo: to add logits pass else: xs, ys, x_token_idxs, x_token_masks, y_token_idxs, y_token_masks, y_idxs = data x_token_idxs = x_token_idxs.to(model.device) x_token_masks = x_token_masks.to(model.device) y_token_idxs = y_token_idxs.to(model.device) y_token_masks = y_token_masks.to(model.device) y_idxs = y_idxs.to(model.device) task_id = torch.tensor(t, dtype=torch.int64, requires_grad=False) task_id = task_id.to(model.device) if model.require_task_id: loss = model.observe(inputs=x_token_idxs, inputs_mask=x_token_masks, labels=y_idxs, labels_name=y_token_idxs, labels_mask=y_token_masks, task_labels=task_id) else: loss = model.observe(inputs=x_token_idxs, inputs_mask=x_token_masks, labels=y_idxs, labels_name=y_token_idxs, labels_mask=y_token_masks) progress_bar(i, len(train_loader), epoch, t, loss) if args.tensorboard: tb_logger.log_loss(loss, args, epoch, t, i) model_stash['batch_idx'] = i + 1 model_stash['epoch_idx'] = epoch + 1 model_stash['batch_idx'] = 0 model_stash['task_idx'] = t + 1 model_stash['epoch_idx'] = 0 if hasattr(model, 'end_task'): model.end_task(dataset) # reduce the running freq # if (t+1) % args.eval_freq == 0: accs = evaluate_nlp(model, dataset) results.append(accs[0]) results_mask_classes.append(accs[1]) mean_acc = np.mean(accs, axis=1) print_mean_accuracy(mean_acc, t + 1, dataset.SETTING) # prob_model if args.prob_all_tasks: if args.prob_type != "": for prob_l in range(12): prob_l += 1 if args.prob_type == "proto": p_accs = prob_proto_nlp(model, dataset, prob_l=prob_l) else: p_accs = prob_final_nlp(model, dataset, prob_l=prob_l) p_mean_acc = np.mean(p_accs, axis=1) print("task {} prob_l {}: mean_acc {}, masked_mean_acc {}".format(t + 1, prob_l, round(p_mean_acc[0], 2), round(p_mean_acc[1], 2))) if args.csv_log: csv_logger.log_task_prob(p_mean_acc, t) model_stash['mean_accs'].append(mean_acc) if args.csv_log: csv_logger.log(mean_acc) if args.tensorboard: tb_logger.log_accuracy(np.array(accs), mean_acc, args, t) running_time = time.time() - start_time # prob_model if args.prob_type != "" and not args.prob_all_tasks: for prob_l in range(12): prob_l += 1 if args.prob_type == "proto": accs = prob_proto_nlp(model, dataset, prob_l=prob_l) else: accs = prob_final_nlp(model, dataset, prob_l=prob_l) mean_acc = np.mean(accs, axis=1) 
print("prob_l {}: mean_acc {}, masked_mean_acc {}".format(prob_l, mean_acc[0], mean_acc[1])) if args.csv_log: csv_logger.log_prob(mean_acc) if args.csv_log: csv_logger.add_bwt(results, results_mask_classes) csv_logger.add_running_time(running_time) csv_logger.add_forgetting(results, results_mask_classes) if model.NAME != 'icarl' and model.NAME != 'pnn': csv_logger.add_fwt(results, random_results_class, results_mask_classes, random_results_task) if args.tensorboard: tb_logger.close() if args.csv_log: csv_logger.write(vars(args))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def _train(self):\n training_environment = self._training_environment\n evaluation_environment = self._evaluation_environment\n policy = self._policy\n pool = self._pool\n\n if not self._training_started:\n self._init_training()\n\n self._initial_exploration_hook(\n training_environment, self._initial_exploration_policy, pool)\n\n self.sampler.initialize(training_environment, policy, pool)\n\n gt.reset_root()\n gt.rename_root('RLAlgorithm')\n gt.set_def_unique(False)\n\n self._training_before_hook()\n\n for self._epoch in gt.timed_for(range(self._epoch, self._n_epochs)):\n self._epoch_before_hook()\n gt.stamp('epoch_before_hook')\n\n start_samples = self.sampler._total_samples\n for i in count():\n samples_now = self.sampler._total_samples\n self._timestep = samples_now - start_samples\n\n if (samples_now >= start_samples + self._epoch_length\n and self.ready_to_train):\n break\n\n self._timestep_before_hook()\n gt.stamp('timestep_before_hook')\n\n self._do_sampling(timestep=self._total_timestep)\n gt.stamp('sample')\n\n if self.ready_to_train:\n self._do_training_repeats(timestep=self._total_timestep)\n gt.stamp('train')\n\n self._timestep_after_hook()\n gt.stamp('timestep_after_hook')\n\n training_paths = self.sampler.get_last_n_paths(math.ceil(self._epoch_length / self.sampler._max_path_length))\n gt.stamp('training_paths')\n evaluation_paths = self._evaluation_paths(policy, evaluation_environment)\n gt.stamp('evaluation_paths')\n\n training_metrics = self._evaluate_rollouts(training_paths, training_environment)\n gt.stamp('training_metrics')\n if evaluation_paths:\n evaluation_metrics = self._evaluate_rollouts(\n evaluation_paths, evaluation_environment)\n gt.stamp('evaluation_metrics')\n else:\n evaluation_metrics = {}\n\n self._epoch_after_hook(training_paths)\n gt.stamp('epoch_after_hook')\n\n sampler_diagnostics = self.sampler.get_diagnostics()\n\n diagnostics = self.get_diagnostics(\n iteration=self._total_timestep,\n batch=self._evaluation_batch(),\n training_paths=training_paths,\n evaluation_paths=evaluation_paths)\n\n time_diagnostics = gt.get_times().stamps.itrs\n\n diagnostics.update(OrderedDict((\n *(\n (f'evaluation/{key}', evaluation_metrics[key])\n for key in sorted(evaluation_metrics.keys())\n ),\n *(\n (f'training/{key}', training_metrics[key])\n for key in sorted(training_metrics.keys())\n ),\n *(\n (f'times/{key}', time_diagnostics[key][-1])\n for key in sorted(time_diagnostics.keys())\n ),\n *(\n (f'sampler/{key}', sampler_diagnostics[key])\n for key in 
sorted(sampler_diagnostics.keys())\n ),\n ('epoch', self._epoch),\n ('timestep', self._timestep),\n ('timesteps_total', self._total_timestep),\n ('train-steps', self._num_train_steps),\n )))\n\n if self._eval_render_kwargs and hasattr(\n evaluation_environment, 'render_rollouts'):\n # TODO(hartikainen): Make this consistent such that there's no\n # need for the hasattr check.\n training_environment.render_rollouts(evaluation_paths)\n\n yield diagnostics\n\n self.sampler.terminate()\n\n self._training_after_hook()\n\n yield {'done': True, **diagnostics}", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def start_training(self):\n self.training = True", "def train(self):\n return", "def _train(self):\n step = 0\n for epoch in range(self.opts.num_epochs):\n self.hub.publish(Topic.EPOCH, epoch)\n for i, data in enumerate(self.loader):\n # Compute loss ...\n # NOTE(ycho): if one of the callbacks require training loss,\n # e.g. 
for logging, simply register a hook to the loss module\n # rather than trying to extract them here.\n loss = self.loss_fn(self.model, data)\n self.hub.publish(Topic.TRAIN_LOSS, loss)\n\n # Backprop + Optimize ...\n self.optim.zero_grad()\n loss[\"total\"].backward()\n self.optim.step()\n\n # Emit `step` event.\n # == logging, saving, evaluation\n self.hub.publish(Topic.STEP, step)\n step += 1\n\n if step >= self.opts.train_steps:\n return", "def train(self):\n self.training = True", "def training(self):\n self.training = True", "def train_step(self):\n pass", "def train(self, training_data):\n pass", "def train():\n pass", "def start_training(self, logdir: str, **info):\n pass", "def start_training(self):\n if self.task_env is None:\n rospy.logfatal(\"No task environment found for training.\")\n if self.agent is None:\n rospy.logfatal(\"No agent found for training.\")\n self.agent.start_training()", "def train(self):\n raise NotImplementedError", "def train(self, current_hyper_params):\n train_loss = 0\n train_n_iter = 0\n # Set model to train mode\n self.model.train()\n # Iterate over train data\n print(\"Iterating over training data...\")\n for i, batch in enumerate(tqdm(self.train_loader)):\n loss = self._train_batch(batch)\n # Statistics\n train_loss += loss.item()\n train_n_iter += 1\n self.stats.train_loss_history.append(train_loss / train_n_iter)", "def run_step(self):\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If your want to do something with the data, you can wrap the dataloader.\n \"\"\"\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n \"\"\"\n If your want to do something with the losses, you can wrap the model.\n \"\"\"\n loss_dict = self.model(data)\n losses = sum(loss for loss in loss_dict.values())\n self._detect_anomaly(losses, loss_dict)\n\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n self._write_metrics(metrics_dict)\n \n validation_data = next(self.validation_data_loader_iter)\n val_losses_dict = self.model(validation_data)\n val_losses = sum(loss for loss in val_losses_dict.values())\n self._detect_anomaly(val_losses, val_losses_dict)\n\n val_metrics_dict = val_losses_dict\n val_metrics_dict[\"data_time\"] = data_time\n self._write_validation_metrics(val_metrics_dict)\n\n \"\"\"\n If you need accumulate gradients or something similar, you can\n wrap the optimizer with your custom `zero_grad()` method.\n \"\"\"\n self.optimizer.zero_grad()\n losses.backward()\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method.\n \"\"\"\n self.optimizer.step()", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def training(self):\n \n best_valid_loss = np.inf\n c = 0\n \n self.train_loader, self.test_loader = self.get_train_test_loaders()\n \n print('Training the {} model with the following architecture:'.format(self.model_name))\n print(summary(self.model, (3, self.image_width, self.image_height)))\n print('*'*100)\n print('Starting the training...')\n print('*'*100)\n \n # Create the model save dir if it already doesn't exist\n if not os.path.exists(self.model_save_dir):\n os.makedirs(self.model_save_dir)\n \n for epoch in range(self.n_epochs):\n\n print(f'Epoch: {epoch+1:02}')\n\n start_time = time.time()\n\n train_loss = self.train(self.train_loader)\n valid_loss = self.evaluate(self.test_loader)\n\n epoch_mins, epoch_secs = 
self.epoch_time(start_time, time.time())\n\n c+=1\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(self.model.state_dict(), os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)))\n c=0\n\n if c>4:\n #decrease lr if loss does not decrease after 5 steps\n self.scheduler.step()\n c=0\n\n print(f'Time: {epoch_mins}m {epoch_secs}s') \n print(f'Train Loss: {train_loss:.3f}')\n print(f'Val Loss: {valid_loss:.3f}')\n print('-'*60)\n print('The best validation loss is', best_valid_loss)\n print('*'*100)", "def start_training(self):\n self.training()\n \n images, true_labels, pred_labels, pred_probs = self.evaluate_model(proba=True)\n \n metrics = Metrics(images, true_labels, pred_labels, pred_probs, self.classes)\n\n cm = metrics.get_confusion_matrix()\n print('The confusion matrix is:\\n', cm)\n print('*'*100)\n \n cr = metrics.get_classification_report()\n print('The classification report is:\\n', cr)\n print('*'*100)", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train(self):\n p = self._params\n if self.train_data != None:\n tens_to_log = self.params.tens_to_log\n logging_hook = tf.train.LoggingTensorHook(tensors = tens_to_log,\n every_n_iter = p.logging_step,\n )\n t_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.train_data[\"x\"]},\n y = self.train_data[\"y\"],\n batch_size = p.batch_size,\n num_epochs = None,\n shuffle = True,\n )\n self._model.train(input_fn = t_fn,\n steps = self.params.training_steps,\n hooks = [logging_hook],\n )\n \n if self.eval_data != None:\n e_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.eval_data[\"x\"]},\n y = self.eval_data[\"y\"],\n num_epochs = 1,\n shuffle = False,\n )\n eval_results = self.model.evaluate(input_fn = e_fn,\n checkpoint_path = self.model_dir,\n )\n print(eval_results)", "def train(self, ):\n raise NotImplementedError", "def train(self):\n # setup model\n self.createModel()\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n \n # train model\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)\n # clear save paths to avoid overwriting accidentaly\n self.saveName = None", "def train(self):\n\t\traise NotImplementedError", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = 
best_params\n self.network.load_state_dict(best_params)", "def train(self):\n raise NotImplementedError()", "def setup_training(self):\n print('setup training called')\n self.steps_done = 0\n self.current_episode_num = 1\n self.total_reward = 0\n\n # self.optimizer = optim.RMSprop(policy_net.parameters())\n self.memory = ReplayMemory(300000)\n self.total_reward_history = []\n # self.loss_history = []\n self.positions = []\n self.n_destroyed_crates = 0\n self.is_in_bomb_range = False", "def on_train_begin(self, logs=None):", "def on_train_begin(self, logs=None):", "def start_training(self):\n i = 0\n for _ in range(self.train_steps):\n print(f\"Start Training Step {i + 1}\")\n self.model.learn(total_timesteps=self.total_time_steps)\n self.model.save(self.save_path)\n print(f\"Finished Training Step {i + 1}\")\n i += 1", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def Train(self):\n self.init_epoch = self.epoch\n if self.epoch >= self.params.num_epoch:\n WARNING('Num_epoch should be smaller than current epoch. Skip training......\\n')\n else:\n for _ in range(self.epoch, self.params.num_epoch):\n self.epoch += 1\n print('-' * 20 + 'Epoch.' + str(self.epoch) + '-' * 20)\n\n # train one epoch\n self.train_one_epoch()\n\n # should display\n if self.epoch % self.params.display == 0:\n print('\\tTrain loss: %.4f' % self.train_loss[-1])\n\n # should save\n if self.params.should_save:\n if self.epoch % self.params.save_every == 0:\n self.save_checkpoint()\n\n # test every params.test_every epoch\n if self.params.should_val:\n if self.epoch % self.params.val_every == 0:\n self.val_one_epoch()\n print('\\tVal loss: %.4f' % self.val_loss[-1])\n\n # adjust learning rate\n self.adjust_lr()\n self.train_one_epoch_Image_display() \n \n # save the last network state\n if self.params.should_save:\n self.save_checkpoint()\n\n # train visualization\n self.plot_curve()", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def on_train_begin(self, logs):\n self.train_start = timeit.default_timer()\n self.metrics_names = self.model.metrics_names\n print('Training for {} steps ...'.format(self.params['nb_steps']))", "def train(self, training_steps=10):", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, 
os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def on_train_begin(self, logs=None):\n self.start_time = datetime.datetime.now()\n print(f\"Starting training at {self.start_time}\")", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def on_train_begin(self, state: FuseManagerState) -> None:\n # File writer imports are done here in order to workaround the GPU issues -\n # when importing torch.tensorboard cuda gets occupied - do that only AFTER CUDA_VISIBLE_DEVICES is set\n try:\n # available only from torch 1.2\n from torch.utils.tensorboard import SummaryWriter\n self.writer_class = SummaryWriter\n self.use_summary_tf = False\n except ModuleNotFoundError:\n # fallback, use tensorflow file writer\n from tensorflow.summary import FileWriter\n import tensorflow as tf\n self.writer_class = FileWriter\n self.tf_summary = tf.Summary\n self.use_summary_tf = True\n\n tensorboard_train_dir = os.path.join(self.model_dir, 'train')\n tensorboard_validation_dir = os.path.join(self.model_dir, 'validation')\n\n # make sure we have these folders\n file.create_dir(tensorboard_train_dir, error_if_exist=False)\n file.create_dir(tensorboard_validation_dir, error_if_exist=False)\n\n # Get TensorBoard loggers\n self.tensorboard_logger_train = self.writer_class(tensorboard_train_dir)\n self.tensorboard_logger_validation = self.writer_class(tensorboard_validation_dir)\n pass", "def train(self, training_data, cfg, **kwargs):\n pass", "def on_train_begin(self, logs=None):\n pass", "def run(self):\n # This should do nothing if the user has already configured\n # logging, and will it least enable error messages otherwise.\n logging.basicConfig()\n\n # If this is resumption from a checkpoint, it is crucial to\n # reset `profile.current`. 
Otherwise, it simply does not hurt.\n self.profile.current = []\n\n # Sanity check for the most common case\n if (self._model and isinstance(self._model, Model) and\n isinstance(self.algorithm, GradientDescent)):\n if not (set(self._model.get_parameter_dict().values()) ==\n set(self.algorithm.parameters)):\n logger.warning(\"different parameters for model and algorithm\")\n\n with change_recursion_limit(config.recursion_limit):\n self.original_sigint_handler = signal.signal(\n signal.SIGINT, self._handle_epoch_interrupt)\n self.original_sigterm_handler = signal.signal(\n signal.SIGTERM, self._handle_batch_interrupt)\n try:\n logger.info(\"Entered the main loop\")\n if not self.status['training_started']:\n for extension in self.extensions:\n extension.main_loop = self\n self._run_extensions('before_training')\n with Timer('initialization', self.profile):\n self.algorithm.initialize()\n self.status['training_started'] = True\n # We can not write \"else:\" here because extensions\n # called \"before_training\" could have changed the status\n # of the main loop.\n if self.log.status['iterations_done'] > 0:\n self.log.resume()\n self._run_extensions('on_resumption')\n self.status['epoch_interrupt_received'] = False\n self.status['batch_interrupt_received'] = False\n with Timer('training', self.profile):\n while self._run_epoch():\n pass\n except TrainingFinish:\n self.log.current_row['training_finished'] = True\n except Exception as e:\n self._restore_signal_handlers()\n self.log.current_row['got_exception'] = traceback.format_exc()\n logger.error(\"Error occured during training.\" + error_message)\n try:\n self._run_extensions('on_error')\n except Exception:\n logger.error(traceback.format_exc())\n logger.error(\"Error occured when running extensions.\" +\n error_in_error_handling_message)\n reraise_as(e)\n finally:\n self._restore_signal_handlers()\n if self.log.current_row.get('training_finished', False):\n self._run_extensions('after_training')\n if config.profile:\n self.profile.report()", "def run_epoch(self):\n self.model_lr_scheduler.step()\n\n print(\"Training\")\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = self.step < 2000\n late_phase = self.step % 2000 == 0\n\n if early_phase or late_phase:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1", "def run_pretraining(self):\n if self.is_main_process:\n logging.info(\"*********************************\")\n logging.info(\"*** Starting pre-training ***\")\n logging.info(\"*********************************\")\n logging.info(\"Training on GPU: %s\", torch.cuda.get_device_name(0))\n logging.info(\"Target batch size: %s\", self.target_batch_size)\n logging.info(\"Number of accumulation steps: %s\", self.num_accumulation_steps)\n logging.info(\"Actual batch size: %s\", self.batch_size)\n\n self.model.train()\n self.most_recent_ckpts_paths = []\n average_loss = 0.0 # averaged loss every self.log_freq steps\n epoch = 0\n training_steps = 0\n pool = ProcessPoolExecutor(1)\n if self.is_main_process:\n tensorboard_log_fpath = 
os.path.join(\n WORKDIR,\n '.tensorboard_logs',\n self.tensorboard_id,\n self.start_datetime.strftime(\"%d-%m-%Y_%H-%M-%S\")\n )\n logging.info(\n \"Writing TensorBoard logs in: %s\",\n tensorboard_log_fpath.replace(WORKDIR, '$WORKDIR'))\n self.tensorboard_writer = SummaryWriter(tensorboard_log_fpath)\n\n # NOTE: Infinite loop over epochs, termination is handled via iteration count\n while True:\n\n # If beginning of pre-training: read files from hdf5_directory and shuffle\n if (not self.resume_pretraining) or (epoch > 0) \\\n or (self.phase2 and self.global_step < 1) or self.init_checkpoint:\n files = []\n for fname in os.listdir(self.hdf5_directory):\n fpath = os.path.join(self.hdf5_directory, fname)\n if os.path.isfile(fpath) and fname.startswith('training.') and fname.endswith('.hdf5'):\n files.append(fpath)\n f_start_id = 0\n files.sort()\n random.Random(self.random_seed + epoch).shuffle(files)\n # Else: get id of next file\n else:\n f_start_id = self.checkpoint['files'][0]\n files = self.checkpoint['files'][1:]\n self.resume_pretraining = False\n num_files = len(files)\n\n # Get the current process hdf5 file\n # and handle case where there are more processes than files left:\n if \\\n torch.distributed.is_initialized() \\\n and torch.distributed.get_world_size() > num_files:\n\n remainder = torch.distributed.get_world_size() % num_files\n hdf5_fpath = files[\n (\n f_start_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n + remainder * f_start_id\n ) % num_files\n ]\n else:\n hdf5_fpath = files[\n (\n f_start_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n ) % num_files\n ]\n\n # Set previous_file variable for next iteration\n previous_file = hdf5_fpath\n\n # Load the pre-training data from the .hdf5 file\n pretraining_data = PretrainingDataset(\n hdf5_fpath=hdf5_fpath,\n max_masked_tokens_per_input=self.max_masked_tokens_per_input\n )\n train_sampler = RandomSampler(pretraining_data)\n train_dataloader = DataLoader(\n pretraining_data,\n sampler=train_sampler,\n batch_size=self.batch_size * self.n_gpu,\n num_workers=4, pin_memory=True\n )\n overflow_buf = None\n if self.allreduce_post_accumulation:\n overflow_buf = torch.cuda.IntTensor([0])\n\n # Loop over the rest of pre-training data files\n if len(files) == 1:\n f_start_id = -1\n for f_id in range(f_start_id + 1, len(files)):\n\n # Submit creation of next DataLoader\n if torch.distributed.get_world_size() > num_files:\n hdf5_fpath = files[\n (\n f_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n + remainder * f_id\n ) % num_files\n ]\n else:\n hdf5_fpath = files[\n (\n f_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n ) % num_files\n ]\n if self.is_main_process:\n logging.info(\n \"Local rank: %s | File n° %s: %s\",\n self.local_rank, f_id, os.path.basename(previous_file)\n )\n previous_file = hdf5_fpath\n dataset_future = pool.submit(\n create_pretraining_dataloader,\n hdf5_fpath,\n self.max_masked_tokens_per_input,\n self.batch_size * self.n_gpu,\n )\n\n # Iterate over batches (w/ progress bar for main process)\n training_batches = tqdm(\n train_dataloader,\n desc=\"Pre-training...\"\n ) if self.is_main_process else train_dataloader\n for batch in training_batches:\n training_steps += 1\n (\n input_ids,\n segment_ids,\n input_mask,\n masked_lm_labels,\n next_sentence_labels\n ) = [tensor.to(self.device) for tensor in batch]\n\n # Forward Pass\n model_output = self.model(\n input_ids=input_ids,\n token_type_ids=segment_ids,\n 
attention_mask=input_mask,\n labels=masked_lm_labels,\n next_sentence_label=next_sentence_labels)\n loss = model_output['loss']\n if self.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n\n divisor = self.num_accumulation_steps\n if self.num_accumulation_steps > 1:\n if not self.allreduce_post_accumulation:\n # this division was merged into predivision\n loss = loss / self.num_accumulation_steps\n divisor = 1.0\n\n # Compute gradients\n if self.fp16:\n with amp.scale_loss(\n loss, self.optimizer,\n delay_overflow_check=self.allreduce_post_accumulation) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n average_loss += loss.item()\n\n # Take optimizer/scheduler step every (gradient_acc_steps) steps\n # This is the model parameter update:\n if training_steps % self.num_accumulation_steps == 0:\n self.lr_scheduler.step() # learning rate warmup\n self.take_optimizer_step(overflow_buf)\n\n # If reached max steps save everything and log final loss:\n if self.global_step >= self.total_steps:\n last_num_steps = int(\n training_steps / self.num_accumulation_steps\n ) % self.log_freq\n last_num_steps = self.log_freq if last_num_steps == 0 else last_num_steps\n average_loss = torch.tensor(average_loss, dtype=torch.float32).cuda()\n average_loss = average_loss / (last_num_steps * divisor)\n if torch.distributed.is_initialized():\n average_loss /= torch.distributed.get_world_size()\n torch.distributed.all_reduce(average_loss)\n if self.is_main_process:\n logging.info(\n \"Total Steps: %s | Final Loss = %.3f\",\n int(training_steps / self.num_accumulation_steps),\n average_loss.item()\n )\n self.tensorboard_writer.add_scalar(\n \"Avg. training loss\",\n average_loss.item(), global_step=self.global_step)\n\n # If at a logging step:\n elif training_steps % (self.log_freq * self.num_accumulation_steps) == 0:\n if self.is_main_process:\n logging_message = (\n f\"Global step: {self.global_step} | \"\n f\"Learning Rate: {self.optimizer.param_groups[0]['lr']:.2E} | \"\n f\"Step Loss: {loss.item() * self.num_accumulation_steps / divisor:.3f} | \"\n f\"Avg. Loss: {average_loss / (self.log_freq * divisor):.3f}\"\n )\n # Update the tqdm description\n training_batches.set_description(logging_message, refresh=True)\n # Log average training loss to TensorBoard:\n self.tensorboard_writer.add_scalar(\n \"Avg. 
training loss\",\n average_loss / (self.log_freq * divisor),\n global_step=self.global_step)\n average_loss = 0\n\n # If reached max steps at log step or reached checkpoint step:\n if \\\n self.global_step >= self.total_steps \\\n or training_steps % \\\n (self.checkpoint_interval * self.num_accumulation_steps) == 0:\n\n # Check if model has improved then save a checkpoint if so\n if self.do_validation:\n model_has_improved = self.run_validation()\n else:\n model_has_improved = True\n if self.is_main_process and model_has_improved:\n self.make_checkpoint(f_id, files)\n\n # End pre-training if reached max steps\n if self.global_step >= self.total_steps:\n del train_dataloader\n return # NOTE: breaks out of the training loop\n\n # Move to next file after using up all batches of current file\n del train_dataloader\n train_dataloader, hdf5_fpath = \\\n dataset_future.result(timeout=None)\n\n # Update epoch after going through all .hdf5 files\n epoch += 1", "def train():\n # YOUR TRAINING CODE GOES HERE", "def on_train_start(self, trainer, pl_module):\n self.client.set_tags(self.run_id, {\"Mode\": \"training\"})\n\n params = {\"epochs\": trainer.max_epochs}\n\n # TODO For logging optimizer params - Following scenarios are to revisited.\n # 1. In the current scenario, only the first optimizer details are logged.\n # Code to be enhanced to log params when multiple optimizers are used.\n # 2. mlflow.log_params is used to store optimizer default values into mlflow.\n # The keys in default dictionary are too short, Ex: (lr - learning_rate).\n # Efficient mapping technique needs to be introduced\n # to rename the optimizer parameters based on keys in default dictionary.\n\n if hasattr(trainer, \"optimizers\"):\n optimizer = trainer.optimizers[0]\n params[\"optimizer_name\"] = _get_optimizer_name(optimizer)\n\n if hasattr(optimizer, \"defaults\"):\n params.update(optimizer.defaults)\n\n self.client.log_params(self.run_id, params)\n self.client.flush(synchronous=True)", "def train(self) -> None:\n\n # Check if in the saved model path there is already a trained model\n if self.config.TRN_HYPERP[\"save_path\"]:\n if tf.saved_model.contains_saved_model(self.config.TRN_HYPERP[\"save_path\"]):\n print(\"INFO: An existing saved model will be used for inference\\n\")\n else:\n params = {**self.config.TRN_HYPERP, **self.config.DATASET_HYPERP}\n trainer = Trainer(**params)\n\n print(f\"INFO: Starting training ... \\n\")\n start_time = time.time()\n trainer.train()\n print(f\"\\nINFO: Training completed in {round((time.time() - start_time)/60, 2)} minutes.\\n\")\n\n # Instantiate the saved translator for inference\n saved_path = self.config.TRN_HYPERP[\"save_path\"]\n self.saved_translator = tf.saved_model.load(saved_path)\n else:\n print(\"INFO: Path to save model wasn't provided in config file. 
Can't train the model\\n\")", "def do(self):\n super().do()\n logger.info(\"TrainPipeStep started...\")\n records = self._get_current_step_records()\n logger.debug(\"load pipestep records: {}\".format(records))\n self.num_models = len(records)\n self.num_epochs = self.num_models * TrainerConfig.epochs\n self.update_status(Status.running)\n self.master = create_master()\n self._train_multi_models(records)\n self.master.join()\n ReportServer().output_step_all_records(step_name=self.task.step_name)\n self.master.close()\n ReportServer().backup_output_path()\n self.update_status(Status.finished)", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def train(self, train_loader):\n pass", "def train(self):\n logging.info(\"Training DINTModel.\")\n start = time.time()\n tr = self.classifier.train()\n return time.time() - start", "def train():\n import trace\n trace.train()", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train(self):\n if self.retrain:\n self.states = self.get_states()\n self.transitions = self.get_transitions()\n self.matrix = self.get_matrix()\n self.save_training()\n else:\n self.load_training()", "def training(self):\r\n self.model, self.voc = svm_clf_training('all', self.dataset)\r\n return 0", "def _training_before_hook(self):\n pass", "def train(self, render_env: bool = False) -> None:\n\n print(\":: Environment rendering autorised: {}\".format(render_env))\n\n consol_print_learning_stats = ConsolPrintLearningStats(self.exp_spec,\n self.exp_spec.print_metric_every_what_epoch)\n\n \"\"\" ---- Setup run dir name ---- \"\"\"\n self.this_run_dir = setup_commented_run_dir_str(self.exp_spec, self.agent_root_dir)\n\n \"\"\" ---- Create run dir & setup file writer for TensorBoard ---- \"\"\"\n self.writer = tf_cv1.summary.FileWriter(self.this_run_dir, tf_cv1.get_default_graph())\n\n \"\"\" ---- Log experiment spec in run directory ---- \"\"\"\n try:\n with open(\"{}/config.txt\".format(self.this_run_dir), \"w\") as f:\n f.write(self.exp_spec.__repr__())\n except IOError as e:\n raise IOError(\"The config file cannot be saved in the run directory!\") from e\n\n \"\"\" ---- Start training agent ---- \"\"\"\n for epoch in self._training_epoch_generator(consol_print_learning_stats, render_env):\n (epoch, epoch_loss, batch_average_trjs_return, batch_average_trjs_lenght) = epoch\n\n \"\"\" ---- Teardown ---- \"\"\"\n consol_print_learning_stats.print_experiment_stats(print_plot=self.exp_spec.show_plot)\n\n self.writer.close()\n return None", "def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = 
create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], training_config[\"LOGFILE\"])\n write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train(self):\n\n # Set the pretrain log\n trlog = {}\n trlog['args'] = vars(self.args)\n trlog['train_loss'] = []\n trlog['val_loss'] = []\n trlog['train_acc'] = []\n trlog['val_acc'] = []\n trlog['train_iou']=[]\n trlog['val_iou']=[]\n trlog['max_iou'] = 0.0\n trlog['max_iou_epoch'] = 0\n\n # Set the timer\n timer = Timer()\n # Set global count to zero\n global_count = 0\n # Set tensorboardX\n writer = SummaryWriter(comment=self.args.save_path)\n\n # Start pretrain\n for epoch in range(1, self.args.pre_max_epoch + 1):\n # Update learning rate\n self.lr_scheduler.step()\n # Set the model to train mode\n 
self.model.train()\n self.model.mode = 'train'\n # Set averager classes to record training losses and accuracies\n train_loss_averager = Averager()\n train_acc_averager = Averager()\n train_iou_averager = Averager()\n\n # Using tqdm to read samples from train loader\n tqdm_gen = tqdm.tqdm(self.train_loader)\n\n for i, batch in enumerate(tqdm_gen, 1):\n # Update global count number \n global_count = global_count + 1\n if torch.cuda.is_available():\n data, label = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n label = batch[1]\n\n # Output logits for model\n logits = self.model(data)\n # Calculate train loss\n # CD loss is modified in the whole project to incorporate ony Cross Entropy loss. Modify as per requirement.\n #loss = self.FL(logits, label) + self.CD(logits,label) + self.LS(logits,label)\n loss = self.CD(logits,label) \n \n # Calculate train accuracy\n self._reset_metrics()\n seg_metrics = eval_metrics(logits, label, self.args.num_classes)\n self._update_seg_metrics(*seg_metrics)\n pixAcc, mIoU, _ = self._get_seg_metrics(self.args.num_classes).values()\n\n # Add loss and accuracy for the averagers\n train_loss_averager.add(loss.item())\n train_acc_averager.add(pixAcc)\n train_iou_averager.add(mIoU)\n\n # Print loss and accuracy till this step\n tqdm_gen.set_description('Epoch {}, Loss={:.4f} Acc={:.4f} IOU={:.4f}'.format(epoch, train_loss_averager.item(),train_acc_averager.item()*100.0,train_iou_averager.item()))\n \n # Loss backwards and optimizer updates\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update the averagers\n train_loss_averager = train_loss_averager.item()\n train_acc_averager = train_acc_averager.item()\n train_iou_averager = train_iou_averager.item()\n\n writer.add_scalar('data/train_loss(Pre)', float(train_loss_averager), epoch)\n writer.add_scalar('data/train_acc(Pre)', float(train_acc_averager)*100.0, epoch) \n writer.add_scalar('data/train_iou (Pre)', float(train_iou_averager), epoch)\n \n print('Epoch {}, Train: Loss={:.4f}, Acc={:.4f}, IoU={:.4f}'.format(epoch, train_loss_averager, train_acc_averager*100.0,train_iou_averager)) \n \n # Start validation for this epoch, set model to eval mode\n self.model.eval()\n self.model.mode = 'val'\n\n # Set averager classes to record validation losses and accuracies\n val_loss_averager = Averager()\n val_acc_averager = Averager()\n val_iou_averager = Averager()\n\n # Print previous information \n if epoch % 1 == 0:\n print('Best Val Epoch {}, Best Val IoU={:.4f}'.format(trlog['max_iou_epoch'], trlog['max_iou']))\n\n # Run validation\n for i, batch in enumerate(self.val_loader, 1):\n if torch.cuda.is_available():\n data, labels,_ = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n label=labels[0]\n p = self.args.way*self.args.shot\n data_shot, data_query = data[:p], data[p:]\n label_shot,label=labels[:p],labels[p:]\n \n par=data_shot, label_shot, data_query\n logits = self.model(par)\n # Calculate preval loss\n \n #loss = self.FL(logits, label) + self.CD(logits,label) + self.LS(logits,label)\n loss = self.CD(logits,label) \n \n # Calculate val accuracy\n self._reset_metrics()\n seg_metrics = eval_metrics(logits, label, self.args.way)\n self._update_seg_metrics(*seg_metrics)\n pixAcc, mIoU, _ = self._get_seg_metrics(self.args.way).values()\n\n val_loss_averager.add(loss.item())\n val_acc_averager.add(pixAcc)\n val_iou_averager.add(mIoU) \n\n # Update validation averagers\n val_loss_averager = val_loss_averager.item()\n val_acc_averager = val_acc_averager.item()\n val_iou_averager = 
val_iou_averager.item()\n \n writer.add_scalar('data/val_loss(Pre)', float(val_loss_averager), epoch)\n writer.add_scalar('data/val_acc(Pre)', float(val_acc_averager)*100.0, epoch) \n writer.add_scalar('data/val_iou (Pre)', float(val_iou_averager), epoch) \n \n # Print loss and accuracy for this epoch\n print('Epoch {}, Val: Loss={:.4f} Acc={:.4f} IoU={:.4f}'.format(epoch, val_loss_averager, val_acc_averager*100.0,val_iou_averager))\n\n # Update best saved model\n if val_iou_averager > trlog['max_iou']:\n trlog['max_iou'] = val_iou_averager\n trlog['max_iou_epoch'] = epoch\n print(\"model saved in max_iou\")\n self.save_model('max_iou')\n\n # Save model every 10 epochs\n if epoch % 10 == 0:\n self.save_model('epoch'+str(epoch))\n\n # Update the logs\n trlog['train_loss'].append(train_loss_averager)\n trlog['train_acc'].append(train_acc_averager)\n trlog['val_loss'].append(val_loss_averager)\n trlog['val_acc'].append(val_acc_averager)\n trlog['train_iou'].append(train_iou_averager)\n trlog['val_iou'].append(val_iou_averager)\n\n # Save log\n torch.save(trlog, osp.join(self.args.save_path, 'trlog'))\n\n if epoch % 1 == 0:\n print('Running Time: {}, Estimated Time: {}'.format(timer.measure(), timer.measure(epoch / self.args.max_epoch)))\n writer.close()", "def train(self) -> None:\n for module in self.modules.values():\n module.train()\n return", "def main(_):\n utility.set_up_logging()\n if not FLAGS.config:\n raise KeyError('You must specify a configuration.')\n logdir = FLAGS.logdir and os.path.expanduser(os.path.join(\n FLAGS.logdir, '{}-{}'.format(FLAGS.timestamp, FLAGS.config)))\n try:\n config = utility.load_config(logdir)\n except IOError:\n config = tools.AttrDict(getattr(configs, FLAGS.config)())\n config = utility.save_config(config, logdir)\n for score in train(config, FLAGS.env_processes):\n tf.logging.info('Score {}.'.format(score))", "def train(self, epochs):\n print('Starting training...')\n print('\\n{:13} '\n '{:>17} '\n '{:^38}'\n ''.format('', '--- Training ---', '--- Validation ---'))\n print('{:4} {:>8} '\n '{:>8} {:>8} '\n '{:>8} {:>8} {:>8} {:>8}'\n ''.format('', '', 'Loss', 'Acc', 'Loss', 'Prc', 'Rec', 'Acc'))\n training_time = 0\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n trn_stats = self.__progress(self.training, self.__train_fn)\n val_stats = self.__progress(self.validation, self.__val_fn)\n elapsed_time = time.time() - start_time\n training_time += elapsed_time\n print('{:>4} {:>7.2f}s '\n '{:>8.3f} {:>8.1%} '\n '{:>8.3f} {:>8.1%} {:>8.1%} {:>8.1%}'\n ''.format(epoch, elapsed_time,\n trn_stats[0], trn_stats[-1],\n *val_stats))\n self.history.append([epoch] + list(trn_stats) + list(val_stats))\n self.report['epochs'] = epochs\n self.report['time_per_epoch'] = training_time / epochs", "def train(self, batch):\n pass", "def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", 
{})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )", "def train(self):\n # Restore models\n global_step = self._restore_models_and_step()\n \n if self.gold and global_step >= self.gold_step:\n self.netD.use_gold = True\n\n print(\"INFO: Starting training from global step {}...\".format(\n global_step))\n logit_save_num = 0\n\n self.logit_results = defaultdict(dict)\n\n try:\n start_time = time.time()\n\n # Mixed precision\n if self.amp:\n print(\"INFO: Using mixed precision training...\")\n scaler = torch.cuda.amp.GradScaler()\n else:\n scaler = None\n\n # Iterate through data\n iter_dataloader = iter(self.dataloader)\n if self.train_drs:\n iter_dataloader_drs = iter(self.dataloader_drs)\n while global_step < self.num_steps:\n log_data = metric_log.MetricLog() # log data for tensorboard\n\n if self.topk:\n self.netG.decay_topk_rate(global_step, epoch_steps=len(self.dataloader))\n\n if self.gold and global_step == self.gold_step:\n self.netD.use_gold = True\n # -------------------------\n # One Training Step\n # -------------------------\n # Update n_dis times for D\n for i in range(self.n_dis):\n iter_dataloader, real_batch = self._fetch_data(\n iter_dataloader=iter_dataloader)\n\n # ------------------------\n # Update D Network\n # -----------------------\n log_data = self.netD.train_step(\n real_batch=real_batch,\n netG=self.netG,\n optD=self.optD,\n log_data=log_data,\n global_step=global_step,\n device=self.device,\n scaler=scaler)\n\n # train netD2 for DRS\n if self.train_drs:\n iter_dataloader_drs, real_batch_drs = self._fetch_data(\n iter_dataloader=iter_dataloader_drs)\n log_data = self.netD_drs.train_step(\n real_batch=real_batch_drs,\n netG=self.netG,\n optD=self.optD_drs,\n log_data=log_data,\n global_step=global_step,\n device=self.device,\n scaler=scaler)\n\n # -----------------------\n # Update G Network\n # -----------------------\n # Update G, but only once.\n if i == (self.n_dis - 1):\n log_data = self.netG.train_step(\n real_batch=real_batch,\n netD=self.netD,\n optG=self.optG,\n global_step=global_step,\n log_data=log_data,\n device=self.device,\n scaler=scaler)\n\n # --------------------------------\n # Update Training Variables\n # -------------------------------\n global_step += 1\n\n log_data = self.scheduler.step(log_data=log_data,\n global_step=global_step)\n\n # -------------------------\n # Logging and Metrics\n # -------------------------\n if global_step % self.log_steps == 0:\n self.logger.write_summaries(log_data=log_data,\n global_step=global_step)\n\n if global_step % self.print_steps == 0:\n curr_time = time.time()\n topk_rate = self.netG.topk_rate if hasattr(self.netG, 'topk_rate') else 1\n log_data.add_metric(f'topk_rate', topk_rate, group='topk_rate', precision=6)\n self.logger.print_log(global_step=global_step,\n log_data=log_data,\n time_taken=(curr_time - start_time) /\n self.print_steps)\n start_time = curr_time\n\n if global_step % self.vis_steps == 0:\n if 'gaussian' in self.log_dir:\n 
plot_gaussian_samples(netG=self.netG,\n global_step=global_step,\n log_dir=self.log_dir,\n device=self.device)\n else:\n self.logger.vis_images(netG=self.netG,\n global_step=global_step)\n \n if self.save_logits and global_step % self.logit_save_steps == 0 and global_step >= self.save_logit_after and global_step <= self.stop_save_logit_after:\n if self.train_drs:\n netD = self.netD_drs\n netD_name = 'netD_drs'\n else:\n netD = self.netD\n netD_name = 'netD'\n mode = 'eval' if self.save_eval_logits else 'train'\n print(f\"INFO: logit saving {mode} netD: {netD_name}...\")\n logit_list = self._get_logit(netD=netD, eval_mode=mode=='eval')\n self.logit_results[f'{netD_name}_{mode}'][global_step] = logit_list\n\n logit_save_num += 1\n\n if global_step % self.save_steps == 0:\n print(\"INFO: Saving checkpoints...\")\n self._save_model_checkpoints(global_step)\n if self.save_logits and global_step >= self.save_logit_after:\n self._save_logit(self.logit_results)\n\n print(\"INFO: Saving final checkpoints...\")\n self._save_model_checkpoints(global_step)\n if self.save_logits and global_step >= self.save_logit_after:\n self._save_logit(self.logit_results)\n\n except KeyboardInterrupt:\n print(\"INFO: Saving checkpoints from keyboard interrupt...\")\n self._save_model_checkpoints(global_step)\n if self.save_logits and global_step >= self.save_logit_after:\n self._save_logit(self.logit_results)\n\n finally:\n self.logger.close_writers()\n\n print(\"INFO: Training Ended.\")", "def train(self, trainData):\n pass", "def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def setup(self) -> None:\n self.logger.info(\"ML Train task: setup method called.\")", "def run_training_loop():\n logging.info(\"Starting the training loop.\")\n\n trainer = trainer_class(\n output_dir=output_dir,\n train_env=train_env,\n eval_env=eval_env,\n trajectory_dump_dir=trajectory_dump_dir,\n )\n trainer.training_loop(n_epochs=n_epochs)", "def on_train_forward(self, runner):\n self.on_iter_forward(runner)", "def train(self) -> Any:\n pass", "def train_epoch(self):\n for batch, targets in self.training_dataloader:\n self.training_step(batch, targets)\n self.calculate_training_loss()\n self.epochs_trained += 1\n LOGGER.info(\n \"Training loss after {} epochs: {}\".format(str(self.epochs_trained), str(self.training_average_loss))\n )", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def start_training(params):\n\n\n\n # CREATE A FOLDER TO HOLD RESULTS\n\n\n exp_pref = \"../results/\" + params.EXPERIMENT_PREFIX\n time_str = time.strftime(\"_%m-%d-%H-%M_\", time.gmtime())\n exp_dir = exp_pref + time_str + \\\n \"{}\".format(params.LEARNING_RATE).replace(\".\", \"p\") + \"_\" \\\n + \"{}\".format(params.DISCOUNT).replace(\".\", \"p\")\n\n try:\n os.stat(exp_dir)\n except OSError:\n os.makedirs(exp_dir)\n\n logger = logging.getLogger(\"DeepLogger\")\n logger.setLevel(logging.INFO)\n\n # Logging filehandler\n #fh = logging.FileHandler(exp_dir + \"/log.log\")\n # Rotate file when filesize is 5 mb\n fh = RotatingFileHandler(exp_dir + \"/log.log\", maxBytes=5000000, backupCount=100)\n\n fh.setLevel(logging.INFO)\n\n # Console filehandler\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n logger.addHandler(fh)\n\n # Prevent 
nohup from producing large log file, logging to file is handled internally\n # logger.addHandler(ch)\n\n log_params(logger, params)\n\n #logging.basicConfig(level=logging.INFO, filename=exp_dir + \"/log.log\")\n\n\n if params.DETERMINISTIC:\n rng = np.random.RandomState(12345)\n else:\n rng = np.random.RandomState()\n\n if params.CUDNN_DETERMINISTIC:\n theano.config.dnn.conv.algo_bwd = 'deterministic'\n\n # Init ale\n ale = ale_python_interface.ALEInterface()\n ale.setInt('random_seed', 123)\n ale.setBool('display_screen', params.DISPLAY_SCREEN)\n ale.setFloat('repeat_action_probability', params.REPEAT_ACTION_PROBABILITY)\n full_rom_path = os.path.join(params.ROM_PATH, params.ROM_NAME)\n ale.loadROM(full_rom_path)\n num_actions = len(ale.getMinimalActionSet())\n\n print \"Legal actions: \", num_actions\n print ale.getMinimalActionSet()\n\n # Instantiate network\n logger.info(\"Setting up network...\")\n network = None # Be able to continue training from a network or watch a network play\n if (params.NETWORK_PICKLE_FILE is None):\n logger.info(\"Initializing a new random network...\")\n network = q_network.DeepQLearner(params.RESIZED_WIDTH,\n params.RESIZED_HEIGHT,\n num_actions,\n params.PHI_LENGTH,\n params.DISCOUNT,\n params.LEARNING_RATE,\n params.RMS_DECAY,\n params.RMS_EPSILON,\n params.MOMENTUM,\n params.CLIP_DELTA,\n params.FREEZE_INTERVAL,\n params.BATCH_SIZE,\n params.NETWORK_TYPE,\n params.UPDATE_RULE,\n params.BATCH_ACCUMULATOR,\n rng)\n else:\n logger.info(\"Loading network instance from file...\")\n handle = open(params.NETWORK_PICKLE_FILE, 'r')\n network = cPickle.load(handle)\n\n\n # Only used when getting a random network\n if params.RANDOM_NETWORK_PICKLE:\n import sys\n sys.setrecursionlimit(10000)\n result_net_file = open(params.EXPERIMENT_PREFIX + '.pkl', 'w')\n print \"File opened\"\n cPickle.dump(network, result_net_file, -1)\n print \"Pickle dumped\"\n result_net_file.close()\n sys.exit(0)\n\n\n # Instatiate agent\n logger.info(\"Setting up agent...\")\n agent = ale_agent.NeuralAgent(network,\n params.EPSILON_START,\n params.EPSILON_MIN,\n params.EPSILON_DECAY,\n params.REPLAY_MEMORY_SIZE,\n exp_dir,\n params.REPLAY_START_SIZE,\n params.UPDATE_FREQUENCY,\n rng)\n\n # Instantiate experient\n logger.info(\"Setting up experiment...\")\n experiment = ale_experiment.ALEExperiment(ale, agent,\n params.RESIZED_WIDTH,\n params.RESIZED_HEIGHT,\n params.RESIZE_METHOD,\n params.EPOCHS,\n params.STEPS_PER_EPOCH,\n params.STEPS_PER_TEST,\n params.FRAME_SKIP,\n params.DEATH_ENDS_EPISODE,\n params.MAX_START_NULLOPS,\n rng)\n\n\n # Run experiment\n logger.info(\"Running experiment...\")\n experiment.run()", "def test_training(self):\n self.classifier.train(\"test\", self.message)", "def test_train(self):\n print \"x=\",self.trainer.train()", "def before_train(self, logs=None):\n self.config = self.trainer.config\n self.unrolled = self.trainer.config.unrolled\n self.device = self.trainer.config.device\n self.model = self.trainer.model\n self.optimizer = self.trainer.optimizer\n self.lr_scheduler = self.trainer.lr_scheduler\n self.loss = self.trainer.loss\n self.search_alg = SearchAlgorithm(SearchSpace())\n self._set_algorithm_model(self.model)\n self.trainer.train_loader = self.trainer._init_dataloader(mode='train')\n self.trainer.valid_loader = self.trainer._init_dataloader(mode='val')\n normal_selected_idxs = torch.tensor(len(self.model.alphas_normal) * [-1],\n requires_grad=False, dtype=torch.int).cuda()\n reduce_selected_idxs = torch.tensor(len(self.model.alphas_reduce) * [-1],\n 
requires_grad=False, dtype=torch.int).cuda()\n normal_candidate_flags = torch.tensor(len(self.model.alphas_normal) * [True],\n requires_grad=False, dtype=torch.bool).cuda()\n reduce_candidate_flags = torch.tensor(len(self.model.alphas_reduce) * [True],\n requires_grad=False, dtype=torch.bool).cuda()\n logging.info('normal_selected_idxs: {}'.format(normal_selected_idxs))\n logging.info('reduce_selected_idxs: {}'.format(reduce_selected_idxs))\n logging.info('normal_candidate_flags: {}'.format(normal_candidate_flags))\n logging.info('reduce_candidate_flags: {}'.format(reduce_candidate_flags))\n self.model.normal_selected_idxs = normal_selected_idxs\n self.model.reduce_selected_idxs = reduce_selected_idxs\n self.model.normal_candidate_flags = normal_candidate_flags\n self.model.reduce_candidate_flags = reduce_candidate_flags\n logging.info(F.softmax(torch.stack(self.model.alphas_normal, dim=0), dim=-1).detach())\n logging.info(F.softmax(torch.stack(self.model.alphas_reduce, dim=0), dim=-1).detach())\n self.normal_probs_history = []\n self.reduce_probs_history = []", "def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()", "def train(self, log_in_tensorboard=True):\n if log_in_tensorboard or self.config.save_model:\n os.makedirs(self.config.results_path, exist_ok=True)\n\n # Manage GPUs\n if 0 < self.num_gpus:\n num_gpus_per_worker = self.num_gpus / (\n self.config.train_on_gpu\n + self.config.num_workers * self.config.selfplay_on_gpu\n + log_in_tensorboard * self.config.selfplay_on_gpu\n + self.config.use_last_model_value * self.config.reanalyse_on_gpu\n )\n if 1 < num_gpus_per_worker:\n num_gpus_per_worker = math.floor(num_gpus_per_worker)\n else:\n num_gpus_per_worker = 0\n\n # Initialize workers\n self.training_worker = trainer.Trainer.options(\n num_cpus=0, num_gpus=num_gpus_per_worker if self.config.train_on_gpu else 0,\n ).remote(self.checkpoint, self.config)\n\n self.shared_storage_worker = shared_storage.SharedStorage.remote(\n self.checkpoint, self.config,\n )\n self.shared_storage_worker.set_info.remote(\"terminate\", False)\n\n self.replay_buffer_worker = replay_buffer.ReplayBuffer.remote(\n self.checkpoint, self.replay_buffer, self.config\n )\n\n if self.config.use_last_model_value:\n self.reanalyse_worker = replay_buffer.Reanalyse.options(\n num_cpus=0,\n num_gpus=num_gpus_per_worker if self.config.reanalyse_on_gpu else 0,\n ).remote(self.checkpoint, self.config)\n\n self.self_play_workers = [\n self_play.SelfPlay.options(\n num_cpus=0,\n num_gpus=num_gpus_per_worker if self.config.selfplay_on_gpu else 0,\n ).remote(\n self.checkpoint, self.Game, self.config, self.config.seed + seed,\n )\n for seed in range(self.config.num_workers)\n ]\n\n # Launch workers\n [\n self_play_worker.continuous_self_play.remote(\n self.shared_storage_worker, self.replay_buffer_worker\n )\n for self_play_worker in self.self_play_workers\n ]\n self.training_worker.continuous_update_weights.remote(\n self.replay_buffer_worker, self.shared_storage_worker\n )\n if self.config.use_last_model_value:\n self.reanalyse_worker.reanalyse.remote(\n self.replay_buffer_worker, self.shared_storage_worker\n )\n\n if log_in_tensorboard:\n self.logging_loop(\n num_gpus_per_worker if self.config.selfplay_on_gpu else 0,\n )", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, 
self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def train(self, session, train_examples, dev_examples, train_dir):\n\n # some free code to print out number of parameters in your model\n # it's always good to check!\n # you will also want to save your model parameters in train_dir\n # so that you can use your trained model to make predictions, or\n # even continue training\n\n tic = time.time()\n params = tf.trainable_variables()\n num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n toc = time.time()\n logging.info(\"Number of params: %d (retreival took %f secs)\" % (num_params, toc - tic))\n\n if self.summary_flag:\n self.train_writer = tf.summary.FileWriter(self.summaries_dir + '/train', session.graph)\n\n logging.info(\"Train Loss File: {}\".format(self.train_loss_log))\n logging.info(\"Dev Loss File: {}\".format(self.dev_loss_log))\n best_score = 100000\n train_log = open(self.train_loss_log, \"w\")\n dev_log = open(self.dev_loss_log, \"w\")\n for epoch in range(self.n_epoch):\n print(\"Epoch {:} out of {:}\".format(epoch + 1, self.n_epoch))\n dev_score = self.run_epoch(session, train_examples, dev_examples, epoch, train_log)\n dev_log.write(\"{},{}\\n\".format(epoch + 1, dev_score))\n logging.info(\"Average Dev Cost: {}\".format(dev_score))\n logging.info(\"train F1 & EM\")\n f1, em = self.evaluate_answer(session, train_examples, self.rev_vocab, log = True)\n logging.info(\"Dev F1 & EM\")\n f1, em = self.evaluate_answer(session, dev_examples, self.rev_vocab, log = True)\n if dev_score < best_score:\n best_score = dev_score\n print(\"New best dev score! 
Saving model in {}\".format(train_dir + \"/\" + self.model_name))\n self.saver.save(session, train_dir + \"/\" + self.model_name)\n\n return best_score", "def run(self):\n def update_logs(summary_writer, episode, reward, loss, epsilon):\n summary_writer.add_scalar('Reward', reward, episode)\n summary_writer.add_scalar('Loss', loss, episode)\n summary_writer.add_scalar('Epsilon', epsilon, episode)\n \n # Print model and init summary_writer\n summary(self.policy_net, (1, self.num_inputs))\n summary_writer = SummaryWriter(log_dir=f'./logs/{self.name}/')\n\n sum_reward = 0\n\n # Run nb_games\n for n in range(self.nb_games):\n\n reward, loss = self._run_one_game()\n\n # Update values and logs\n episode = self.nb_iter_prev + n\n sum_reward += reward\n self.epsilon = max(self.min_epsilon, self.epsilon * self.decay)\n update_logs(summary_writer, episode, reward, loss, self.epsilon)\n \n # Each update_frequency print and update target_net\n if (episode + 1) % self.update_frequency == 0:\n print(f'Episode: {episode + 1}, Epsilon: {self.epsilon}, '\n f'Reward: {reward}, Loss: {loss}, '\n f'Mean reward: {sum_reward/self.update_frequency}.')\n sum_reward = 0\n self._update_target_net()\n\n # End of the training\n self.nb_iter_prev += self.nb_games\n self.save()", "def _on_training_start(self) -> None:\n if self.eval_freq > 0:\n self.solver.run_tests(0, draw=self.draw, verbose=self.verbose)", "def train(self): \n self.current_step = 0\n self.log = log_setup(self.args)\n self.current_gamma = self.args.initial_gamma\n with tf.Session(graph = self.computation_graph) as session:\n self.init.run()\n print(\"Model Initialized.\")\n for repetition in range(0, self.args.epochs):\n\n random.shuffle(self.nodes)\n self.optimization_time = 0 \n self.average_loss = 0\n\n epoch_printer(repetition)\n for i in tqdm(range(int(len(self.edges)/self.args.batch_size))):\n self.current_step = self.current_step + 1\n self.current_gamma = gamma_incrementer(self.current_step, self.args.initial_gamma, self.current_gamma, self.true_step_size)\n feed_dict = self.feed_dict_generator(self.edges[i*self.args.batch_size:(i+1)*self.args.batch_size], self.current_step, self.current_gamma)\n start = time.time()\n _, loss = session.run([self.train_op , self.loss], feed_dict=feed_dict)\n end = time.time()\n self.optimization_time = self.optimization_time + (end-start)\n self.average_loss = self.average_loss + loss\n\n print(\"\")\n self.average_loss = self.average_loss/self.vocab_size\n self.final_embeddings = self.factorization_layer.embedding_matrix.eval()\n if \"CODE\" in self.args.model: \n self.c_means = self.cluster_layer.cluster_means.eval()\n self.modularity_score, assignments = neural_modularity_calculator(self.graph, self.final_embeddings, self.c_means)\n else:\n self.modularity_score, assignments = classical_modularity_calculator(self.graph, self.final_embeddings, self.args)\n self.log = log_updater(self.log, repetition, self.average_loss, self.optimization_time, self.modularity_score)\n tab_printer(self.log)\n if \"CODE\" in self.args.model: \n initiate_dump_grafcode(self.log, assignments, self.args, self.final_embeddings, self.c_means)\n else:\n initiate_dump_graf(self.log, assignments, self.args, self.final_embeddings)", "def train(self):\n start_time = time()\n self.model.train()\n\n for step, sample in enumerate(self.train_loader):\n self.optimizer.zero_grad()\n\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n loss.backward()\n 
self.train_losses.append(loss.item())\n\n self.optimizer.step(None)\n\n # print an incredible progress bar\n print(f'\\r{self.progress_bar} │ Loss: {np.mean(self.train_losses):.6f}', end='')\n self.progress_bar.inc()\n\n # log average loss of this epoch\n mean_epoch_loss = np.mean(self.train_losses)\n self.sw.add_scalar(tag='train_loss', scalar_value=mean_epoch_loss, global_step=self.epoch)\n self.train_losses = []\n\n # log epoch duration\n print(f' │ T: {time() - start_time:.2f} s')", "def train(\n self, training_data: TrainingData, cfg: DazuConfig, **kwargs: Any\n ) -> None:", "def call_training_routine(self):\n training_command = \"th main.lua \"\\\n \"-GPU_id %(GPU_identifier)i \"\\\n \"-number_of_GPUs %(number_of_GPUs)i \"\\\n \"-training_dataset %(training_dataset)s \"\\\n \"-testing_dataset %(testing_dataset)s \"\\\n \"-modelFilePath %(modelFilePath)s \"\\\n \"-maxepoch %(maxepoch)i \"\\\n \"-savingDirectory %(savingDirectory)s \"\\\n \"-learningRate %(learningRate)f \"\\\n \"-batchSize %(batchSize)i \"\\\n \"-momentum %(momentum)f\" % self.training_parameters\n\n if self.training_parameters[\"presavedModelPath\"] != \"\":\n training_command += \" -presavedModelPath %s\" %\\\n self.training_parameters[\"presavedModelPath\"]\n\n # Call the training command\n subprocess.call(training_command, shell=True)", "def train(self, request):\n model = request.get(\"model\")\n if not model:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n \"Model is not provided for Training Task\",\n )\n\n task = self._trainers.get(model)\n if not task:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n f\"Train Task is not Initialized. There is no model '{model}' available\",\n )\n\n request = copy.deepcopy(request)\n result = task(request, self.datastore())\n\n # Run all scoring methods\n if self._auto_update_scoring:\n self.async_scoring(None)\n return result", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n \r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model" ]
[ "0.78187734", "0.766623", "0.7507915", "0.74226147", "0.7411162", "0.7411162", "0.7411162", "0.7411162", "0.7411162", "0.73725444", "0.7329948", "0.7323073", "0.73172176", "0.72957176", "0.7275571", "0.72495633", "0.7228919", "0.7227007", "0.72220093", "0.7217784", "0.7202225", "0.7198964", "0.7191397", "0.7179416", "0.71673715", "0.71509296", "0.7140122", "0.71321064", "0.7115376", "0.7102492", "0.7094984", "0.7081416", "0.70713323", "0.70447314", "0.7033591", "0.7033591", "0.70217884", "0.70092636", "0.699168", "0.69715476", "0.696812", "0.6967148", "0.694918", "0.69451183", "0.69449806", "0.69420004", "0.6936235", "0.6930559", "0.69267786", "0.6917924", "0.69102967", "0.6889197", "0.6880349", "0.68795496", "0.6876861", "0.6875265", "0.687089", "0.68702203", "0.6868504", "0.68683493", "0.68644226", "0.6860388", "0.6858473", "0.685073", "0.6845569", "0.6842699", "0.68292016", "0.6820533", "0.67902416", "0.6787302", "0.6783641", "0.67831063", "0.6779687", "0.67631805", "0.6753253", "0.67528796", "0.6751373", "0.6736025", "0.67342824", "0.67337", "0.6729016", "0.67272806", "0.67214614", "0.6718474", "0.6718474", "0.6714168", "0.670036", "0.6697921", "0.66952664", "0.66921985", "0.6691674", "0.6691341", "0.6675888", "0.6673282", "0.6672983", "0.6672351", "0.6665191", "0.66612506", "0.66529596", "0.66518956", "0.6651485" ]
0.0
-1
Test the gateway can connect and have an empty dict of devices
def test_connect(self, gateway): assert not gateway._devs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_devices(self):\n pass", "def test_get_devices(self):\n pass", "def test_verify_connection_to_a_device():", "def test_verify_list_of_devices_in_my_network():", "def test_gwservice_listdevices(self, setup_controller):\n resp = setup_controller.request(\"gw\", \"devices\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw list devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)", "def test_get_devices1(self):\n pass", "def test_get_device(self):\n pass", "def test_get_device(self):\n pass", "def test_nothing(self):\n response = self.client.get(reverse('device-list'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['devices'], [])", "def test_gwservice_createdevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n print(json.dumps(payload))\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device delete\", body=body)\n if resp.status_code != 200:\n assert False", "async def test_gateway_empty_string(\n ip4config_service: IP4ConfigService, dbus_session_bus: MessageBus\n):\n ip4 = IpConfiguration(\"/org/freedesktop/NetworkManager/IP4Config/1\", ip4=True)\n await ip4.connect(dbus_session_bus)\n\n ip4config_service.emit_properties_changed({\"Gateway\": \"\"})\n await ip4config_service.ping()\n assert ip4.gateway is None", "def the_user_should_be_able_to_connect_to_one_of_the_devices():\n assert web_app.connect_to_device1()", "def test_device_tests():\n assert call_json_fans.FANS_NUM == len(call_json_fans.DETAILS_RESPONSES)\n assert call_json_bulbs.BULBS_NUM == len(call_json_bulbs.DETAILS_RESPONSES)\n assert call_json_outlets.OUTLETS_NUM == len(call_json_outlets.DETAILS_RESPONSES)\n assert call_json_switches.SWITCHES_NUM == len(call_json_switches.DETAILS_RESPONSES)", "async def _check_api(self) -> None:\n await self._api_request(\"devices\")", "def test_get_pci_device_list(self):\n pass", "def test_verify_state_of_a_device_when_disconnected_from_the_device():", "def check_devices():\n\n cur = g.db.execute('select node from slaves')\n NODES=[r[0] for r in cur.fetchall()]\n dev = {}\n for node in NODES:\n try:\n print \"Getting Device and status from \"+ node\n logger.info('Getting Devices and status from ' +node)\n url = Request(\"http://\"+ node +\":5000/api/all/status\")\n data = json.load(urllib2.urlopen(url))\n dev[node] = data\n except URLError, e:\n print e\n logger.error(e)\n dev[node] = e\n if not dev:\n dev[\"error\"] = \"error No devices found please add\"\n 
return dev\n else:\n return dev", "def test_gatt_connect_get_connected_devices(self):\n gatt_server_cb = self.per_ad.droid.gattServerCreateGattServerCallback()\n gatt_server = self.per_ad.droid.gattServerOpenGattServer(\n gatt_server_cb)\n self.gatt_server_list.append(gatt_server)\n try:\n bluetooth_gatt, gatt_callback, adv_callback = (\n orchestrate_gatt_connection(self.cen_ad, self.per_ad))\n self.bluetooth_gatt_list.append(bluetooth_gatt)\n except GattTestUtilsError as err:\n self.log.error(err)\n return False\n conn_cen_devices = self.cen_ad.droid.bluetoothGetConnectedLeDevices(\n bt_profile_constants['gatt'])\n conn_per_devices = self.per_ad.droid.bluetoothGetConnectedLeDevices(\n bt_profile_constants['gatt_server'])\n target_name = self.per_ad.droid.bluetoothGetLocalName()\n error_message = (\"Connected device {} not found in list of connected \"\n \"devices {}\")\n if not any(d['name'] == target_name for d in conn_cen_devices):\n self.log.error(error_message.format(target_name, conn_cen_devices))\n return False\n # For the GATT server only check the size of the list since\n # it may or may not include the device name.\n target_name = self.cen_ad.droid.bluetoothGetLocalName()\n if not conn_per_devices:\n self.log.error(error_message.format(target_name, conn_per_devices))\n return False\n self.adv_instances.append(adv_callback)\n return self._orchestrate_gatt_disconnection(bluetooth_gatt,\n gatt_callback)", "async def test_no_sensors(hass, mock_bridge):\n mock_bridge.allow_groups = True\n mock_bridge.mock_sensor_responses.append({})\n await setup_bridge(hass, mock_bridge)\n assert len(mock_bridge.mock_requests) == 1\n assert len(hass.states.async_all()) == 0", "def test_create_device(self):\n pass", "def test_create_device(self):\n pass", "def __init__(self):\n self._discovered_devices = {}\n self._discovered_ip = None", "def test_filter_device(self):\n pass", "async def test_registered_devices(hass: HomeAssistant) -> None:\n integration_matchers = [\n {\"domain\": \"not-matching\", \"registered_devices\": True},\n {\"domain\": \"mock-domain\", \"registered_devices\": True},\n ]\n\n packet = Ether(RAW_DHCP_RENEWAL)\n\n registry = dr.async_get(hass)\n config_entry = MockConfigEntry(domain=\"mock-domain\", data={})\n config_entry.add_to_hass(hass)\n registry.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"50147903852c\")},\n name=\"name\",\n )\n # Not enabled should not get flows\n config_entry2 = MockConfigEntry(domain=\"mock-domain-2\", data={})\n config_entry2.add_to_hass(hass)\n registry.async_get_or_create(\n config_entry_id=config_entry2.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"50147903852c\")},\n name=\"name\",\n )\n\n async_handle_dhcp_packet = await _async_get_handle_dhcp_packet(\n hass, integration_matchers\n )\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n await async_handle_dhcp_packet(packet)\n # Ensure no change is ignored\n await async_handle_dhcp_packet(packet)\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.1.120\",\n hostname=\"irobot-ae9ec12dd3b04885bcbfa36afb01e1cc\",\n macaddress=\"50147903852c\",\n )", "async def test_get_scanner(\n hass: HomeAssistant, mocked_opnsense, mock_device_tracker_conf: list[legacy.Device]\n) -> None:\n 
interface_client = mock.MagicMock()\n mocked_opnsense.InterfaceClient.return_value = interface_client\n interface_client.get_arp.return_value = [\n {\n \"hostname\": \"\",\n \"intf\": \"igb1\",\n \"intf_description\": \"LAN\",\n \"ip\": \"192.168.0.123\",\n \"mac\": \"ff:ff:ff:ff:ff:ff\",\n \"manufacturer\": \"\",\n },\n {\n \"hostname\": \"Desktop\",\n \"intf\": \"igb1\",\n \"intf_description\": \"LAN\",\n \"ip\": \"192.168.0.167\",\n \"mac\": \"ff:ff:ff:ff:ff:fe\",\n \"manufacturer\": \"OEM\",\n },\n ]\n network_insight_client = mock.MagicMock()\n mocked_opnsense.NetworkInsightClient.return_value = network_insight_client\n network_insight_client.get_interfaces.return_value = {\"igb0\": \"WAN\", \"igb1\": \"LAN\"}\n\n result = await async_setup_component(\n hass,\n DOMAIN,\n {\n DOMAIN: {\n CONF_URL: \"https://fake_host_fun/api\",\n CONF_API_KEY: \"fake_key\",\n CONF_API_SECRET: \"fake_secret\",\n CONF_VERIFY_SSL: False,\n }\n },\n )\n await hass.async_block_till_done()\n assert result\n device_1 = hass.states.get(\"device_tracker.desktop\")\n assert device_1 is not None\n assert device_1.state == \"home\"\n device_2 = hass.states.get(\"device_tracker.ff_ff_ff_ff_ff_ff\")\n assert device_2.state == \"home\"", "def test_get_devices(self):\n print(\"Test Device List\")\n self.mock_api.return_value = call_json.DeviceList.device_list_response()\n self.manager.get_devices()\n all_kwargs = parse_args(self.mock_api)\n assert assert_test(self.manager.get_devices, all_kwargs, None,\n self.write_api, self.overwrite)\n assert len(self.manager.bulbs) == call_json_bulbs.BULBS_NUM\n assert len(self.manager.outlets) == call_json_outlets.OUTLETS_NUM\n assert len(self.manager.fans) == call_json_fans.FANS_NUM\n assert len(self.manager.switches) == call_json_switches.SWITCHES_NUM", "def test_device_mgmt(self, gateway_with_devs):\n gateway_with_devs.restart('daq')\n assert gateway_with_devs.daq\n gateway_with_devs.remove('daq')\n with pytest.raises(AttributeError):\n gateway_with_devs.daq", "def test_get_device_unknown():\n device = get_device(SERIAL, CREDENTIAL, \"unknown\")\n assert device is None", "async def test_calling_service_with_no_master_gateway_fails(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n await setup_deconz_integration(\n hass, aioclient_mock, options={CONF_MASTER_GATEWAY: False}\n )\n aioclient_mock.clear_requests()\n\n data = {\n SERVICE_FIELD: \"/lights/1\",\n SERVICE_DATA: {\"on\": True},\n }\n\n await hass.services.async_call(\n DECONZ_DOMAIN, SERVICE_CONFIGURE_DEVICE, service_data=data\n )\n await hass.async_block_till_done()\n\n assert len(aioclient_mock.mock_calls) == 0", "def test_get_device_detects_none(hass, mock_openzwave):\n node = MockNode()\n value = MockValue(data=0, node=node)\n values = MockEntityValues(primary=value, node=node)\n\n device = cover.get_device(hass=hass, node=node, values=values, node_config={})\n assert device is None", "def test_setup_adds_proper_devices(self, mock_switch, mock_client):\n ports = {\n i: mock.MagicMock(model=model) for i, model in enumerate(mfi.SWITCH_MODELS)\n }\n ports[\"bad\"] = mock.MagicMock(model=\"notaswitch\")\n print(ports[\"bad\"].model)\n mock_client.return_value.get_devices.return_value = [\n mock.MagicMock(ports=ports)\n ]\n assert setup_component(self.hass, switch.DOMAIN, self.GOOD_CONFIG)\n self.hass.block_till_done()\n for ident, port in ports.items():\n if ident != \"bad\":\n mock_switch.assert_any_call(port)\n assert mock.call(ports[\"bad\"], self.hass) not in mock_switch.mock_calls", "async def 
test_device_tracker_registered_hostname_none(hass: HomeAssistant) -> None:\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerRegisteredWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n async_dispatcher_send(\n hass,\n CONNECTED_DEVICE_REGISTERED,\n {\"ip\": \"192.168.210.56\", \"mac\": \"b8b7f16db533\", \"host_name\": None},\n )\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()", "def test_show_neighbors_empty(self, device):\n command_line = [\n \"bgp-bot\",\n \"--config\", CONFIG,\n \"show\",\n device,\n \"--neighbors\"\n ]\n print execute(command_line)\n output = json.loads(execute(command_line)[0])\n assert not output", "def _bluetooth_check_profile_connection(self):\n profiles = dict()\n output = self.dut.get_conn_devices()\n # need to strip all whitespaces.\n conn_devs = {}\n\n for key in output:\n conn_devs[key.strip()] = output[key].strip()\n for key in conn_devs:\n self.logger.info('%s:%s' % (key, conn_devs[key]))\n if 'XXXXXXXX' in conn_devs[key]:\n profiles[key] = conn_devs[key]\n else:\n profiles[key] = False\n return profiles", "def check_expected_devices():\n\n res = devices()\n error = extract_error_from(res)\n if error:\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"Check expected devices got error result: {}\".format(res))\n return\n\n expected_usb_devices = __opts__.get(\"expected_usb_devices\", [])\n vendors_products = [ \"{}:{}\".format(dev[\"vendor\"], dev[\"product\"]) for dev in res[\"values\"] ]\n\n for dev in expected_usb_devices:\n if dev not in vendors_products:\n vendor, product = dev.split(\":\")\n tag = \"system/usb/{}/{}/not_connected\".format(vendor, product)\n __salt__[\"minionutil.trigger_event\"](tag)", "def test_verify_state_of_a_device():", "def test_check_connection(self):\n self.assertIsNotNone(app.check_connection())", "async def test_device_not_accessible(hass):\n with patch.object(axis.device, \"get_device\", side_effect=axis.errors.CannotConnect):\n await setup_axis_integration(hass)\n assert hass.data[AXIS_DOMAIN] == {}", "def test_create_device1(self):\n pass", "async def test_get_device_device_unavailable(hass):\n with patch(\n \"axis.vapix.Vapix.request\", side_effect=axislib.RequestError\n ), pytest.raises(axis.errors.CannotConnect):\n await axis.device.get_device(hass, host=\"\", port=\"\", username=\"\", password=\"\")", "def connected_devices_arp(self, run_test):\n\n\n if not run_test:\n return\n\n res = {}\n\n ts = int(time.time())\n\n route_cmd = \"ip r | grep /24 | awk '{print $1;}'\"\n subnet = Popen(route_cmd, shell=True,\n stdout=PIPE).stdout.read().decode('utf-8')\n\n nmap_cmd = f'nmap -sn {subnet}'\n Popen(nmap_cmd, shell=True, stdout=PIPE)\n\n arp_cmd = (\"/usr/sbin/arp -i eth0 -n | grep : |\"\n \"grep -v '_gateway' | tr -s ' ' | \"\n \"cut -f3 -d' ' | sort | uniq\")\n\n arp_res = Popen(arp_cmd, shell=True,\n stdout=PIPE).stdout.read().decode('utf-8')\n res['arp'] = arp_res\n\n devices = set(arp_res.strip().split(\"\\n\"))\n active_devices = [[dev, ts, 1] for dev in devices]\n\n for device in active_devices:\n if self.dev_db.contains(where('mac_addr') == device[0]):\n self.dev_db.update(increment(\"n\"),\n where('mac_addr') == device[0])\n self.dev_db.update(tdb_set('last_seen', device[1]),\n 
where('mac_addr') == device[0])\n else:\n self.dev_db.insert({'mac_addr': device[0],\n 'last_seen': device[1],\n 'n': device[2]})\n\n print(self.dev_db.all())\n ndev_past_day = len(self.dev_db.search(\n where('last_seen') > (ts - 86400)))\n ndev_past_week = len(self.dev_db.search(\n where('last_seen') > (ts - 86400*7)))\n\n print(ndev_past_day)\n self.results[\"devices_active\"] = len(active_devices)\n self.results[\"devices_total\"] = self.dev_db.count(where('n') >= 1)\n self.results[\"devices_1day\"] = ndev_past_day\n self.results[\"devices_1week\"] = ndev_past_week\n\n if not self.quiet:\n print('\\n --- Number of Devices ---')\n print(f'Number of active devices: '\n f'{self.results[\"devices_active\"]}')\n print(f'Number of total devices: '\n f'{self.results[\"devices_total\"]}')\n print(f'Number of devices in last 1 day:'\n f' {self.results[\"devices_1day\"]}')\n print(f'Number of devices in last week:'\n f' {self.results[\"devices_1week\"]}')\n return res", "def test_connected(self):\n networktables_mock = unittest.mock.Mock()\n networktables_mock.isConnected.side_effect = [True, False]\n\n network_instance = network.Network(networktables_mock, None, None)\n self.assertTrue(network_instance.connected())\n self.assertFalse(network_instance.connected())", "def _get_data(self):\n devices = []\n try:\n if not self.router_client.login():\n self.hass.states.set(f\"{DOMAIN}.statusmsg\", self.router_client.statusmsg)\n _LOGGER.warning(\"Login failed: {0}:{1}@{2}\".format(self.router_client.username, self.router_client.password,self.router_client.host))\n self.router_client.logout()\n return devices\n\n devices_json = self.router_client.get_devices_response()\n finally:\n self.router_client.logout()\n\n self.hass.states.set(f\"{DOMAIN}.scanning\", devices_json != False)\n\n if devices_json != False:\n for device in devices_json:\n # _LOGGER.debug(\"Device: {0}\".format(device))\n dev = Device(\n device['HostName'].replace('未知设备', 'Unknown'),\n device['IPAddress'],\n device['MACAddress'],\n device['Active'],\n ICONS.get(device['IconType'])\n )\n # _LOGGER.debug(\"Device: {0}\".format(dev))\n devices.append(dev)\n return devices\n else:\n return []", "def test_add_device(self):\n\n pass", "def test_empty(self):\n\n wire_map = default_wire_map([])\n assert wire_map == {}", "def test_invalid_device_type():\n _aws_device(wires=2, device_type=\"foo\", shots=None)", "def test_connect(rgd):\n assert rgd.connected is True", "def the_user_should_be_returned_with_the_list_of_devices_with_ip_address():\n assert web_app.validate_list_devices()", "def check_connectivity(self):\n r = self.run_cmd(\"get-state\")\n return r.startswith(\"device\")", "def getDeviceList(self):\n return defer.succeed(self.discovered)", "def test_non_jaqcd_device(name_mock):\n _bad_aws_device(wires=2)", "def check_device_state(self):", "def check_remote_pairing(ignore_errors):\n try:\n DeviceApi().get()\n return True\n except HTTPError as e:\n if e.response.status_code == 401:\n return False\n error = e\n except Exception as e:\n error = e\n\n LOG.warning('Could not get device info: {}'.format(repr(error)))\n\n if ignore_errors:\n return False\n\n if isinstance(error, HTTPError):\n if connected():\n raise BackendDown from error\n else:\n raise InternetDown from error\n else:\n raise error", "def test_device_on(self):\n self.ms.add_response({'\\x14081031031E226410\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.on((49, 3))\n self.assertTrue(response)", "def test_initialize_connection(self, moch_ghn):\n volume = 
{'name': '123', 'provider_id': 'spacey'}\n conn = self.driver.initialize_connection(volume, None)\n expected = {'name': 'spacey', 'noremovehost': 'thisserver'}\n self.assertDictMatch(expected, conn['data'])", "def test_show_bgp_config_empty(self, device):\n command_line = [\n \"bgp-bot\",\n \"--config\", CONFIG,\n \"show\",\n device,\n \"--bgp-config\"\n ]\n print execute(command_line)\n output = json.loads(execute(command_line)[0])\n assert not output", "def test_device_states_get(self):\n pass", "def the_user_should_not_be_able_to_connect_to_another_device():\n print(\"Trying to connect 2 devices at once\")\n bln_result = web_app.connect_to_device2()\n assert(bln_result, False)", "def device_get(self, filters={}):\n return {}", "def connected_network_devices(self):\n connected = {'ip': self.ip, 'port': self.port}\n return connected", "async def connected(self) -> bool:\n args = ['-t', f\"DEVICE INFO,{self.conf['device_address']}\"]\n output = await self.run_vh(args)\n return \"IN USE BY: NO ONE\" not in output", "def test_init(self):\n self.assertEqual(self.device_key, self.factory.device_key)", "def testBuildDeviceList(self):\n\n self.inv._devices = {\n 'first': self.Device(),\n 'second': self.Device(),\n 'third': self.Device()\n }\n self.inv._CmdFilter('targets', ['^f.*,second,^t.ird'])\n self.inv._CmdFilter('xtargets', [''])\n self.inv._device_list = None\n self.assertEqual(set(['first', 'second', 'third']),\n set(self.inv.device_list))\n\n self.inv._CmdFilter('targets', ['^f.*'])\n self.inv._device_list = None\n self.assertEqual(['first'], self.inv.device_list)", "def test_filter_device1(self):\n pass", "def test_is_connected__not_connection(self):\n self.switch.connection = None\n self.switch.is_active = MagicMock()\n self.switch.is_active.return_value = True\n\n self.assertFalse(self.switch.is_connected())", "def health_check(self):\n unset_props = []\n if not self.hub_name:\n unset_props.append(self._hub_name_prop)\n if not self.port_number:\n unset_props.append(self._primary_port_prop)\n if unset_props:\n msg_format = (\"If device is connected to {}, \"\n \"set them via 'gdm redetect {}'\")\n msg = msg_format.format(self.hub_type, self._device_name)\n error_msg = \"properties {} are unset. 
\".format(\n \" and \".join(unset_props)) + msg\n raise errors.CapabilityNotReadyError(\n msg=error_msg, device_name=self._device_name)\n\n try:\n self._hub = self._create_device_func(self.hub_name)\n # Set up ethernet\n if self.ethernet_switch_address is not None:\n self._ethernet_switch = self._create_device_func(\n self.ethernet_switch_address)\n\n except errors.DeviceError as err:\n raise errors.CapabilityNotReadyError(\n msg=str(err), device_name=self._device_name)\n if self.ethernet_switch_address is not None:\n self._verify_switch_created(self._ethernet_switch)\n self._healthy = True", "def test_create_device_data(self):\n pass", "def test_discover_no_cli_creds(self):\n entry = mock.MagicMock(user=None, password=None, enable_password=None)\n vendor = mock.MagicMock()\n vendor_settings = mock.MagicMock()\n self.networking_handler._get_cli_credentials = mock.MagicMock(return_value=None)\n\n # act\n result = self.networking_handler.discover(entry=entry,\n vendor=vendor,\n vendor_settings=vendor_settings)\n\n # verify\n self.assertEqual(result, entry)\n self.assertEqual(entry.comment, \"Unable to discover device user/password/enable password\")\n self.assertIsNone(entry.user)\n self.assertIsNone(entry.password)\n self.assertIsNone(entry.enable_password)", "def test_gwservice_deletedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False", "def test_device_add_from_file(self, gateway_with_devs):\n assert 'daq' in gateway_with_devs._devs\n assert 'pel' in gateway_with_devs._devs\n assert 'sg' in gateway_with_devs._devs\n assert 'not_a_driver' not in gateway_with_devs._devs", "def the_user_should_be_able_to_connect_to_another_device():\n assert web_app.connect_to_device2()", "def get_list_devices(self, verbose=False):\n # TODO: refresh region_names if more regions get devices available\n self.backends = {}\n region_names = ['us-west-1', 'us-east-1']\n for region in region_names:\n client = boto3.client(\n 'braket',\n region_name=region,\n aws_access_key_id=self._credentials['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=self._credentials['AWS_SECRET_KEY'],\n )\n filters = []\n devicelist = client.search_devices(filters=filters)\n for result in devicelist['devices']:\n if result['deviceType'] not in ['QPU', 'SIMULATOR']:\n continue\n if result['deviceType'] == 'QPU':\n device_capabilities = json.loads(\n client.get_device(deviceArn=result['deviceArn'])['deviceCapabilities']\n )\n self.backends[result['deviceName']] = {\n 'nq': device_capabilities['paradigm']['qubitCount'],\n 'coupling_map': device_capabilities['paradigm']['connectivity']['connectivityGraph'],\n 'version': device_capabilities['braketSchemaHeader']['version'],\n 'location': region, # 
deviceCapabilities['service']['deviceLocation'],\n 'deviceArn': result['deviceArn'],\n 'deviceParameters': device_capabilities['deviceParameters']['properties']['braketSchemaHeader'][\n 'const'\n ],\n 'deviceModelParameters': device_capabilities['deviceParameters']['definitions'][\n 'GateModelParameters'\n ]['properties']['braketSchemaHeader']['const'],\n }\n # Unfortunately the Capabilities schemas are not homogeneus for real devices and simulators\n elif result['deviceType'] == 'SIMULATOR':\n device_capabilities = json.loads(\n client.get_device(deviceArn=result['deviceArn'])['deviceCapabilities']\n )\n self.backends[result['deviceName']] = {\n 'nq': device_capabilities['paradigm']['qubitCount'],\n 'coupling_map': {},\n 'version': device_capabilities['braketSchemaHeader']['version'],\n 'location': 'us-east-1',\n 'deviceArn': result['deviceArn'],\n 'deviceParameters': device_capabilities['deviceParameters']['properties']['braketSchemaHeader'][\n 'const'\n ],\n 'deviceModelParameters': device_capabilities['deviceParameters']['definitions'][\n 'GateModelParameters'\n ]['properties']['braketSchemaHeader']['const'],\n }\n\n if verbose:\n print('- List of AWSBraket devices available:')\n print(list(self.backends))\n\n return self.backends", "def test_listEmpty(self):\n store = Store()\n self.assertSuccessStatus(self._makeConfig(store), [\"list\"])\n self.assertIn(\"There are no ports configured.\", sys.stdout.getvalue())", "def __init__(self) -> None:\n self._found_devices = {} # type: Dict[IPv4Address, conf.BaseService]", "def test_connection(self):\n r = main.List.connection()\n self.assertTrue(r.ping(), \"Connection failed.\")", "async def test_user_step_no_devices_found(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.ld2410_ble.config_flow.async_discovered_service_info\",\n return_value=[NOT_LD2410_BLE_DISCOVERY_INFO],\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"no_devices_found\"", "def test_get_dev_info_returns_dict():\n assert isinstance(get_dev_info(), dict)", "def test_get_device_templates(self):\n pass", "async def test_user_step_no_new_devices_found(hass: HomeAssistant) -> None:\n entry = MockConfigEntry(\n domain=DOMAIN,\n data={\n CONF_ADDRESS: LD2410_BLE_DISCOVERY_INFO.address,\n },\n unique_id=LD2410_BLE_DISCOVERY_INFO.address,\n )\n entry.add_to_hass(hass)\n with patch(\n \"homeassistant.components.ld2410_ble.config_flow.async_discovered_service_info\",\n return_value=[LD2410_BLE_DISCOVERY_INFO],\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"no_devices_found\"", "async def test_device_tracker_hostname_and_macaddress_after_start_not_router(\n hass: HomeAssistant,\n) -> None:\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_HOST_NAME: \"connect\",\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: \"something_else\",\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n await 
hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0", "async def _async_has_devices(hass) -> bool:\n gree_discovery = Discovery(DISCOVERY_TIMEOUT)\n devices = await gree_discovery.scan(wait_for=DISCOVERY_TIMEOUT)\n return len(devices) > 0", "def test_connect_fail(self):\n with pytest.raises(InstrumentGatewayError):\n with InstrumentGateway(addr='an invalid ip!'):\n pass", "def test_all_hardware(self, mock_co2, mock_humidity):\n hw = HardwareEmulator(hardware_id=123456789, location='Room 567')\n hw.emulate_data()\n mock_humidity.assert_called_once_with()\n mock_co2.assert_called_once_with()", "async def test_entity_device_info_with_connection(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_entity_device_info_with_connection(\n hass, mqtt_mock_entry, select.DOMAIN, DEFAULT_CONFIG\n )", "async def test_discovery_cannot_connect(hass: HomeAssistant) -> None:\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY\n )\n\n with patch(\n \"homeassistant.components.volumio.config_flow.Volumio.get_system_info\",\n side_effect=CannotConnectError,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={},\n )\n\n assert result2[\"type\"] == \"abort\"\n assert result2[\"reason\"] == \"cannot_connect\"", "def test_get_device_groups(self):\n pass", "def test_get_servers(self):\n self.assertIsInstance(network.get_servers(), dict)", "def get_network_devices(user, passwd, base_api_url):\n network_devices = ''\n response = connect_to_idrac(user, passwd, base_api_url)\n if response and response.json():\n network_devices_info = response.json()\n try:\n network_devices = network_devices_info[u'Members']\n except KeyError:\n network_devices = ''\n get_user_response(message='could not get network devices info')\n else:\n get_user_response(message='idrac connection status code is 401')\n\n return network_devices", "def test_get_pci_device_by_moid(self):\n pass", "def test_get_device_presence(self):\n\n device_id = self.properties['device1.id']\n response = self.api.get_device_presence(device_id)\n\n self.assertEqual(device_id, response.sdid, 'Sdids must match')\n self.assertIsNotNone(response.data.last_seen_on, 'last_seen_on')\n self.assertIsNotNone(response.data.connected, 'connected')", "def test_get_valid_networks_for_virtualization_realm(self):\n pass", "def test_get_device_by_id1(self):\n pass", "def Check_Gateway(gateway):\n\n global STATUS\n logging.info(\"Pinging gateway\")\n if ping_machine(gateway) != 0:\n add_info(gateway, GATEWAY, \"unpingable\")\n STATUS = 1 # can't work w/out gateway\n return 1\n else:\n add_info(gateway, GATEWAY, \"OK\")\n return 0", "def test_get_device_by_id(self):\n pass", "async def test_device_tracker_registered(hass: HomeAssistant) -> None:\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerRegisteredWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n async_dispatcher_send(\n hass,\n CONNECTED_DEVICE_REGISTERED,\n {\"ip\": \"192.168.210.56\", \"mac\": \"b8b7f16db533\", \"host_name\": \"connect\"},\n )\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 1\n assert 
mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()", "def test_gwservice_updatedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n payload = {'serialNumber': 'DEADBEEF0011',\n 'owner': 'pytest'}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"PUT\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n device = json.loads(resp.text)\n print (device)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n @pytest.mark.sdk_restapi\n def test_gwservice_deletedevice(self, setup_controller):\n \"\"\"\n Test the delete device endpoint\n WIFI-3455\n \"\"\"\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False", "def test_setup_adds_proper_devices(self, mock_light):\n good_config = {\n \"mochad\": {},\n \"light\": {\n \"platform\": \"mochad\",\n \"devices\": [{\"name\": \"Light1\", \"address\": \"a1\"}],\n },\n }\n assert setup_component(self.hass, light.DOMAIN, good_config)", "async def get_device_list(self):\n self.logger.debug(\"Retrieving device list information.\")\n #url = 'https://{}/api/user/device'.format(self.apiHost) #suddenly stopped worrking, so use\n '''\n #full version\n url = 
'https://{}/api/user/device?lang=en&apiKey={}&getTags=1&version={}&ts={}&nonce={}&appid={}&imei={}&os={}&model={}&romVersion={}&appVersion={}'.format(self.apiHost,\n self.apikey,\n self.timestamp,\n self._version,\n self._nonce,\n self._appid,\n self._imei,\n self._os,\n self._model,\n self._romVersion,\n self._appVersion)\n '''\n url = 'https://{}/api/user/device?version={}&appid={}'.format(self.apiHost, self._version, self._appid)\n headers = {\n 'Authorization': 'Bearer %s' % self.authenticationToken,\n }\n self.logger.debug('url: %s, headers: %s' % (url, headers))\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n json_response = await response.json()\n \n self.logger.debug('received response status: %s' % response.status) \n self.logger.debug('received response: %s' % self.pprint(json_response))\n if response.status != 200:\n self.logger.error('error: %s received' % response.status)\n return\n \n if json_response.get(\"devicelist\"):\n self.logger.info('New response format found')\n json_response = json_response[\"devicelist\"]\n \n self.logger.debug('number of device(s) is: %d' % len(json_response))\n \n self._devices = json_response #list of devices and current configurations\n \n self._create_client_devices()\n \n '''\n Example Response:\n [\n {\n \"__v\": 0,\n \"_id\": \"5becffa6d2b4a3c34cb79b38\",\n \"apikey\": \"530303a6-cf2c-4246-894c-xxxxxxxxxxx\",\n \"brandName\": \"AUTOSLIDE\",\n \"createdAt\": \"2018-11-15T05:09:58.341Z\",\n \"deviceStatus\": \"\",\n \"deviceUrl\": \"\",\n \"deviceid\": \"100050xxxxx\",\n \"devicekey\": \"4123ec79-d2c3-4d32-930a-xxxxxxxxxxxxx\",\n \"extra\": {\n \"_id\": \"xxxxxxxxxxxxxxxx\",\n \"extra\": {\n \"apmac\": \"xx:xx:xx:xx:xx:xx\",\n \"brandId\": \"5a6fcf00f620073c67efc280\",\n \"description\": \"20180813001\",\n \"mac\": \"xx:xx:xx0:xx:xx:xx\",\n \"manufacturer\": \"\\u9752\\u5c9b\\u6fb3\\u601d\\u5fb7\\u667a\\u80fd\\u95e8\\u63a7\\u7cfb\\u7edf\\u6709\\u9650\\u516c\\u53f8\",\n \"model\": \"PSA-BTA-GL\",\n \"modelInfo\": \"5af3f5332c8642b001540dac\",\n \"ui\": \"\\u63a8\\u62c9\\u5ba0\\u7269\\u95e8\",\n \"uiid\": 54\n }\n },\n \"group\": \"\",\n \"groups\": [],\n \"ip\": \"xxx.xx.xx.xxx\",\n \"location\": \"\",\n \"name\": \"Patio Door\",\n \"offlineTime\": \"2018-12-31T07:23:31.018Z\",\n \"online\": true,\n \"onlineTime\": \"2018-12-31T12:19:33.216Z\",\n \"params\": {\n \"a\": \"3\",\n \"b\": \"3\",\n \"c\": \"1\",\n \"d\": \"1\",\n \"e\": \"1\",\n \"f\": \"1\",\n \"fwVersion\": \"2.0.2\",\n \"g\": \"0\",\n \"h\": \"1\",\n \"i\": \"0\",\n \"j\": \"00\",\n \"k\": \"0\",\n \"l\": \"1\",\n \"m\": \"2\",\n \"n\": \"0\",\n \"rssi\": -53,\n \"staMac\": \"xx:xx:xx:xx:xx:xx\"\n },\n \"productModel\": \"WFA-1\",\n \"settings\": {\n \"alarmNotify\": 1,\n \"opsHistory\": 1,\n \"opsNotify\": 0\n },\n \"sharedTo\": [\n {\n \"note\": \"\",\n \"permit\": 15,\n \"phoneNumber\": \"[email protected]\",\n \"shareTime\": 1542259546087\n }\n ],\n \"showBrand\": true,\n \"type\": \"10\",\n \"uiid\": 54\n }\n ]\n \n or New format:\n {\n \"devicelist\": [\n {\n \"__v\": 0,\n \"_id\": \"5c3665d012d28ae6ba4943c8\",\n \"apikey\": \"530303a6-cf2c-4246-894c-50855b00e6d8\",\n \"brandLogoUrl\": \"https://us-ota.coolkit.cc/logo/KRZ54OifuGmjoEMxT1YYM3Ybu2fj5K2C.png\",\n \"brandName\": \"Sonoff\",\n \"createdAt\": \"2019-01-09T21:21:20.402Z\",\n \"devConfig\": {},\n \"devGroups\": [],\n \"deviceStatus\": \"\",\n ... 
as before\n '''", "async def test_device_tracker_hostname_and_macaddress_after_start_not_home(\n hass: HomeAssistant,\n) -> None:\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_NOT_HOME,\n {\n ATTR_HOST_NAME: \"connect\",\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0", "def test_not_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.ios_app,\n )\n call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n 'available': 'False',\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=NAK')\n self.assertEqual(cache.get('attempts'), 2)" ]
[ "0.6762325", "0.6762325", "0.672721", "0.6718752", "0.66731656", "0.6479616", "0.64170194", "0.64170194", "0.6373689", "0.6370904", "0.63623434", "0.6293659", "0.62630945", "0.6125039", "0.6104315", "0.60881805", "0.6080881", "0.6077714", "0.60736173", "0.6019352", "0.6019352", "0.60067296", "0.60005754", "0.5975097", "0.5973282", "0.5958114", "0.59443873", "0.5941064", "0.5939071", "0.59205145", "0.5907202", "0.58987427", "0.58907735", "0.5876848", "0.5875891", "0.58745825", "0.58611435", "0.585701", "0.5835792", "0.5833349", "0.58266884", "0.5809645", "0.5803444", "0.57984626", "0.57938516", "0.5789442", "0.5765046", "0.5761172", "0.5755428", "0.5754689", "0.57525283", "0.57511336", "0.5746387", "0.5735668", "0.57326543", "0.57279915", "0.5726066", "0.5717309", "0.57113576", "0.5697143", "0.56869406", "0.5684531", "0.5651474", "0.5646846", "0.5627935", "0.5627699", "0.56243914", "0.56215906", "0.5618318", "0.5614449", "0.56064415", "0.5605583", "0.5603324", "0.55962306", "0.55887234", "0.557917", "0.5578876", "0.5574254", "0.5573573", "0.556149", "0.5530761", "0.5528991", "0.5524819", "0.5522388", "0.5509264", "0.55080986", "0.5489009", "0.5485742", "0.5485124", "0.5478425", "0.54759824", "0.54714596", "0.5468339", "0.5453551", "0.54523456", "0.5450448", "0.54473424", "0.5443068", "0.5439307", "0.54366624" ]
0.81306255
0
Test the gateway returns an error if the ip is wrong
def test_connect_fail(self): with pytest.raises(InstrumentGatewayError): with InstrumentGateway(addr='an invalid ip!'): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_functional_good_ip(self, url):\n response = requests.get(\"http://localhost:80/ip2w/{url}\".format(url=url))\n if response.status_code != BAD_GATEWAY:\n print(\"\\nGATEWAY is OK\")\n self.assertEqual(response.status_code, OK)\n content = response.json()\n self.assertEqual(len(content), 3)\n self.assertTrue(content.get(\"temp\"))\n self.assertTrue(content.get(\"city\"))\n else:\n print(\"\\nGATEWAY is RESET BY PEER\")", "def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n sys.exit()", "def test_functional_bad_ip(self, url):\n response = requests.get(\"http://localhost:80/ip2w/{url}\".format(url=url))\n self.assertEqual(response.status_code, BAD_REQUEST)\n self.assertEqual(response.json().get(\"error\"),\n \"No city for ip {}\".format(url))", "def test_ip_addr_fails(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._fail_ip = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def is_valid_ip(ip):\n ...", "def __checkIPAddr(self,ip):\n if not iplib.checkIPAddrWithoutMask(ip):\n raise GeneralException(errorText(\"GENERAL\",\"INVALID_IP_ADDRESS\")%ip)", "def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None", "def Check_Gateway(gateway):\n\n global STATUS\n logging.info(\"Pinging gateway\")\n if ping_machine(gateway) != 0:\n add_info(gateway, GATEWAY, \"unpingable\")\n STATUS = 1 # can't work w/out gateway\n return 1\n else:\n add_info(gateway, GATEWAY, \"OK\")\n return 0", "def request_valid_ip():\n ip = input(\"Enter a valid IP address you would like to check: \")\n return validate_ip(ip)", "def isIP(ipToTest):\n \n try:\n socket.inet_aton(ipToTest)\n return True\n except socket.error:\n return False", "def checkIP(self, ip = None):\n\t\treturn os.system(\"ping -c 1 -w2 \" + ip + \" > /dev/null 2>&1\") == 0", "def test_validate_ip(self, ip, version, expected_result):\n # Call method under test\n test_result = validate_ip(ip, version)\n\n # Assert\n self.assertEqual(expected_result, test_result)", "def test_ip(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n #init_db(engine)\n #update()\n assert True", "def test_ip(self):\n ##Todo: Improve this check\n ip = socket.gethostbyname(socket.gethostname())\n ip = [int(i) for i in ip.split('.')]\n assert len(ip) == 4\n assert ip[0] == 10\n assert ip[1] == 137\n assert ip[2] == 1\n assert ip[3] >= 1 and ip[3] <= 255", "def getGwIp(target):\n tmp = target.split('.')\n try:\n gw = (tmp[0] + \".\" + tmp[1] + \".\" + tmp[2] + \".1\")\n except IndexError:\n print(bcolors.FAIL + \" Invalid IP provided: \" + target + bcolors.ENDC)\n return False\n return gw", "def checklan(ipaddr, network):\n return True", "def test_ipv4_validation_failure():\n with pytest.raises(socket.error):\n is_ipv4('256.8.8.8')", "def test_try_create_invalid_ip(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_10_0_0_430_net_5.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(400, response.status_code)\n self.compare_values(\n 'Error save new IP.: 10.0.0.430',\n response.data['detail'])", "def check(self, ip_address, country):\n payload = {\n 'ip': ip_address,\n 'country': country\n }\n 
return self.util.sendRequest(self.util.ipCheckRoute, payload)", "def test_check_ip_on_whitelist_false(self):\n\n ip_name = 'f11.my.com'\n\n result = check_ip_on_whitelist(ip_name, self.pattern_ip)\n\n self.assertFalse(result)", "def test_get_source_ip(self):\n pass", "def test_exclude_ip_ban(self):\n pass", "def is_actual_ip(self, ip_addr):\n try:\n socket.inet_aton(ip_addr)\n return True\n except socket.error:\n return False", "def test_validate_ip_exit():\n ip = '1.1'\n with pytest.raises(SystemExit) as err:\n howisresolved.validate_ip(ip)\n assert 'Invalid ip specified.' in str(err.value)", "def test(self):\n response = requests.get(\"https://ipinfo.io/\")\n response_json = {}\n try:\n response_json = response.json()\n except JSONDecodeError as e:\n response_json[\"ip\"] = \"Error with remote website. This is not an error with the client.\"\n response_json[\"city\"] = \"Error with remote website. This is not an error with the client.\"\n response_json[\"region\"] = \"Error with remote website. This is not an error with the client.\"\n response_json[\"loc\"] = \"Error with remote website. This is not an error with the client.\"\n response_json[\"org\"] = \"Error with remote website. This is not an error with the client.\"\n response_json[\"timezone\"] = \"Error with remote website. This is not an error with the client.\"\n\n self.ip = str(response_json['ip'])\n self.city = str(response_json['city'])\n self.region = str(response_json['region'])\n self.loc = str(response_json['loc'])\n self.org = str(response_json['org'])\n self.timezone = str(response_json['timezone'])\n return self", "def dshield_ip_check(ip):\n if not is_IPv4Address(ip):\n return None\n\n headers = {'User-Agent': useragent}\n url = 'https://isc.sans.edu/api/ip/'\n response = requests.get('{0}{1}?json'.format(url, ip), headers=headers)\n return response.json()", "def vt_ip_check(ip, vt_api):\n if not is_IPv4Address(ip):\n return None\n\n url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'\n parameters = {'ip': ip, 'apikey': vt_api}\n response = requests.get(url, params=parameters)\n try:\n return response.json()\n except ValueError:\n return None", "def checkIPValid(self, ipAddr):\n try:\n socket.inet_aton(ipAddr)\n return True\n except socket.error:\n return False", "def test_geoiplookup(self):\n\n try:\n output = subprocess.check_output('geoiplookup 1.1.1.1', shell=True).decode('utf-8')\n if not 'Cloudflare' in output:\n self.fail('Are your geoip databases in /usr/share/GeoIP/ ???')\n except:\n self.fail('Error when calling geoiplookup')", "def set_ip():\r\n fail_count = 0\r\n while fail_count < 3:\r\n address = moves.input('What is the IP address of the Packetmaster you want to access?: ')\r\n if pm_input_check.ipv4(address) != 0:\r\n address = pm_input_check.ipv4(address)\r\n return address\r\n else:\r\n print(\"That is not a valid IPv4 address.\")\r\n fail_count += 1\r\n print(\"That is not a valid IPv4 address. Exiting\")\r\n exit()", "def validate_ip(self, ip):\n if not ip:\n raise ValidationError(\"Please provide an actual IP or web address. 
You gave me: \" + ip)", "def test_get_ip_tags_invalid_ip(client, database):\n\n invalid_ip = \"http://127.0.0.1:5000/ip-tags/10.1.2.3000\"\n response = client.get(invalid_ip)\n response_data = response.get_json()\n\n assert response.status_code == 400\n assert response.headers[\"Content-Type\"] == \"application/json\"\n assert (\n response_data[\"error\"]\n == \"400 Bad Request: Address 10.1.2.3000 does not have IPv4 format\"\n )", "def check_vpn_interface():\n return validate_vpn_interface(call_command('netstat -i')[0].split('\\n'))", "def check_ip_format(self, ip_address):\n # regex for validating an Ip-address \n ip_regex = \"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/([0-9]|[1-2][0-9]|3[0-2]))?$\"\n\n # validate ip address\n r = re.compile(ip_regex)\n if(r.match(ip_address)):\n print(\"Valid IP address format\")\n self.target = ip_address\n return True\n else:\n print(R + \"{} is an invalid IP address format\".format(ip_address) + W)\n return False", "def test_invalid_ip_address_as_argument(self, api_client):\n runner = CliRunner()\n\n expected = 'Error: Invalid value for \"[IP_ADDRESS]...\": not-an-ip\\n'\n\n result = runner.invoke(subcommand.quick, [\"not-an-ip\"])\n assert result.exit_code == 2\n assert expected in result.output\n api_client.quick.assert_not_called()", "def ipinfo_ip_check(ip):\n if not is_IPv4Address(ip):\n return None\n\n response = requests.get('http://ipinfo.io/%s/json' % ip)\n return response.json()", "def validIPAddress(ip):\n try:\n socket.inet_aton(ip)\n return True\n except socket.error:\n return False", "def testIP(self):\n self.assertEqual([\"http://234.234.234.234\"], grab('http://234.234.234.234', self.needScheme))", "def checkIP(self):\n\t\tself.get(\"https://ifconfig.me/\")\n\t\treturn self.findId(\"ip_address\").text", "def test_get_node_internal_ip_address(self):\n pass", "def test_error_if_bgp_ipv4_conflict_no_conflict(self, m_exit, m_client):\n m_client.get_hostnames_from_ips = Mock()\n m_client.get_hostnames_from_ips.return_value = {}\n startup.error_if_bgp_ip_conflict(\"10.0.0.1\", \"abcd::beef\")\n self.assertFalse(m_exit.called)", "def _LeasedIP(self):\n check_command = 'ip addr show {interface} | grep \"inet \"'.format(\n interface=self.interface)\n try:\n # grep exit with return code 0 when we have retrieved an IP.\n out = self._device.CheckOutput(check_command)\n except device_types.CalledProcessError:\n return False\n # ex: inet 192.168.159.78/20 brd 192.168.159.255 scope global wlan0\n return out.split()[1].split('/')[0]", "def validate_ip(ip):\n try:\n ipobj = IPy.IP(ip)\n if ipobj.iptype() == 'PRIVATE':\n print(\"IP addresses {} will be ignored as it is in a private network range.\".format(ip))\n ip = None\n except ValueError as ve:\n print(\"Invalid IP: {}\".format(ve.args))\n ip = None\n finally:\n return ip", "def _checknet():\n exit_code = os.system('ping -c 1 www.baidu.com 1>/dev/null 2>&1')\n return exit_code", "def test_check_ip_on_whitelist_true(self):\n\n ip_name = 'mail-ed1-f51.google.com'\n\n result = check_ip_on_whitelist(ip_name, self.pattern_ip)\n\n self.assertTrue(result)", "def dev_ip():\n while True:\n d_ip = raw_input(\"\\nEnter the IP Address of the device you need to access: \")\n if re.match(\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\", d_ip):\n return d_ip\n else:\n print(\"\\nThat is not a valid IP Address!\\nTry again.\")", "def known_ip(ip=DEFAULT_IP):\r\n tunnel(ip)", "def test_same_ip(self):\n self.create_ptr(\n ip_str='128.193.0.2', 
ip_type='4', fqdn='foo1.oregonstate.edu')\n\n with self.assertRaises(ValidationError):\n self.create_ptr(\n ip_str='128.193.0.2', ip_type='4', fqdn='foo2.oregonstate.edu')", "def bridge_network_check(ip, bridge_ip, bridge_netmask):\n# convert vars to unicode \n ip = unicode(ip)\n bridge_ip = unicode(bridge_ip)\n bridge_netmask = unicode(bridge_netmask)\n# by default ip is not in bridge network \n brctl = 0\n\n# bridge insterface ip network\n brdige_network = IPv4Interface('%s/%s' % (bridge_ip, bridge_netmask)).network\n\n# check if ip is from bridge network and return bridge control var (brctl) = true\n if IPv4Address(ip) in list(IPv4Network(brdige_network)):\n brctl = 1\n\n# return brctl and bridge ip network \n return brctl, brdige_network", "def verify_ip_address(ip):\n try:\n ipaddress.ip_address(ip)\n return True\n except ValueError:\n return False", "def test_get_geoip():\n assert get_geoip(\"74.125.67.100\") == \"US\"", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_invalid_ip_address_as_argument(self, api_client):\n runner = CliRunner()\n\n expected = 'Error: Invalid value for \"[IP_ADDRESS]...\": not-an-ip\\n'\n\n result = runner.invoke(subcommand.ip, [\"not-an-ip\"])\n assert result.exit_code == 2\n assert expected in result.output\n api_client.ip.assert_not_called()", "def test_udp_bad_server():\n assert dnsck_query(\"8.8.8.88\", \"google.com\", \"A\", 1) == 1", "def validate_ip(ip):\n valid_ip = ''\n try:\n valid_ip = str(ipaddress.ip_address(ip))\n except ValueError:\n logging.error('ip address \\'{}\\' is not valid: '.format(ip))\n \n return valid_ip", "def check_input(data):\n if data.has_key('fqdn') and data.has_key('ip'):\n\n try:\n socket.inet_aton(data['ip'])\n return True\n except socket.error:\n return False", "def test_host_header_as_ip(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)", "def valid_ip(ip):\n return valid_ipv4(ip) or valid_ipv6(ip)", "def test_try_create_out_of_range_ip_in_network(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/out_of_range_ipv4_172_0_0_5_net_5.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(400, response.status_code)\n self.compare_values(\n 'Ip 172.0.0.5 not available for network 5.',\n response.data['detail'])", "def 
valid_ip(ip_addr):\n try:\n inet_aton(ip_addr)\n return True\n\n except error:\n return False", "def test_re_ip(self, ip_address: str, is_valid_ip: bool):\n self.assertEqual(bool(re_ip.search(ip_address)), is_valid_ip)", "def valid_ip_address (ip_address):\n return valid_key(ip_address, ip_hash, ip_hash_threshold)", "def isValidIP(ip_add):\n if _check_ip(ip_add):\n return True\n return False", "def test_ip_addresses_exists():\n load_ips()\n validate_names()", "def is_ip_allowed(self, ip):\n\n log.warn(\"%s.is_ip_allowed not implemented. Returning True for %s.\" % \\\n (self.__class__, ip))\n return True", "def validateIP(ip):\n # type: (str)->None\n try:\n socket.inet_aton(ip)\n except socket.error:\n socket.inet_pton(socket.AF_INET6, ip)", "def _is_valid_ip(ip):\n return _is_valid_ipv4(ip) or _is_valid_ipv6(ip)", "def test_ip_roundtrip():\n s = Signer(qb64=\"AgjD4nRlycmM5cPcAkfOATAp8wVldRsnc9f1tiwctXlw\",\n transferable=False)\n now = datetime.datetime.now(datetime.timezone.utc)\n payload = build_witness_ip(s, \"10.0.0.8\", \"0a:ff:c2:43:91:5c::\")\n r = parse_verify_witness_ip(payload)\n\n assert r is not None\n assert r[\"ip4\"] == \"10.0.0.8\"\n assert r[\"ip6\"] == \"a:ff:c2:43:91:5c::\"\n assert (r[\"timestamp\"] - now).seconds < 5\n assert r[\"verfer\"].raw == s.verfer.raw", "def is_valid_ip(addr):\n\n try:\n socket.inet_aton(addr)\n except socket.error:\n return False\n return True", "def _check_ip(val: Any, input_format: str, clean: bool) -> Any:\n try:\n if val in NULL_VALUES:\n return (None, \"null\") if clean else False\n\n address = ip_address(val)\n vers = address.version\n\n if vers == 4 and input_format != \"ipv6\" or vers == 6 and input_format != \"ipv4\":\n return (address, \"success\") if clean else True\n return (None, \"unknown\") if clean else False\n\n except (TypeError, ValueError):\n return (None, \"unknown\") if clean else False", "def address_blank_test():\n \n Debug.info('Enter an IP address should not be possible')\n\n click(\"1499782256475.png\")\n if exists(\"1499782281377.png\"):\n Debug.info('************ Pass ******************')\n click(\"1499782294209.png\")\n \n else: \n Debug.info('************ Fail ******************')\n click(\"1499782317985.png\")", "def test_correct_url(self, ip_address, bool_value):\n self.assertEqual(check_correct_url(ip_address), bool_value)", "def test_no_ip_address_passed(self, api_client):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.helper.sys\") as sys:\n sys.stdin.isatty.return_value = True\n result = runner.invoke(\n subcommand.ip, parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise ip\" in result.output\n api_client.ip.assert_not_called()", "def check_no_network():\n try:\n socket.gethostbyname(\"www.google.com\")\n return False\n except:\n return True", "def handle_ip(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<hostname>')\n return\n try:\n ipnr = socket.gethostbyname(item)\n ievent.reply(ipnr)\n except:\n ievent.reply(\"can't match \" + str(item))", "def test_connect(self, gateway):\n assert not gateway._devs", "def validate_ping(result):\n if '0 packets received' in str(result) or 'no answer from' in str(result) or '0 received' in str(result):\n print 'Conectividade - DOWN'\n return False\n print 'Conectividade - OK'\n return True", "def _get_ipaddress(node):\n if \"ipaddress\" not in node:\n with settings(hide('stdout'), warn_only=True):\n output = sudo('ohai ipaddress')\n if output.succeeded:\n 
node['ipaddress'] = json.loads(output)[0]\n return True\n return False", "def test_read_host_subnet(self):\n pass", "def _is_valid_ip(self, address):\r\n try:\r\n # Is this an valid ip address?\r\n ipaddr.IPNetwork(address)\r\n except ValueError:\r\n return False\r\n return True", "def validate_ip(argname, param, safe, optional = False):\n _validate_one(argname, param, safe, _check_ip, optional)", "def send_error(self, conn, msg):\n # dst ip becomes src ip to return the message\n\n # src ip becomes this ip\n\n # type becomes \"no route\"\n\n # msg is empty\n\n # send from port incoming...current dst ip?\n\n # TODO\n\n return", "def error_bad_gateway(self):\n self._error(502, \"Bad Gateway\")", "def check_ip(ip):\n \n is_black = is_black_ip(ip)\n return jsonify(ip=ip, is_black=is_black)", "def test_net_ping(self):\n\n self.assertEquals(self.scanner.ping(type='net'), True)\n\n # Test timeout\n self.assertRaises(\n ScanError,\n self.scanner.ping,\n {'type': 'net', 'timeout': 1.0e-16})" ]
[ "0.76661813", "0.71126264", "0.7094155", "0.69302577", "0.6852113", "0.6760333", "0.671132", "0.6597866", "0.65174395", "0.6418101", "0.64037704", "0.6371887", "0.63335603", "0.632763", "0.6314457", "0.6295242", "0.62879497", "0.6277039", "0.62159216", "0.6197408", "0.61674243", "0.6133235", "0.6113192", "0.6112046", "0.60939944", "0.60846484", "0.6069952", "0.60449183", "0.6042617", "0.6032942", "0.60252166", "0.5998271", "0.5993644", "0.5992747", "0.5987225", "0.5972789", "0.59354955", "0.5933348", "0.59242976", "0.59207934", "0.5919083", "0.5913978", "0.5911075", "0.59019303", "0.58787435", "0.58751345", "0.5870079", "0.586619", "0.5862486", "0.584946", "0.58486986", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58472854", "0.58432317", "0.5840304", "0.5839366", "0.5833912", "0.58335614", "0.5832555", "0.58315337", "0.58257186", "0.58158064", "0.58083504", "0.5804108", "0.5793134", "0.5780771", "0.577951", "0.5767112", "0.57485443", "0.5744856", "0.5733754", "0.5731926", "0.5731518", "0.5727563", "0.57104206", "0.57072735", "0.5706319", "0.57061076", "0.5700652", "0.56978446", "0.56975836", "0.5692559", "0.56876624", "0.5679379", "0.5676048", "0.5675701" ]
0.64550585
9
Test that the gateway fixture contains drivers that were loaded from files
def test_device_add_from_file(self, gateway_with_devs):
    assert 'daq' in gateway_with_devs._devs
    assert 'pel' in gateway_with_devs._devs
    assert 'sg' in gateway_with_devs._devs
    assert 'not_a_driver' not in gateway_with_devs._devs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixtures():", "def fixture_example_data():\n import_example_data()", "def _fixture_setup(self):\n pass", "def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })", "def test_load_configs_simulation(self):\n global locator, config_paths\n locator.load_config(config_paths[1])\n\n self.assertEqual(locator.config['routines'], ['simulate'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'SimDriver',\n 'kwargs': {\n \"arg\": \"val\"\n }\n })", "def setUp(self):\n self.test_data = self.read_data('test_data/clients.txt')", "def setUp(self):\n self.setUpPyfakefs()", "def test_missing_data_sources(self):", "def test_load(tmp_path, data_name, params, expect_paths):\n\n folder_path = tmp_path\n dsets = pennylane.data.data_manager.load(\n data_name=data_name,\n folder_path=folder_path,\n **params,\n )\n\n assert {Path(dset.bind.filename) for dset in dsets} == {\n Path(tmp_path, path) for path in expect_paths\n }", "def setUp(self):\n with open('test/0a6a357e.json') as read_file:\n self.tx_json_0a6a357e = json.load(read_file)\n with open('test/bip69-synth.json') as read_file:\n self.bip69_synth = json.load(read_file)", "def setUp(self):\n self.setUpPyfakefs()\n self.fake_os = fake_filesystem.FakeOsModule(self.fs)\n\n populate_fakefs(self)", "def test_autoload(driver: IxiaChassisShell2GDriver, autoload_context: AutoLoadCommandContext) -> None:\n inventory = driver.get_inventory(autoload_context)\n print_inventory(inventory)", "def fake_drivers(self):\n patcher = mock.patch('biggraphite.drivers.cassandra.build',\n return_value=self.accessor)\n patcher.start()\n self.addCleanup(patcher.stop)", "def setUpFixture(self):\n pass", "def test_bed(self):\n #TODO write bed tests", "def test_00_setup(self):\n with mock_api(magento_base_responses):\n import_batch(self.session, 'magento.website', self.backend_id)\n import_batch(self.session, 'magento.store', self.backend_id)\n import_batch(self.session, 'magento.storeview', self.backend_id)\n import_record(self.session, 'magento.res.partner.category',\n self.backend_id, 1)", "def test_core_files_hw(self):\n self.test_core_files()", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n 'L2GW', self)", "def setUp(self):\n lang = self._sim_lang\n self._simulator = self._find_resource(\n f\"drake/examples/hardware_sim/hardware_sim_{lang}\")\n self._example_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/example_scenarios.yaml\")\n self._test_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/test/test_scenarios.yaml\")\n self._default_extra = {\n # For our smoke test, exit fairly quickly.\n \"simulation_duration\": 0.0625,\n }", "def test_get_driver_test_class_usable(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n \n driver = locator.get_driver()\n try:\n driver.get_wind_dir()\n driver.get_sail()\n except Exception:\n self.fail('Could not call get_wind_dir and get_sail on driver from get_driver')", "def _load_test_data(self):\n self._save_test_data()", "def setUp(self):\n self.dbus_mock = MagicMock()\n self.mainloop_mock = MagicMock()\n self.gobject_mock = MagicMock()\n\n modules = {\n 'dbus': self.dbus_mock,\n 'dbus.mainloop.glib': self.mainloop_mock,\n 'gi.repository': self.gobject_mock,\n }\n 
self.dbus_mock.Interface.return_value.GetManagedObjects.return_value = tests.obj_data.full_ubits\n self.dbus_mock.Interface.return_value.Get = mock_get\n self.dbus_mock.Interface.return_value.Set = mock_set\n self.dbus_mock.Interface.return_value.GetAll = mock_get_all\n self.module_patcher = patch.dict('sys.modules', modules)\n self.module_patcher.start()\n from bluezero import adapter\n self.module_under_test = adapter\n self.adapter_device = 'hci0'\n self.adapter_name = 'linaro-alip'\n self.path = '/org/bluez/hci0'", "def setUp(self):\n self.epath = 'flyeye/tests/fixtures'\n self.dpath = join(self.epath, 'disc.silhouette')", "def setUp(self):\n models.Connector.objects.create(\n identifier=\"openlibrary.org\",\n name=\"OpenLibrary\",\n connector_file=\"openlibrary\",\n base_url=\"https://openlibrary.org\",\n books_url=\"https://openlibrary.org\",\n covers_url=\"https://covers.openlibrary.org\",\n search_url=\"https://openlibrary.org/search?q=\",\n isbn_search_url=\"https://openlibrary.org/isbn\",\n )\n self.connector = Connector(\"openlibrary.org\")\n\n work_file = pathlib.Path(__file__).parent.joinpath(\"../data/ol_work.json\")\n edition_file = pathlib.Path(__file__).parent.joinpath(\"../data/ol_edition.json\")\n edition_md_file = pathlib.Path(__file__).parent.joinpath(\n \"../data/ol_edition_markdown.json\"\n )\n edition_list_file = pathlib.Path(__file__).parent.joinpath(\n \"../data/ol_edition_list.json\"\n )\n self.work_data = json.loads(work_file.read_bytes())\n self.edition_data = json.loads(edition_file.read_bytes())\n self.edition_md_data = json.loads(edition_md_file.read_bytes())\n self.edition_list_data = json.loads(edition_list_file.read_bytes())", "def setUp(self):\n super(PlayTests, self).setUp(\n \"tests/data/shakespeare/\", \"structure.json\", \"brief_example.xml\")", "def test_alien_data(self):", "def test_load_protocol():\n\n # version 0.0.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,0,0))))\n\n # version 0.1.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,1,0))))", "def test_dag_load(self):\n # Run tests both for telescope with file suffixes and without\n for accounts in [None, {\"accounts\": [\"foo\", \"bar\"]}]:\n with self.subTest(accounts=accounts):\n env = ObservatoryEnvironment(\n self.project_id, self.data_location, api_host=self.host, api_port=self.api_port\n )\n with env.create():\n # Add Observatory API connection\n conn = Connection(\n conn_id=AirflowConns.OBSERVATORY_API, uri=f\"http://:password@{self.host}:{self.api_port}\"\n )\n env.add_connection(conn)\n\n # Add a Google Books telescope\n dt = pendulum.now(\"UTC\")\n telescope_type = orm.TelescopeType(\n name=\"Google Books Telescope\", type_id=TelescopeTypes.google_books, created=dt, modified=dt\n )\n env.api_session.add(telescope_type)\n organisation = orm.Organisation(name=\"anu-press\", created=dt, modified=dt)\n env.api_session.add(organisation)\n telescope = orm.Telescope(\n name=\"anu-press Google Books Telescope\",\n telescope_type=telescope_type,\n organisation=organisation,\n modified=dt,\n created=dt,\n extra=accounts,\n )\n env.api_session.add(telescope)\n env.api_session.commit()\n\n dag_file = os.path.join(module_file_path(\"oaebu_workflows.dags\"), \"google_books_telescope.py\")\n self.assert_dag_load(\"google_books_anu-press\", dag_file)", "def setup(self):\n log.msg(\"Fetching required net test inputs...\")\n for 
net_test_loader in self.netTestLoaders:\n yield self.fetchAndVerifyNetTestInput(net_test_loader)\n\n if self.bouncer:\n log.msg(\"Looking up test helpers...\")\n yield self.lookupTestHelpers()", "def setUp(self):\n \n with open(\"config_script.json\", \"r\") as json_data:\n config_data = json.load(json_data)\n\n logging.basicConfig(\n filename=config_data[\"filename_logging\"], \n filemode=config_data[\"filemode_logging\"],\n level=config_data[\"level_logging\"], \n format=config_data[\"format_logging\"])\n\n self.database = DB_Worker() \n self.info_list = GoodInfoList()\n self.file_goods = FileWork()\n self.file_data = self.file_goods.select_path_file(\"test\")\n\n if len(self.file_data) > 0:\n self.info_list.get_from_file(self.file_data)", "def setUp(self):\n self.setup_beets()", "def fixtures():\n temp_path = os.path.join(os.path.dirname(__file__), 'temp')\n demo_files_path = os.path.join(os.path.dirname(__file__), 'demo_files')\n\n # Create location\n loc = Location(name='local', uri=temp_path, default=True)\n db.session.add(loc)\n db.session.commit()\n\n # Example files from the data folder\n demo_files = (\n 'markdown.md',\n 'csvfile.csv',\n 'zipfile.zip',\n 'jsonfile.json',\n 'xmlfile.xml',\n 'notebook.ipynb',\n 'jpgfile.jpg',\n 'pngfile.png',\n )\n\n rec_uuid = uuid4()\n provider = RecordIdProvider.create(object_type='rec', object_uuid=rec_uuid)\n data = {\n 'pid_value': provider.pid.pid_value,\n }\n\n record = Record.create(data, id_=rec_uuid)\n bucket = Bucket.create()\n RecordsBuckets.create(record=record.model, bucket=bucket)\n\n # Add files to the record\n for f in demo_files:\n with open(os.path.join(demo_files_path, f), 'rb') as fp:\n record.files[f] = fp\n\n record.files.flush()\n record.commit()\n db.session.commit()", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n taas_consts.TAAS, self)", "def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()", "def load_devices():", "def test_load(self):\n command = constituencies.Command()\n command.handle('load', silent=True)", "def load_tests(loader, tests, pattern):\n test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)\n return driver.build_tests(test_dir, loader, host=None,\n intercept=blogstrap.create_app,\n fixture_module=sys.modules[__name__])", "def setUp(self):\n super().setUp()\n self.devices = _DEVICE_STRATEGY()\n command_line = [\"pool\", \"create\", self._POOLNAME] + self.devices\n RUNNER(command_line)", "def test_load_fixture(caplog):\n caplog.set_level('INFO')\n\n with pytest.raises(Advisor.DoesNotExist):\n Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n\n response = _request_load_fixture({'fixture': [ADVISER_FIXTURE]})\n\n assert response.status_code == status.HTTP_201_CREATED\n\n adviser = Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n assert adviser.email == ADVISER_FIXTURE['fields']['email']\n assert adviser.first_name == ADVISER_FIXTURE['fields']['first_name']\n assert adviser.last_name == ADVISER_FIXTURE['fields']['last_name']\n assert str(adviser.dit_team_id) == ADVISER_FIXTURE['fields']['dit_team']\n\n fixture_info = [\n 'Loading fixture: [',\n ' {',\n ' \"fields\": {',\n f' \"dit_team\": \"{ADVISER_FIXTURE[\"fields\"][\"dit_team\"]}\",',\n f' \"email\": \"{ADVISER_FIXTURE[\"fields\"][\"email\"]}\",',\n f' \"first_name\": \"{ADVISER_FIXTURE[\"fields\"][\"first_name\"]}\",',\n f' \"last_name\": \"{ADVISER_FIXTURE[\"fields\"][\"last_name\"]}\"',\n ' },',\n ' \"model\": \"company.advisor\",',\n f' \"pk\": \"{ADVISER_FIXTURE[\"pk\"]}\"',\n ' 
}',\n ']',\n ]\n assert caplog.messages == ['\\n'.join(fixture_info)]", "def load_test_file():\n hou.hipFile.load(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"data\",\n \"test_api_integration.hipnc\",\n ),\n ignore_load_warnings=True,\n )\n\n yield\n\n hou.hipFile.clear()", "def setUp(self):\n\n self.test_data_path = 'testing/test_data/'", "def setUp(self):\n server = TestServer()\n servers = OrderedDict([(\"default\", server)])\n self.client = TurboTestClient(servers=servers)\n\n self.ref_a = RecipeReference.loads(\"liba/1.0@conan/stable\")\n self.client.create(self.ref_a, conanfile=GenConanfile())\n\n self.ref_b = RecipeReference.loads(\"libb/1.0@conan/stable\")\n self.client.create(self.ref_b, conanfile=GenConanfile().with_requirement(self.ref_a))\n\n self.ref_c = RecipeReference.loads(\"libc/1.0@conan/stable\")\n self.client.create(self.ref_c, conanfile=GenConanfile().with_requirement(self.ref_a))\n\n self.ref_d = RecipeReference.loads(\"libd/1.0@conan/stable\")\n self.client.create(self.ref_d, conanfile=GenConanfile().with_requirement(self.ref_b))\n\n self.ref_e = RecipeReference.loads(\"libe/1.0@conan/stable\")\n self.client.create(self.ref_e, conanfile=GenConanfile().with_requirement(self.ref_d))\n\n self.ref_f = RecipeReference.loads(\"libf/1.0@conan/stable\")\n conanfile = GenConanfile().with_requirement(self.ref_c).with_requirement(self.ref_d)\n self.client.create(self.ref_f, conanfile=conanfile)", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_get_scenarios(self):\n pass", "def start_fixture(self):\n pass", "def populate_fixtures():\n languages()\n words()", "def test_load_case_objects(cli_args_fixture, tmp_path):\n # cli_args_fixture fixture is called but not used simply in order to mock it in render_json.dump()\n tmp_file = os.path.join(tmp_path, 'stub-load.json')\n # - protocol\n proto_ref, proto_name, proto_blocking, proto_is_database = ('foo', 'bar', True, False)\n # - crawl strategy\n cs_description, cs_name, cs_providers, cs_provider_args, cs_child_provider, cs_filter, cs_rewrites = \\\n ('foo', 'bar', ['baz'], {'buzz': 'buzzbuzz'}, {'qux': True}, {'quux': True}, {'quz': True})\n # - node\n node_ref, node_prov, node_mux, node_hint, node_address, node_service_name, node_children, node_warn, node_err = \\\n ('a_ref', 'a_prov', 'a_mux', True, 'an_add', 'a_name', {'foo': 'child'}, {'bar': True}, {'baz': True})\n stub_json = f\"\"\"\n{{\n \"args\": {{\n \"max_depth\": 0,\n \"skip_nonblocking_grandchildren\": false\n }},\n \"tree\": {{\n \"{node_ref}\": {{\n \"__type__\": \"Node\",\n \"provider\": \"{node_prov}\",\n \"protocol_mux\": \"{node_mux}\",\n \"from_hint\": {str(node_hint).lower()},\n \"address\": \"{node_address}\",\n \"service_name\": \"{node_service_name}\",\n \"children\": {json.dumps(node_children)},\n \"warnings\": {json.dumps(node_warn)},\n \"errors\": {json.dumps(node_err)},\n \"crawl_strategy\": {{\n \"__type__\": \"CrawlStrategy\",\n \"description\": \"{cs_description}\",\n \"name\": \"{cs_name}\",\n \"providers\": {json.dumps(cs_providers)},\n \"provider_args\": {json.dumps(cs_provider_args)},\n \"child_provider\": 
{json.dumps(cs_child_provider)},\n \"service_name_filter\": {json.dumps(cs_filter)},\n \"service_name_rewrites\": {json.dumps(cs_rewrites)},\n \"protocol\": {{\n \"__type__\": \"Protocol\",\n \"ref\": \"{proto_ref}\",\n \"name\": \"{proto_name}\",\n \"blocking\": {str(proto_blocking).lower()},\n \"is_database\": {str(proto_is_database).lower()}\n }}\n }},\n \"protocol\": {{\n \"__type__\": \"Protocol\",\n \"ref\": \"{proto_ref}\",\n \"name\": \"{proto_name}\",\n \"blocking\": {str(proto_blocking).lower()},\n \"is_database\": {str(proto_is_database).lower()}\n }}\n }}\n }}\n}}\n\"\"\"\n with open(tmp_file, 'w') as f:\n f.write(stub_json)\n\n # act\n tree = render_json.load(tmp_file)\n\n # assert\n # - Node()\n assert isinstance(tree[node_ref], node.Node)\n loaded_node = tree[node_ref]\n assert loaded_node.provider == node_prov\n assert loaded_node.protocol_mux == node_mux\n assert loaded_node.from_hint == node_hint\n assert loaded_node.address == node_address\n assert loaded_node.service_name == node_service_name\n assert loaded_node.children == node_children\n assert loaded_node.warnings == node_warn\n assert loaded_node.errors == node_err\n # - CrawlStrategy()\n assert isinstance(loaded_node.crawl_strategy, charlotte.CrawlStrategy)\n loaded_cs = loaded_node.crawl_strategy\n assert loaded_cs.description == cs_description\n assert loaded_cs.name == cs_name\n assert loaded_cs.providers == cs_providers\n assert loaded_cs.provider_args == cs_provider_args\n assert loaded_cs.child_provider == cs_child_provider\n assert loaded_cs.service_name_filter == cs_filter\n assert loaded_cs.service_name_rewrites == cs_rewrites\n # - Protocol\n assert isinstance(loaded_cs.protocol, charlotte_web.Protocol)\n loaded_protocol = loaded_cs.protocol\n assert loaded_protocol.ref == proto_ref\n assert loaded_protocol.name == proto_name\n assert loaded_protocol.blocking == proto_blocking\n assert loaded_protocol.is_database == proto_is_database\n assert isinstance(loaded_node.protocol, charlotte_web.Protocol)", "def setUp(self):\n self.fixtures_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"fixtures/\"\n )", "def test_load_rivers_config_major_forcing_data_file(self, config, config_dict, infile_dict, monkeypatch):\n test_output_file = 'Fraser_flow'\n monkeypatch.setitem(infile_dict['forcing_data_files'], 'major_river', test_output_file)\n config.rivers = Mock()\n config._read_yaml_file = Mock(return_value=config_dict)\n config._load_rivers_config(config_dict, infile_dict)\n assert config.rivers.output_files[\"major\"] == test_output_file", "def setUpTestData(cls):\n cls.emulate_off_api_manager_categories()\n cls.emulate_off_api_manager_products()\n cls.db_manager = Command()", "def setUp(self):\r\n\t\tself._configuration_ = Declare.Configuration.read(\"configuration.json\")", "def setUp(self):\n self.test_data = MockPyMySqlDataSource().load()", "def setUp(self):\n self.proxyServices = [\n Host('web', '127.0.0.1:8080', 'prod', leastc, 'host1', '127.0.0.1:7001', True),\n Host('web', '127.0.0.1:8080', 'prod', leastc, 'host2', '127.0.0.1:7002'),\n Host('web', '127.0.0.1:8080', 'prod', leastc, 'host3', '127.0.0.1:7003'),\n Host('web', '127.0.0.1:8080', 'test', leastc, 'host4', '127.0.0.1:7004', False),\n Host('web', '127.0.0.1:8080', 'test', leastc, 'host5', '127.0.0.1:7005'),\n Host('web', '127.0.0.1:8080', 'test', leastc, 'host6', '127.0.0.1:7006'),\n Host('dns', '127.0.0.1:8053', 'corp', roundr, 'host7', '127.0.0.1:7007', True),\n Host('dns', '127.0.0.1:8053', 'corp', roundr, 'host8', 
'127.0.0.1:7008'),\n ]", "def setUp(self):\n PatientIDSettings.objects.create()\n User.objects.create_user('temporary', '[email protected]', 'temporary')\n\n dx1 = \"test_files/DX-Im-Carestream_DR7500-1.dcm\"\n dx2 = \"test_files/DX-Im-Carestream_DR7500-2.dcm\"\n dx3 = \"test_files/DX-Im-Carestream_DRX.dcm\"\n dx4 = \"test_files/DX-Im-GE_XR220-1.dcm\"\n dx5 = \"test_files/DX-Im-GE_XR220-2.dcm\"\n dx6 = \"test_files/DX-Im-GE_XR220-3.dcm\"\n dx7 = \"test_files/DX-RDSR-Canon_CXDI.dcm\"\n dx8 = \"test_files/DX-RDSR-Carestream_DRXEvolution.dcm\"\n root_tests = os.path.dirname(os.path.abspath(__file__))\n path_dx1 = os.path.join(root_tests, dx1)\n path_dx2 = os.path.join(root_tests, dx2)\n path_dx3 = os.path.join(root_tests, dx3)\n path_dx4 = os.path.join(root_tests, dx4)\n path_dx5 = os.path.join(root_tests, dx5)\n path_dx6 = os.path.join(root_tests, dx6)\n path_dx7 = os.path.join(root_tests, dx7)\n path_dx8 = os.path.join(root_tests, dx8)\n\n dx.dx(path_dx1)\n dx.dx(path_dx2)\n dx.dx(path_dx3)\n dx.dx(path_dx4)\n dx.dx(path_dx5)\n dx.dx(path_dx6)\n rdsr.rdsr(path_dx7)\n rdsr.rdsr(path_dx8)", "def setUp(self):\n self.vmodel_name = \"LABasin\"\n self.sim_id = int(seqnum.get_seq_num())\n self.install = InstallCfg()\n self.vmodel_obj = vmodels.get_velocity_model_by_name(self.vmodel_name)\n\n indir = os.path.join(self.install.A_IN_DATA_DIR, str(self.sim_id))\n tmpdir = os.path.join(self.install.A_TMP_DATA_DIR, str(self.sim_id))\n outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(self.sim_id))\n logdir = os.path.join(self.install.A_OUT_LOG_DIR, str(self.sim_id))\n # Create all directories\n bband_utils.mkdirs([indir, tmpdir, outdir, logdir], print_cmd=False)\n\n # Copy needed files\n\n # src file\n r_src_file = \"nr_v12_11_0_fs.src\"\n src_file = os.path.join(self.install.A_TEST_REF_DIR, \"uwo\", r_src_file)\n self.src_file = os.path.join(indir, r_src_file)\n cmd = \"cp %s %s\" % (src_file, self.src_file)\n bband_utils.runprog(cmd)\n\n # exsim param template file\n vmodel_params = self.vmodel_obj.get_codebase_params('exsim')\n self.failIf('GENERIC_PARAM' not in vmodel_params)\n r_param_template = vmodel_params['GENERIC_PARAM']\n\n self.failIf(r_param_template == \"\" or r_param_template is None)\n param_template = os.path.join(self.vmodel_obj.base_dir,\n r_param_template)\n # r_param_template is relative to the velocity model basedir,\n # get only basename\n r_param_template = os.path.basename(r_param_template)\n self.param_template = os.path.join(indir, r_param_template)\n cmd = \"cp %s %s\" % (param_template, self.param_template)\n bband_utils.runprog(cmd)\n\n # station file\n r_stations = \"nr_v12_11_2.stl\"\n stations = os.path.join(self.install.A_TEST_REF_DIR, \"uwo\", r_stations)\n self.stations = os.path.join(indir, r_stations)\n cmd = \"cp %s %s\" % (stations, self.stations)\n bband_utils.runprog(cmd)", "def test_get_devices(self):\n pass", "def test_get_devices(self):\n pass", "def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()", "def test_detect(self):\n\n for filename in os.listdir(DATA):\n f = os.path.join(DATA, filename)\n if filename.endswith(sppasANTX().default_extension):\n self.assertTrue(sppasANTX.detect(f))\n else:\n self.assertFalse(sppasANTX.detect(f))", "def main_tester():\n create_tester_paths()\n _logger.info(' -- tester init done setting up paths and db file.')", "def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)", "def setUp(self):\n self.a = backend.dbconnection.DBConnect()", "def test_load(self):\n with 
NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !LinearController\n low_utilisation: 0.9\n high_utilisation: 1.1\n - !MockPool\n \"\"\"\n )\n with load(config.name):\n assert True\n assert True", "def test_generate_all_testing(self):\n pass", "def setUp(self):\n super(BDEFileWithKeyChainTest, self).setUp()\n self._resolver_context = context.Context()\n test_path = self._GetTestFilePath(['bdetogo.raw'])\n self._SkipIfPathNotExists(test_path)\n\n self._os_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_OS, location=test_path)\n self._bde_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_BDE, parent=self._os_path_spec)\n resolver.Resolver.key_chain.SetCredential(\n self._bde_path_spec, 'password', self._BDE_PASSWORD)", "def test_init_client(self):\n # TODO: dynamically importing dependancies from the file tested\n self.assertIn(\n \"describe_trusted_advisor_check_result\", dir(self.subclass.client)\n )", "def setUp(self):\n self.interface = DbtInterface(\n database=\"test\",\n schema=\"public\",\n path=\"tests/fixtures/sample_project/\",\n )\n logging.getLogger(__name__)\n logging.basicConfig(level=logging.DEBUG)", "def test_get_device_templates(self):\n pass", "def setUp(self):\n #\n self.statsd = DogStatsd(telemetry_min_flush_interval=0)\n self.statsd.socket = FakeSocket()\n self.statsd._reset_telemetry()\n\n # Mock the proc filesystem\n route_data = load_fixtures('route')\n self._procfs_mock = patch('datadog.util.compat.builtins.open', mock_open())\n self._procfs_mock.start().return_value.readlines.return_value = route_data.split(\"\\n\")", "def setUp(self):\n self.fixtureFile = r\"v:\\workspace\\FileHandling\\src\\test-read-write.txt\"\n self.fixtureList = [\"my\", \"written\", \"text\"]\n self.fixtureListEmptyStrings = [\"my\", \"\", \"\", \"written\", \"text\"]\n self.fixtureListTrailingEmptyString = [\"my\", \"written\", \"text\", \"\", \"\"]", "def loadDrivers(self):\n\n self.sources = {}\n for source in self.config['sources']:\n sourceConf = self.config['sources'][source]\n baseClass = sourceConf['baseClass']\n self.logger.debug(\"Loading: \" + source +\n \" instance of: \" + baseClass)\n sourceArgs = sourceConf['source-config']\n self.sources[source] = {}\n try:\n print(baseClass)\n tempModule = import_module('sources.' + baseClass)\n \"\"\"tempModule = __import__('sources.' 
+ baseClass,\n globals(), locals(), [baseClass], -1)\n \"\"\"\n self.sources[source]['source'] = getattr(tempModule, str(\n baseClass))(sourceArgs)\n except Exception as e:\n self.logger.error(\"exception: \" + str(e))\n return None", "def setUp(self):\n super().setUp()\n self.file_path = 'file.json'", "def setUp(self):\n self.app = app.test_client()\n self.api = MockApi()\n self.api.reset()\n for gateway in GATEWAYS:\n if gateway == 'PremiumPaymentGateway':\n item = self.api.put(\n gateway,\n 'available',\n 0,\n )", "def test_get_file_executors(self):\n pass", "def test_gethardwares(self):\n pass", "def runTest(self):\n self.setUp()\n self.test_NeuroPath1()", "def runTest(self):\n self.setUp()\n self.test_STLModelBuilder1()", "def test_get_test_assets(self):\n pass", "def setUp(self):\n self.fixture_file = r\"v:\\workspace\\FileHandling\\src\\test-read-write.txt\"\n self.fixture_list = [\"my\", \"written\", \"text\"]\n self.fixture_list_empty_strings = [\"my\", \"\", \"\", \"written\", \"text\"]\n self.fixture_list_trailing_empty_strings = [\"my\", \"written\", \"text\", \"\", \"\"]", "def setUp(self):\n self.app = app.test_client()\n self.api = MockApi()\n self.api.reset()\n for gateway in GATEWAYS:\n if gateway == 'ExpensivePaymentGateway':\n item = self.api.put(\n gateway,\n 'available',\n 0,\n )", "def setUp(self):\n super(BDEFileWithPathSpecCredentialsTest, self).setUp()\n self._resolver_context = context.Context()\n test_path = self._GetTestFilePath(['bdetogo.raw'])\n self._SkipIfPathNotExists(test_path)\n\n self._os_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_OS, location=test_path)\n self._bde_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_BDE, parent=self._os_path_spec,\n password=self._BDE_PASSWORD)", "def setUp(self):\n self.data = DatabaseIntermediary()", "def tests_generator(self, load_file, runtime=60, clients=(1, 6, 12, 48, 128, 256)):\n db_bin = os.path.join(bin_path, 'dbench')\n cmd = \"{0} -c {1} -t {2} -D {3} {4}\"\n tests = []\n for idx, client in enumerate(clients):\n desc = \"{}clients\".format(client)\n test_name = \"dbench_{0}_{1}\".format(idx + 1, to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=cmd.format(db_bin, load_file, runtime, self.test_path, client))\n tests.append(test)\n return tests", "def testInitialization(self):\n self.assertEqual(\n self.grr_hunt_file_collector.file_path_list,\n ['/etc/passwd', '/etc/shadow']\n )", "def test_import_system_asset(self):\n pass", "def test_import_software_asset(self):\n pass", "def test_import_test_asset(self):\n pass", "def test_load(\n mock_hvac_client_read, mock_load, localhost_client, gen_input_config, gen_processed_config, gen_vault_response_kv1\n):\n mock_hvac_client_read.return_value = gen_vault_response_kv1()\n mock_load.return_value = gen_input_config()\n\n assert localhost_client.load(\"in.json\") == gen_processed_config()\n\n mock_hvac_client_read.assert_called_with(gen_input_config()[\"vault_secrets\"][\"acme.user\"])\n mock_load.assert_called_with(\"in.json\")", "def setUp(self):\n self.server_address = \"http://localhost:3030/$/\"\n self.request_address = \"http://localhost:3030/ds\"\n self.api = \"http://localhost:4032/\"\n self.version = \"0.2\"", "def setUp(self):\n self.app = app.test_client()\n self.api = MockApi()\n self.api.reset()\n for gateway in GATEWAYS:\n if gateway == 'CheapPaymentGateway':\n item = self.api.put(\n gateway,\n 'available',\n 
0,\n )", "def test_load_file(self):\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test1_\"+self.loader.version))\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test2_\"+self.loader.version))", "def fixture_sim():\n\tEXAMPLE_FILE_FOLDER = str(MODULE_DIR) + \"/data/nail1/\"\n\tsim = read.load_sim(EXAMPLE_FILE_FOLDER)\n\treturn sim", "def load_tests(loader, tests, ignore):\n\n _current_cwd = os.getcwd()\n\n def setUp(self):\n warnings.simplefilter(\"ignore\")\n os.chdir(os.path.realpath(os.path.join(__file__, \"..\", \"data\")))\n\n def tearDown(self):\n os.chdir(_current_cwd)\n warnings.simplefilter(warnings.defaultaction)\n\n globs = {\n \"fastobo\": fastobo,\n \"datetime\": datetime,\n \"textwrap\": textwrap,\n \"pprint\": pprint.pprint,\n \"ms\": fastobo.load(os.path.realpath(\n os.path.join(__file__, \"..\", \"data\", \"ms.obo\")\n )),\n }\n\n if not sys.argv[0].endswith('green'):\n tests = _load_tests_from_module(tests, fastobo, globs, setUp, tearDown)\n return tests", "def setUp(self):\n self.data_dir_qucs = os.path.dirname(os.path.abspath(__file__)) + \\\n '/qucs_prj/'\n self.data_dir_ads = os.path.dirname(os.path.abspath(__file__)) + \\\n '/ads/'\n\n self.ref_qucs = [\n {'model': 'hammerstadjensen', 'disp': 'hammerstadjensen', 'color': 'r',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,hammerstad,hammerstad.s2p'))},\n {'model': 'hammerstadjensen', 'disp': 'kirschningjansen', 'color': 'c',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,hammerstad,kirschning.s2p'))},\n {'model': 'hammerstadjensen', 'disp': 'kobayashi', 'color': 'k',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,hammerstad,kobayashi.s2p'))},\n {'model': 'hammerstadjensen', 'disp': 'yamashita', 'color': 'g',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,hammerstad,yamashita.s2p'))},\n {'model': 'wheeler', 'disp': 'schneider', 'color': 'm',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,wheeler,schneider.s2p'))},\n {'model': 'schneider', 'disp': 'schneider', 'color': 'b',\n 'n': rf.Network(os.path.join(self.data_dir_qucs,\n 'mline,schneider,schneider.s2p'))}\n ]\n\n\n self.ref_ads = [\n {'diel': 'frequencyinvariant', 'disp': 'kirschningjansen', 'color': 'r',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,freqencyinvariant,kirschning.s2p'))},\n {'diel': 'djordjevicsvensson', 'disp': 'kirschningjansen', 'color': 'c',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,djordjevicsvensson,kirschning.s2p'))},\n {'diel': 'frequencyinvariant', 'disp': 'kobayashi', 'color': 'k',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,freqencyinvariant,kobayashi.s2p'))},\n {'diel': 'djordjevicsvensson', 'disp': 'kobayashi', 'color': 'g',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,djordjevicsvensson,kobayashi.s2p'))},\n {'diel': 'frequencyinvariant', 'disp': 'yamashita', 'color': 'm',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,freqencyinvariant,yamashita.s2p'))},\n {'diel': 'djordjevicsvensson', 'disp': 'yamashita', 'color': 'b',\n 'n': rf.Network(os.path.join(self.data_dir_ads,\n 'mlin,djordjevicsvensson,yamashita.s2p'))}\n ]\n\n # default parameter set for tests\n self.verbose = False # output comparison plots if True\n self.w = 3.00e-3\n self.h = 1.55e-3\n self.t = 35e-6\n self.l = 25e-3\n self.ep_r = 4.413\n self.tand = 0.0182\n self.rho = 1.7e-8\n self.d = 0.15e-6\n self.f_et = 1e9", "def setUp(self):\n dirname = os.path.dirname(__file__)\n self.files = [\n os.path.join(dirname, 'data',\n 
'goes13_IR_107_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'goes15_IR_107_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'himawari8_IR1_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'meteosat7_IR_115_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'meteosat10_IR_108_testwcm_201604291015.tif')\n ]", "def test_brain_templates():\n try: # Test distant installation\n vbpath = get_python_lib()\n brainpath = vbpath + \",visbrain,brain,base,templates\"\n for k in ['B1.npz', 'B2.npz', 'B3.npz', 'roi.npz']:\n s = brainpath + ',' + k\n warn('Distant version passed for brain templates file')\n assert os.path.isfile(os.path.join(*s.split(\",\")))\n except: # Test local installation\n brainpath = \",visbrain,brain,base,templates\"\n for k in ['B1.npz', 'B2.npz', 'B3.npz', 'roi.npz']:\n s = brainpath + ',' + k\n warn('Local version passed for brain templates file')\n assert os.path.isfile(os.path.join(*s.split(\",\")))", "def fixture_obs():\n\tEXAMPLE_FILE_FOLDER = str(MODULE_DIR) + \"/data/nail1/\"\n\tobs = read.load_obs(EXAMPLE_FILE_FOLDER)\n\treturn obs", "def test_load(loqusdbapi, mocker, loqusdb_output):\n # GIVEN a loqusdb api and some info about a case\n family_id = 'test'\n ped_path = 'a ped path'\n vcf_path = 'a vcf path'\n\n # WHEN uploading a case with 15 variants to loqusdb\n mocker.patch.object(subprocess, 'check_output')\n subprocess.check_output.return_value = loqusdb_output\n\n data = loqusdbapi.load(family_id, ped_path, vcf_path)\n\n # THEN assert that the number of variants is 15\n\n assert data['variants'] == 15", "def test_get_devices1(self):\n pass" ]
[ "0.6442319", "0.6338353", "0.63182837", "0.61462885", "0.61180353", "0.60295516", "0.5985679", "0.5983612", "0.5970922", "0.5935132", "0.593162", "0.5924928", "0.5904823", "0.59045386", "0.5870761", "0.5864663", "0.58519274", "0.58161414", "0.5772585", "0.5764369", "0.573519", "0.57350725", "0.57304376", "0.57281744", "0.5727395", "0.5721333", "0.572", "0.57094115", "0.5706237", "0.5703934", "0.56959355", "0.5695313", "0.568984", "0.5673571", "0.5663029", "0.56622595", "0.5651346", "0.5645507", "0.56391114", "0.5613379", "0.55961674", "0.5586625", "0.55810565", "0.55810565", "0.55752975", "0.55729663", "0.55715865", "0.5570969", "0.5569529", "0.55695146", "0.5568745", "0.55680245", "0.55599046", "0.5545944", "0.5544931", "0.55412745", "0.5537589", "0.5537589", "0.5527485", "0.55231625", "0.5521409", "0.5516199", "0.5502629", "0.5498402", "0.5491199", "0.54906577", "0.54890114", "0.54865617", "0.54853255", "0.5485287", "0.5484817", "0.5483342", "0.5480412", "0.5478704", "0.54717016", "0.54716027", "0.54682344", "0.5467453", "0.546625", "0.5465317", "0.54620725", "0.54573256", "0.54569614", "0.5453098", "0.54468286", "0.5441415", "0.5441385", "0.5437088", "0.5436695", "0.5434104", "0.543326", "0.5430473", "0.5426618", "0.5426578", "0.5426435", "0.5423301", "0.5422722", "0.54173183", "0.5412784", "0.54109955" ]
0.650908
0
Test the gateway can restart and remove devices
def test_device_mgmt(self, gateway_with_devs):
    gateway_with_devs.restart('daq')
    assert gateway_with_devs.daq
    gateway_with_devs.remove('daq')
    with pytest.raises(AttributeError):
        gateway_with_devs.daq
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_device(self):\n pass", "def test_delete_device(self):\n pass", "def clean_rebind_test(**kwargs):\n if 'verify_traffic' not in kwargs:\n kwargs['verify_traffic'] = False\n prepare_subscriber_traffic(**kwargs)\n device_id = kwargs.get('device_id', bbe.get_devices(device_tags='dut', id_only=True)[0])\n switch_id = kwargs.get('switch_id', 'r1')\n switch_handle = t.get_handle(switch_id)\n switch_access_intf = bbe.get_interfaces(switch_id, interfaces='access')\n status = True\n for iteration in range(1, int(kwargs.get('iteration', 1)) + 1):\n t.log(\"disable access ports in switch in iteration #{}\".format(iteration))\n port_command_list = []\n status = True\n for access in switch_access_intf:\n port_command_list.append(\"set interfaces {} disable\".format(access.interface_pic))\n switch_handle.config(command_list=port_command_list)\n switch_handle.commit()\n t.log(\"verify access ports in down state\")\n for access in switch_access_intf:\n resp = switch_handle.pyez('get_interface_information', level_extra='terse',\n interface_name=access.interface_pic).resp\n if resp.findtext('physical-interface/admin-status') == 'down' and resp.findtext(\n 'physical-interface/oper-status') == 'down':\n t.log(\"interface {} is in down state\".format(access.interface_pic))\n else:\n t.log('WARN', \"interface {} is in state {}\".format(access.interface_pic, resp))\n status = False\n\n if not status:\n for access in switch_access_intf:\n port_command_list.append(\"delete interfaces {} disable\".format(access.interface_pic))\n switch_handle.config(command_list=port_command_list)\n switch_handle.commit()\n raise Exception(\"some interfaces failed to be in down state after disable\")\n base_time = time.time()\n while time.time() - base_time < 1800:\n router_count = get_router_sub_summary(device_id)['client']\n tester_count = get_rt_subs_info()['rt_sessions_up']\n if router_count == 0 and tester_count == 0:\n duration = time.time() - base_time\n t.log(\"all subscribers cleared from tester and router after {}s in iteration #{}\".format(duration,\n iteration))\n break\n t.log(\"sleep 30s , waiting for clients cleared\")\n time.sleep(30)\n\n result = get_router_sub_summary(device_id)\n\n if result['client'] != 0 or 'terminated' in result or 'terminating' in result or 'init' in result:\n status = False\n t.log('WARN', 'some subscribers stuck in unexpected state in iteration #{}'.format(iteration))\n\n for access in switch_access_intf:\n port_command_list.append(\"delete interfaces {} disable\".format(access.interface_pic))\n switch_handle.config(command_list=port_command_list)\n switch_handle.commit()\n time.sleep(10)\n t.log(\"verify access ports in up state in iteration {}\".format(iteration))\n for access in switch_access_intf:\n resp = switch_handle.pyez('get_interface_information', level_extra='terse',\n interface_name=access.interface_pic).resp\n if resp.findtext('physical-interface/admin-status') == 'up' and resp.findtext(\n 'physical-interface/oper-status') == 'up':\n t.log(\"interface {} is in up state\".format(access.interface_pic))\n else:\n t.log('WARN', \"interface {} is in state {}\".format(access.interface_pic, resp))\n status = False\n\n if not status:\n raise Exception(\"clean test failed\")\n ##set the rt subscriber state to stopped, since it is not teared down by actions\n t.log(\"login subscriber and verify traffic after restore the connection in iteration #{}\".format(iteration))\n prepare_subscriber_traffic(**kwargs)", "def test_gwservice_updatedevice(self, setup_controller):\n 
configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n payload = {'serialNumber': 'DEADBEEF0011',\n 'owner': 'pytest'}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"PUT\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n device = json.loads(resp.text)\n print (device)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n @pytest.mark.sdk_restapi\n def test_gwservice_deletedevice(self, setup_controller):\n \"\"\"\n Test the delete device endpoint\n WIFI-3455\n \"\"\"\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False", "def test_gwservice_deletedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False", "def test_get_devices(self):\n pass", "def test_get_devices(self):\n pass", "def test_delete_device_users(self):\n pass", "def test_connect(self, gateway):\n assert not gateway._devs", 
"async def test_device_remove_devices_nvr(\n hass: HomeAssistant,\n ufp: MockUFPFixture,\n hass_ws_client: WebSocketGenerator,\n) -> None:\n assert await async_setup_component(hass, \"config\", {})\n\n ufp.api.get_bootstrap = AsyncMock(return_value=ufp.api.bootstrap)\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n entry_id = ufp.entry.entry_id\n\n device_registry = dr.async_get(hass)\n\n live_device_entry = list(device_registry.devices.values())[0]\n assert (\n await remove_device(await hass_ws_client(hass), live_device_entry.id, entry_id)\n is False\n )", "def test_verify_list_of_devices_in_my_network():", "def delete_unavailable_devices():\n _run_command('delete unavailable')", "def test_verify_state_of_a_device_when_disconnected_from_the_device():", "def test_get_devices1(self):\n pass", "def test_device_management_endpoints_removed(self) -> None:\n self.expect_unrecognized(\"POST\", \"/_matrix/client/v3/delete_devices\")\n self.expect_unrecognized(\"DELETE\", \"/_matrix/client/v3/devices/{DEVICE}\")", "def test_update_device(self):\n pass", "def test_update_device(self):\n pass", "async def test_device_remove_devices(\n hass: HomeAssistant,\n ufp: MockUFPFixture,\n light: Light,\n hass_ws_client: WebSocketGenerator,\n) -> None:\n\n await init_entry(hass, ufp, [light])\n assert await async_setup_component(hass, \"config\", {})\n entity_id = \"light.test_light\"\n entry_id = ufp.entry.entry_id\n\n registry: er.EntityRegistry = er.async_get(hass)\n entity = registry.async_get(entity_id)\n assert entity is not None\n device_registry = dr.async_get(hass)\n\n live_device_entry = device_registry.async_get(entity.device_id)\n assert (\n await remove_device(await hass_ws_client(hass), live_device_entry.id, entry_id)\n is False\n )\n\n dead_device_entry = device_registry.async_get_or_create(\n config_entry_id=entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"e9:88:e7:b8:b4:40\")},\n )\n assert (\n await remove_device(await hass_ws_client(hass), dead_device_entry.id, entry_id)\n is True\n )", "def test_gwservice_createdevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n print(json.dumps(payload))\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device delete\", body=body)\n if resp.status_code != 200:\n assert False", "def test_device_states_device_name_delete(self):\n pass", "def test_add_device(self):\n\n pass", "async def test_remove_config_entry_device(\n hass: HomeAssistant,\n gps_sensor: Sensor,\n integration: MockConfigEntry,\n gateway: BaseSyncGateway,\n hass_ws_client: 
WebSocketGenerator,\n) -> None:\n entity_id = \"sensor.gps_sensor_1_1\"\n node_id = 1\n config_entry = integration\n assert await async_setup_component(hass, \"config\", {})\n await hass.async_block_till_done()\n\n device_registry = dr.async_get(hass)\n device_entry = device_registry.async_get_device(\n identifiers={(DOMAIN, f\"{config_entry.entry_id}-{node_id}\")}\n )\n entity_registry = er.async_get(hass)\n state = hass.states.get(entity_id)\n\n assert gateway.sensors\n assert gateway.sensors[node_id]\n assert device_entry\n assert state\n\n client = await hass_ws_client(hass)\n await client.send_json(\n {\n \"id\": 5,\n \"type\": \"config/device_registry/remove_config_entry\",\n \"config_entry_id\": config_entry.entry_id,\n \"device_id\": device_entry.id,\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n await hass.async_block_till_done()\n\n assert node_id not in gateway.sensors\n assert gateway.tasks.persistence.need_save is True\n assert not device_registry.async_get_device(\n identifiers={(DOMAIN, f\"{config_entry.entry_id}-1\")}\n )\n assert not entity_registry.async_get(entity_id)\n assert not hass.states.get(entity_id)", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_guest_applications(self):\n self.check_guest_applications()", "def test_verify_connection_to_a_device():", "def test_create_device(self):\n pass", "def test_create_device(self):\n pass", "def test_delete_device_by_id1(self):\n pass", "def test_create_node_reboot_item(self):\n pass", "def test_multiple_sim_control_devices(self):\n self.assertGreater(self.sim_control_device1.ping(), 0)\n self.assertGreater(self.sim_control_device2.ping(), 0)\n self.assertGreater(self.sim_control_device3.ping(), 0)", "def test_moving_devices_1(\n self, management_client, internal_client, inventory_attributes\n ):\n did1 = \"device-id-1\"\n did2 = \"device-id-2\"\n internal_client.create_device(did1, inventory_attributes)\n internal_client.create_device(did2, inventory_attributes)\n\n group = management_client.group(group=\"group-test-1\")\n management_client.addDeviceToGroup(group=group, device=did1)\n management_client.addDeviceToGroup(group=group, device=did2)\n assert len(management_client.getGroupDevices(\"group-test-1\")) == 2\n\n group = management_client.group(group=\"group-test-2\")\n management_client.addDeviceToGroup(group=group, device=did2)\n assert len(management_client.getGroupDevices(\"group-test-1\")) == 1\n assert 
len(management_client.getGroupDevices(\"group-test-2\")) == 1\n\n management_client.addDeviceToGroup(group=group, device=did1)\n assert (\n len(management_client.getGroupDevices(\"group-test-1\", expected_error=True))\n == 0\n )\n assert len(management_client.getGroupDevices(\"group-test-2\")) == 2\n\n group = management_client.group(group=\"group-test-1\")\n management_client.addDeviceToGroup(group=group, device=did1)\n management_client.addDeviceToGroup(group=group, device=did2)\n assert len(management_client.getGroupDevices(\"group-test-1\")) == 2\n assert (\n len(management_client.getGroupDevices(\"group-test-2\", expected_error=True))\n == 0\n )", "def test_delete_device_by_id(self):\n pass", "def test_delete_device_user(self):\n pass", "async def test_device_remove_devices(\n hass: HomeAssistant,\n hass_ws_client: WebSocketGenerator,\n mock_config_entry: MockConfigEntry,\n mock_jellyfin: MagicMock,\n device_registry: dr.DeviceRegistry,\n) -> None:\n assert await async_setup_component(hass, \"config\", {})\n\n mock_config_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n device_entry = device_registry.async_get_device(\n identifiers={\n (\n DOMAIN,\n \"DEVICE-UUID\",\n )\n },\n )\n assert (\n await remove_device(\n await hass_ws_client(hass), device_entry.id, mock_config_entry.entry_id\n )\n is False\n )\n old_device_entry = device_registry.async_get_or_create(\n config_entry_id=mock_config_entry.entry_id,\n identifiers={(DOMAIN, \"OLD-DEVICE-UUID\")},\n )\n assert (\n await remove_device(\n await hass_ws_client(hass), old_device_entry.id, mock_config_entry.entry_id\n )\n is True\n )", "def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.verify_data()", "def test_01_factory_reset(self):\n time.sleep(_LOG_CATCH_UP_DELAY)\n start_time = datetime.datetime.now()\n\n self.device.factory_reset()\n self.assertTrue(\n self.device.connected,\n f\"{self.device.name} is offline after factory_reset() execution \"\n \"finished. 
factory_reset should block until the device comes back \"\n \"online and becomes responsive.\")\n self._verify_no_unexpected_reboots(start_time)", "def fpc_mic_restart_test(**kwargs):\n t.log(\"fpc/mic_restart test stated\")\n cpu_check = kwargs.get('cpu_check', True)\n check_client = kwargs.get('verify_client', True)\n verify_traffic_enable = kwargs.get('verify_traffic', True)\n check_access_route = kwargs.get('check_access_route', True)\n if verify_utils.convert_str_to_num_or_bool(verify_traffic_enable):\n t.log('login clients and verify traffic before FPC/MIC restart')\n unicast_traffic_test(**kwargs)\n device = kwargs.get('device_id', bbe.get_devices(device_tags='dut', id_only=True)[0])\n router = t.get_handle(device)\n t.log('starting FPC/MIC restart test')\n fpc_pic_dict = {}\n if 'fpc_slot' in kwargs:\n fpcslots = kwargs['fpc_slot']\n if isinstance(fpcslots, list):\n for slot in fpcslots:\n fpc_pic_dict[slot] = ['0', '1']\n else:\n fpc_pic_dict[fpcslots] = ['0', '1']\n else:\n access_list = bbe.get_interfaces(device=device, interfaces=kwargs.get('interface_id', 'access'))\n if not access_list:\n t.log(\"WARN\", \"no interface was picked for test\")\n return\n fpc_pic_dict = {}\n for access in access_list:\n match = re.match(r'.*-(\\d+)/(\\d+)/\\d+', access.interface_pic)\n if match:\n if match.group(1) not in fpc_pic_dict:\n fpc_pic_dict[match.group(1)] = []\n if match.group(2) not in fpc_pic_dict[match.group(1)]:\n fpc_pic_dict[match.group(1)].append(match.group(2))\n t.log('fpc_pic_list is {}'.format(fpc_pic_dict))\n component = kwargs.get('component', 'fpc').lower()\n action = kwargs.get('action', 'restart').lower()\n if component == 'fpc':\n for fpc in fpc_pic_dict:\n if action == 'restart':\n t.log('will restart fpc slot {}'.format(fpc))\n command = 'request chassis fpc restart slot {}'.format(fpc)\n resp = router.cli(command=command).resp\n match = re.match(r'Restart\\s+initiated', resp)\n if match:\n t.log('fpc slot #{} restarted'.format(fpc))\n else:\n raise Exception('fpc slot {} can not be restarted'.format(fpc))\n if action == 'panic':\n t.log('will panic fpc slot {}'.format(fpc))\n resp = router.vty(command='set parser security 10', destination='fpc' + fpc).resp\n if re.search('Security level', resp):\n t.log('enter into vty security mode')\n router.vty(command='test panic', destination='fpc' + fpc, pattern='(.*)')\n t.log(\"waiting 200s for core-dumps to be generated\")\n time.sleep(200)\n resp = router.cli(command='show system core-dumps').resp\n if re.search('core-', resp):\n t.log('fpc core was generated during test panic, will remove it')\n router.cli(command='file delete /var/crash/core-*')\n else:\n raise Exception(\"not able to set vty security mode\")\n if action == 'offon':\n t.log(\"will offline/online fpc slot {}\".format(fpc))\n for item in ['offline', 'online']:\n if not re.match(r'MX(80|80-t|40|40-t|10|10-t|5|5-t)', router.get_model(), re.IGNORECASE):\n command = \"request chassis fpc slot {} {}\".format(fpc, item)\n else:\n command = \"request chassis tfeb {}\".format(item)\n resp = router.cli(command=command).resp\n\n if item == 'offline':\n offline_initiated = re.match(r'Offline\\s+initiated', resp)\n if offline_initiated:\n t.log('fpc slot #{} offline'.format(fpc))\n # gap between fpc offline and online\n time.sleep(10)\n else:\n raise Exception('fpc slot {} failed to be offline'.format(fpc))\n else:\n # online, Sai's test encountered that fpc online request could hit\n # that fpc is in a state that \"FPC x is in transition, try again\"\n 
online_initiated = False\n for online_retry in range(10):\n mat = re.match(r'Online\\s+initiated', resp)\n if mat:\n t.log('fpc slot #{} online initiated'.format(fpc))\n online_initiated = True\n break\n else:\n time.sleep(60)\n resp = router.cli(command=command).resp\n\n if not online_initiated:\n raise Exception('fpc slot {} failed to be online'.format(fpc))\n\n t.log('fpc slot {} enabled online command, wait for 100s'.format(fpc))\n time.sleep(100)\n\n time.sleep(30)\n base_time = time.time()\n while True:\n resp = router.pyez('get_fpc_information', fpc_slot=fpc).resp\n\n if resp.findtext('fpc/state') == 'Online':\n t.log('fpc slot {} back to online'.format(fpc))\n break\n else:\n time.sleep(10)\n\n if (time.time() - base_time) > 600:\n raise Exception(\"FPC slot {} failed to come back online after 600s\".format(fpc))\n\n if component == 'mic':\n fpc_mic_list = []\n for fpc in fpc_pic_dict:\n for mic in fpc_pic_dict[fpc]:\n fpc_mic_list.append((fpc, mic))\n t.log('fpc_mic list is {}'.format(fpc_mic_list))\n for chosen in fpc_mic_list:\n if action == 'restart':\n for cmds in ['offline', 'online']:\n t.log('will do {} mic {}'.format(cmds, chosen))\n command = 'request chassis mic fpc-slot {} mic-slot {} {}'.format(chosen[0], chosen[1], cmds)\n resp = router.cli(command=command).resp\n if re.search('not support', resp):\n t.log('WARN', '{}'.format(resp))\n break\n pic_slots = ['0', '1']\n if chosen[1] == '1':\n pic_slots = ['2', '3']\n retry = 5\n while True:\n time.sleep(10)\n resp = router.pyez('get_pic_information', slot=chosen[0]).resp\n break_loop = False\n count = 0\n for pic in resp.findall('fpc/pic'):\n if pic.findtext('pic-slot') in pic_slots:\n count += 1\n if pic.findtext('pic-state').lower() == cmds and count == 2:\n t.log('fpc {} mic {} is {} now'.format(chosen[0], chosen[1], cmds))\n break_loop = True\n break\n if break_loop:\n break\n retry = retry - 1\n if retry == 0:\n raise Exception(\"fpc {} mic {} failed to be {}\".format(chosen[0], chosen[1], cmds))\n if action == 'panic':\n for cmds in ['detach', 'attach']:\n command = \"cprod -A fpc{} -c 'set parser security 10'\".format(chosen[0])\n resp = router.shell(command=command).resp\n if not re.search('Security level set to 10', resp):\n t.log('WARN', \"Not supported for command {}\".format(command))\n command = \"cprod -A fpc{} -c 'test mic {} {}'\".format(chosen[0], cmds, chosen[1])\n if cmds == 'attach':\n command = command + ' 0'\n router.shell(command=command)\n if cmds == 'detach':\n pic_state = 'offline'\n if cmds == 'attach':\n pic_state = 'online'\n pic_slots = ['0', '1']\n if chosen[1] == '1':\n pic_slots = ['2', '3']\n retry = 5\n while True:\n time.sleep(10)\n break_loop = False\n resp = router.pyez('get_pic_information', slot=chosen[0]).resp\n count = 0\n for pic in resp.findall('fpc/pic'):\n if pic.findtext('pic-slot') in pic_slots:\n count += 1\n if pic.findtext('pic-state').lower() == pic_state and count == 2:\n t.log('fpc {} mic {} is {} now'.format(chosen[0], chosen[1], cmds))\n break_loop = True\n break\n if break_loop:\n break\n retry = retry - 1\n if retry == 0:\n raise Exception(\"fpc {} mic {} failed to be {}\".format(chosen[0], chosen[1], cmds))\n\n if verify_utils.convert_str_to_num_or_bool(check_client):\n try:\n t.log('verify subscriber count after FPC/PIC test')\n verify_client_count(subs=subs, device_id=device, check_access_route=check_access_route)\n except:\n t.log('subscriber lost during the test, needs rebinding')\n if cpu_check:\n 
BBEJunosUtil.cpu_settle(cpu_threshold=int(kwargs.get('cpu_process', 30)),\n idle_min=int(kwargs.get('cpu_settle', 75)),\n dead_time=int(kwargs.get('cpu_deadtime', 1200)),\n interval=int(kwargs.get('cpu_interval', 20)))\n #cst_release_clients(**kwargs)\n #cst_start_clients(**kwargs)\n t.log('start login subs after FPC Restart Test')\n if 'subs' in kwargs:\n subs_list = kwargs['subs']\n if not isinstance(subs_list, list):\n subs_list = [subs_list]\n # kwargs.pop('subs')\n else:\n subs_list = bbe.get_subscriber_handles()\n interfaces = kwargs.get('interface_id', 'access')\n subs_interface = bbe.get_subscriber_handles(interface=interfaces)\n for subs in subs_interface:\n if subs not in subs_list:\n subs_interface.remove(subs)\n else:\n subs.stop()\n time.sleep(60)\n subs.abort()\n time.sleep(60)\n try:\n cst_start_clients(restart_unbound_only=True, **kwargs)\n except:\n cst_start_clients(**kwargs)\n\n if verify_utils.convert_str_to_num_or_bool(verify_traffic_enable):\n t.log('verify traffic after FPC/PIC test')\n unicast_traffic_test(**kwargs)", "def test_delete_device_template(self):\n pass", "def test_verify_state_of_a_device():", "def test_create_device1(self):\n pass", "def test_ipam_services_delete(self):\n pass", "def test_vsg_for_multiple_vcpes_in_vsg_vm_with_one_vcpe_restart(self):", "def test_get_device(self):\n pass", "def test_get_device(self):\n pass", "def test_delete_hyperflex_server_firmware_version(self):\n pass", "def test_on_reboot_on(self):\n self.openstack('baremetal node power on {0}'.format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power on', show_prop['power_state'])\n\n self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power on', show_prop['power_state'])", "def test_disconnect_and_still_works(self):\n self.client.ensure_path(\"/services/db/1.1.1.1\")\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"1\"}))\n z = ZkFarmExporter(self.client, \"/services/db\", self.conf)\n z.loop(2, timeout=self.TIMEOUT)\n self.expire_session()\n z.loop(10, timeout=self.TIMEOUT)\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"2\"}))\n z.loop(2, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"1.1.1.1\": {\"enabled\": \"2\"}})", "def test_reset():\n dev = _aws_device(wires=2)\n dev._circuit = CIRCUIT\n dev._task = TASK\n\n dev.reset()\n assert dev.circuit is None\n assert dev.task is None", "def test_redetect(self):\n self.device.close()\n time.sleep(.2)\n new_file_devices_name = os.path.join(self.get_output_dir(),\n \"test_redetect_devices.json\")\n new_file_options_name = os.path.join(self.get_output_dir(),\n \"test_redetect_device_options.json\")\n new_log_file = os.path.join(self.get_output_dir(), \"test_redetect_gdm.txt\")\n\n shutil.copy(self.get_manager().device_file_name, new_file_devices_name)\n shutil.copy(self.get_manager().device_options_file_name,\n new_file_options_name)\n new_manager = gazoo_device.Manager(\n device_file_name=new_file_devices_name,\n device_options_file_name=new_file_options_name,\n log_directory=self.get_output_dir(),\n gdm_log_file=new_log_file)\n try:\n new_manager.redetect(self.device.name, self.get_output_dir())\n finally:\n new_manager.close()\n\n # pylint: disable=protected-access\n self.assertTrue(\n self.device.name in new_manager._devices,\n \"Device was not successfully detected. 
See test_redetect_gdm.txt and \"\n f\"{self.device.device_type}_detect.txt for more info\")\n old_dict = self.get_manager()._devices[self.device.name][\"persistent\"]\n new_dict = new_manager._devices[self.device.name][\"persistent\"]\n # pylint: enable=protected-access\n\n for name, a_dict in [(\"Old\", old_dict), (\"Detected\", new_dict)]:\n self.logger.info(\"%s configuration:\", name)\n for key, value in a_dict.items():\n self.logger.info(\"\\t%s: %s\", key, value)\n\n missing_props = []\n bad_values = []\n for prop, old_value in old_dict.items():\n if prop in new_dict:\n new_value = new_dict[prop]\n if old_value != new_value:\n bad_values.append(\"{}: {!r} was previously {!r}\".format(\n prop, new_value, old_value))\n else:\n missing_props.append(prop)\n msg = \"\"\n if missing_props:\n msg += \"{} is missing the following previous props: {}.\\n\".format(\n self.device.name, missing_props)\n if bad_values:\n msg += \"{} has the following mismatched values: {}.\".format(\n self.device.name, \", \".join(bad_values))\n\n self.assertFalse(missing_props or bad_values, msg)", "def test_02_sys_vm_start(self):\n\n # 1. verify listHosts has all 'routing' hosts in UP state\n # 2. verify listStoragePools shows all primary storage pools\n # in UP state\n # 3. verify that secondary storage was added successfully\n\n list_hosts_response = list_hosts(\n self.apiclient,\n type='Routing',\n zoneid=self.zone.id,\n podid=self.pod.id\n )\n self.assertEqual(\n isinstance(list_hosts_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n # ListHosts has all 'routing' hosts in UP state\n self.assertNotEqual(\n len(list_hosts_response),\n 0,\n \"Check list host response\"\n )\n for host in list_hosts_response:\n self.assertEqual(\n host.state,\n 'Up',\n \"Check state of routing hosts is Up or not\"\n )\n\n # ListStoragePools shows all primary storage pools in UP state\n list_storage_response = list_storage_pools(\n self.apiclient,\n zoneid=self.zone.id,\n podid=self.pod.id\n )\n self.assertEqual(\n isinstance(list_storage_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n self.assertNotEqual(\n len(list_storage_response),\n 0,\n \"Check list storage pools response\"\n )\n\n for primary_storage in list_hosts_response:\n self.assertEqual(\n primary_storage.state,\n 'Up',\n \"Check state of primary storage pools is Up or not\"\n )\n\n # Secondary storage is added successfully\n timeout = self.services[\"timeout\"]\n while True:\n list_hosts_response = list_hosts(\n self.apiclient,\n type='SecondaryStorage',\n zoneid=self.zone.id,\n )\n\n if not isinstance(list_hosts_response, list):\n # Sleep to ensure Secondary storage is Up\n time.sleep(int(self.services[\"sleep\"]))\n timeout = timeout - 1\n elif timeout == 0 or isinstance(list_hosts_response, list):\n break\n \n self.assertEqual(\n isinstance(list_hosts_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n \n self.assertNotEqual(\n len(list_hosts_response),\n 0,\n \"Check list Hosts response\"\n )\n\n host_response = list_hosts_response[0]\n #Check if host is Up and running\n self.assertEqual(\n host_response.state,\n 'Up',\n \"Check state of secondary storage\"\n )\n self.debug(\"Checking SSVM status in zone: %s\" % self.zone.id)\n\n timeout = self.services[\"timeout\"]\n\n while True:\n list_ssvm_response = list_ssvms(\n self.apiclient,\n systemvmtype='secondarystoragevm',\n zoneid=self.zone.id,\n podid=self.pod.id\n )\n if not isinstance(list_ssvm_response, list):\n # Sleep to 
ensure SSVMs are Up and Running\n time.sleep(int(self.services[\"sleep\"]))\n timeout = timeout - 1\n elif timeout == 0 or isinstance(list_ssvm_response, list):\n break\n \n self.assertEqual(\n isinstance(list_ssvm_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n #Verify SSVM response\n self.assertNotEqual(\n len(list_ssvm_response),\n 0,\n \"Check list System VMs response\"\n )\n\n for ssvm in list_ssvm_response:\n self.assertEqual(\n ssvm.state,\n 'Running',\n \"Check whether state of SSVM is running\"\n )\n return", "def test_reset_db_mounted(db, client, settings, reload_urls):\n reload_urls(settings)\n resp = client.post(\"/__dev/reset-db/\")\n assert resp.status_code == 404\n\n settings.DEVELOPMENT_ENDPOINTS = True\n reload_urls(settings)\n\n resp = client.post(\"/__dev/reset-db/\")\n assert resp.status_code == 200\n assert resp.json()[\"status\"] == \"done\"", "async def test_remove_orphaned_entries_service(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n data = {\n \"lights\": {\n \"1\": {\n \"name\": \"Light 1 name\",\n \"state\": {\"reachable\": True},\n \"type\": \"Light\",\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n },\n \"sensors\": {\n \"1\": {\n \"name\": \"Switch 1\",\n \"type\": \"ZHASwitch\",\n \"state\": {\"buttonevent\": 1000, \"gesture\": 1},\n \"config\": {\"battery\": 100},\n \"uniqueid\": \"00:00:00:00:00:00:00:03-00\",\n },\n },\n }\n with patch.dict(DECONZ_WEB_REQUEST, data):\n config_entry = await setup_deconz_integration(hass, aioclient_mock)\n\n device_registry = dr.async_get(hass)\n device = device_registry.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"123\")},\n )\n\n assert (\n len(\n [\n entry\n for entry in device_registry.devices.values()\n if config_entry.entry_id in entry.config_entries\n ]\n )\n == 5 # Host, gateway, light, switch and orphan\n )\n\n entity_registry = er.async_get(hass)\n entity_registry.async_get_or_create(\n SENSOR_DOMAIN,\n DECONZ_DOMAIN,\n \"12345\",\n suggested_object_id=\"Orphaned sensor\",\n config_entry=config_entry,\n device_id=device.id,\n )\n\n assert (\n len(async_entries_for_config_entry(entity_registry, config_entry.entry_id))\n == 3 # Light, switch battery and orphan\n )\n\n await hass.services.async_call(\n DECONZ_DOMAIN,\n SERVICE_REMOVE_ORPHANED_ENTRIES,\n service_data={CONF_BRIDGE_ID: BRIDGEID},\n )\n await hass.async_block_till_done()\n\n assert (\n len(\n [\n entry\n for entry in device_registry.devices.values()\n if config_entry.entry_id in entry.config_entries\n ]\n )\n == 4 # Host, gateway, light and switch\n )\n\n assert (\n len(async_entries_for_config_entry(entity_registry, config_entry.entry_id))\n == 2 # Light and switch battery\n )", "def test_filter_device(self):\n pass", "def test_off_reboot_on(self):\n self.openstack('baremetal node power off {0}'\n .format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power off', show_prop['power_state'])\n\n self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power on', show_prop['power_state'])", "def test_setup_adds_proper_devices(self, mock_switch, mock_client):\n ports = {\n i: mock.MagicMock(model=model) for i, model in enumerate(mfi.SWITCH_MODELS)\n }\n ports[\"bad\"] = mock.MagicMock(model=\"notaswitch\")\n print(ports[\"bad\"].model)\n 
mock_client.return_value.get_devices.return_value = [\n mock.MagicMock(ports=ports)\n ]\n assert setup_component(self.hass, switch.DOMAIN, self.GOOD_CONFIG)\n self.hass.block_till_done()\n for ident, port in ports.items():\n if ident != \"bad\":\n mock_switch.assert_any_call(port)\n assert mock.call(ports[\"bad\"], self.hass) not in mock_switch.mock_calls", "async def test_service_refresh_devices(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n config_entry = await setup_deconz_integration(hass, aioclient_mock)\n\n assert len(hass.states.async_all()) == 0\n\n aioclient_mock.clear_requests()\n\n data = {\n \"config\": {},\n \"groups\": {\n \"1\": {\n \"id\": \"Group 1 id\",\n \"name\": \"Group 1 name\",\n \"type\": \"LightGroup\",\n \"state\": {},\n \"action\": {},\n \"scenes\": [{\"id\": \"1\", \"name\": \"Scene 1\"}],\n \"lights\": [\"1\"],\n }\n },\n \"lights\": {\n \"1\": {\n \"name\": \"Light 1 name\",\n \"state\": {\"reachable\": True},\n \"type\": \"Light\",\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n },\n \"sensors\": {\n \"1\": {\n \"name\": \"Sensor 1 name\",\n \"type\": \"ZHALightLevel\",\n \"state\": {\"lightlevel\": 30000, \"dark\": False},\n \"config\": {\"reachable\": True},\n \"uniqueid\": \"00:00:00:00:00:00:00:02-00\",\n }\n },\n }\n\n mock_deconz_request(aioclient_mock, config_entry.data, data)\n\n await hass.services.async_call(\n DECONZ_DOMAIN, SERVICE_DEVICE_REFRESH, service_data={CONF_BRIDGE_ID: BRIDGEID}\n )\n await hass.async_block_till_done()\n\n assert len(hass.states.async_all()) == 5", "def devices_discover_view(request):\n logger.info(\"Restarting device discovery daemon...\")\n supervisor.restart_program('device_discovery')", "def check_device_state(self):", "def test_disconnect(self):\n self.client.ensure_path(\"/services/db/1.1.1.1\")\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"1\"}))\n z = ZkFarmExporter(self.client, \"/services/db\", self.conf)\n z.loop(2, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"1.1.1.1\": {\"enabled\": \"1\"}})\n self.conf.reset_mock()\n self.expire_session()\n z.loop(10, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"1.1.1.1\": {\"enabled\": \"1\"}})", "def test_is_running(self, mock_call):\n\t\tmock_call.return_value = False \n\t\tdevice = Device(1, \"testDevice\", \"testDesc\", \"pump\", 1)\n\t\tdm = DeviceManager()\n\t\tresponse = dm.isRunning(device) \n\t\tself.assertEqual(response, False)", "def test_gwservice_listdevices(self, setup_controller):\n resp = setup_controller.request(\"gw\", \"devices\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw list devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)", "def test_connectivity_during_restart_controllers(\n server, public_network, create_floating_ip, port_steps, server_steps,\n os_faults_steps):\n server_port = port_steps.get_port(\n device_owner=stepler_config.PORT_DEVICE_OWNER_SERVER,\n device_id=server.id)\n\n # Create Floating IP\n floating_ip = create_floating_ip(public_network, port=server_port)\n ip = floating_ip['floating_ip_address']\n server_steps.check_server_ip(\n server, ip, timeout=settings.FLOATING_IP_BIND_TIMEOUT)\n\n # Check ping to Floating IP\n for i in range(30):\n server_steps.check_ping_for_ip(\n ip, timeout=settings.PING_SUCCESS_TIMEOUT)\n\n # Get contrail controller nodes\n controlles_fqdns = 
settings.CONTRAIL_ROLES_DISTRIBUTION[\n settings.ROLE_CONTRAIL_CONTROLLER]\n controllers = os_faults_steps.get_nodes(fqdns=controlles_fqdns)\n\n # Start background ping to Floating IP\n with server_steps.check_ping_loss_context(ip):\n # Restart nodes\n os_faults_steps.poweroff_nodes(controllers)\n os_faults_steps.poweron_nodes(controllers)\n server_steps.check_ping_for_ip(\n ip, timeout=settings.PING_SUCCESS_TIMEOUT)", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_restart(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertTrue(rpc.restart())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call()],\n self.supervisor.supvisors.fsm.on_restart.call_args_list)", "def test_restart_service_should_return_active(self):\n instance_info.dbaas.instances.restart(instance_info.id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n def result_is_active():\n instance = instance_info.dbaas.instances.get(\n instance_info.id)\n if instance.status in CONFIG.running_status:\n return True\n else:\n assert_equal(\"REBOOT\", instance.status)\n return False\n poll_until(result_is_active)", "def test_delete_network(self):\n pass", "def test_poweredUp(self):\n self.assertIdentical(\n IOneTimePadGenerator(self.store),\n self.store.findUnique(AMPConfiguration))", "def test_update_pci_switch(self):\n pass", "def test_controller_switches(self):\n for name in self.our_controllers:\n self.start_controller(name)\n self.assertTrue(self.check_state(name, 'running'), \"{} is starting correctly\".format(name))\n time.sleep(1) # process some update() cycles\n self.stop_controller(name)\n self.assertTrue(self.check_state(name, 'stopped'), \"{} is stopping correctly\".format(name))", "def testFailure():\n run(\"chariot-me\") #Start management-engine without initial deplflag\n egress()", "def test_reboot_machine(self, pretty_print, owner_api_token):\n machine = setup_data.get('reboot_machine', {}).get(\n 'machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + \\\n '/api/v2/machines/{machine}/actions/reboot'.format(machine=machine)\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'reboot_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'running', 'actions': {'stop': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')" ]
[ "0.6643423", "0.6643423", "0.64361143", "0.6349524", "0.63331807", "0.6297916", "0.6297916", "0.620502", "0.6202497", "0.6154049", "0.6146571", "0.6097684", "0.60750765", "0.6045645", "0.6028808", "0.6019024", "0.6019024", "0.5986632", "0.59689575", "0.5868066", "0.58662766", "0.5833081", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5827914", "0.5819075", "0.57978284", "0.57978284", "0.57891625", "0.57815826", "0.57751167", "0.57672065", "0.5763305", "0.5758561", "0.57517296", "0.5745206", "0.57449794", "0.57406116", "0.5713854", "0.5710512", "0.57036", "0.5698913", "0.569533", "0.5685056", "0.5685056", "0.56776553", "0.56625056", "0.565636", "0.56488186", "0.56419694", "0.5629124", "0.56183106", "0.5614668", "0.56139106", "0.5589247", "0.5581711", "0.55791456", "0.5565657", "0.55647266", "0.55602425", "0.55500513", "0.5548483", "0.5538051", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.55329263", "0.5531984", "0.5527585", "0.54842865", "0.5475442", "0.5473598", "0.54670197", "0.5466186", "0.5460592" ]
0.7743327
0
Return longest Unicode IPA prefix of a word
def longest_one_seg_prefix(self, word, normalize=True):
    if normalize:
        word = FeatureTable.normalize(word)
    last_found_length = 0
    node = self.seg_trie
    for pos in range(len(word) + 1):
        if pos == len(word) or word[pos] not in node:
            return word[:last_found_length]
        node = node[word[pos]]
        if self.TRIE_LEAF_MARKER in node:
            last_found_length = pos + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def longest_word_length(words):", "def find_longest_word(s):\n return sorted(map(lambda si: (si, len(si)), s.split()), key=lambda item: item[1], reverse=True)[0][0]", "def longest_prefix_length(s, i, j):\n l = 0\n while (i+l < len(s)) and (j+l < len(s)):\n if s[i+l] != s[j+l]:\n break\n l += 1\n return l", "def find_longest_common_prefix(words:list):\n trie = Trie(words)\n\n head = trie.head\n\n prefix = []\n\n while len(head) == 1 and trie.eof not in head:\n key, value = head.popitem()\n prefix.append(key)\n head = value\n \n return \"\".join(prefix)", "def replace_prefix(word, prefix):\r\n length_prefix = len(prefix)\r\n length_word = len(word)\r\n \r\n if length_prefix > length_word:\r\n return prefix\r\n\r\n #print(word[:length_prefix])\r\n word = prefix + word[length_prefix:]\r\n\r\n return word", "def longest_ORF(dna):\n\n # YOUR IMPLEMENTATION HERE", "def longest_common_prefix(strings: list):\n raise NotImplemented", "def get_prefix(word, length):\n if length <= 0:\n return \"\"\n if length <= len(word):\n return word[:length]\n else:\n return word.ljust(length, \"*\")", "def _get_maximum_prefix(self):\n return self.__maximum_prefix", "def longest_word_len(text):\r\n return np.max(np.array([len(word) for word in tokenization(text)]))", "def longest_word(file_name):\n longest = 0\n linenum = 0\n finalnum = 0\n result = ''\n with open(file_name) as file:\n lines = file.readlines()\n for line in lines:\n linenum += 1\n words = line.split()\n for word in words:\n if len(word) > longest:\n longest = len(word)\n result = word\n finalnum = linenum\n if longest == 0:\n return None\n return str(finalnum) + ': ' + result", "def find_longest_common_prefix_reduce(words:list):\n if not words:\n return ''\n \n def common_start(w1, w2):\n shorter = w1 if len(w1) < len(w2) else w2\n for i in range(0, len(shorter)):\n if w1[i] != w2[i]:\n return shorter[:i]\n return shorter\n \n return reduce(common_start, words)", "def findAlphabeticallyLastWord(text):\n return max(text.split(' '))", "def longest_ORF(dna):\n both_strings=find_all_ORFs_both_strands(dna)\n L=max(both_strings,key=len)\n Q=len(L)\n return Q\n\n #save out put of find all orfboth string to some variable", "def longest_word(sentence: str) -> str:\n trimmed = re.compile('[^a-zA-Z0-9 ]').sub('', sentence)\n chunks = trimmed.split(' ')\n longest = 0\n index = -1\n for i, x in enumerate(chunks):\n if len(x) > longest:\n longest = len(x)\n index = i\n\n return chunks[index]", "def longest_ORF_noncoding(dna, num_trials):\n\n # YOUR IMPLEMENTATION HERE", "def longest_ORF(dna):\n l = find_all_ORFs_both_strands(dna)\n longest=''\n if len(l)>=1:\n\t longest =max(l,key=len)\n return longest", "def longest_common_prefix(fst: str, snd: str) -> str:\n bound = 0\n for a, b in zip(fst, snd):\n if a != b:\n break\n bound += 1\n return fst[:bound]", "def longestWord(my_list):\r\n my_max = 0\r\n # find the maximum element\r\n for k in range( len(my_list)):\r\n (lyric, id) = my_list[k]\r\n if len(lyric) > len(my_list[my_max][0]):\r\n my_max = k\r\n max_length = len(my_list[my_max][0])\r\n return max_length", "def theLoveLetterMystery(s):\n mincount = 0\n for i in range(len(s) // 2):\n mincount += abs(ord(s[i]) - ord(s[-1 - i]))\n\n return mincount", "def longestword(word_list):\n\n longest = 0\n\n for word in word_list:\n\n if len(word) > longest:\n longest = len(word)\n\n return longest", "def find_longest_word(words):\n \n # lengths = []\n\n # for word in words: \n # l = len(word)\n # lengths.append(l)\n # result = max(lengths)\n # return result\n\n 
#ALT SOLUTION ONE LINE \n return len(max(words, key=lambda words: len(words)))", "def too_long_words(word):\n\n # If work is longer than 10 letters, print the word according to these rules\n if len(word) > 10:\n print word[0] + str(len(word[1:-1])) + word[-1]\n\n else:\n print word", "def get_suffix(word, length):\n if length <= 0:\n return \"\"\n if length <= len(word):\n start = len(word) - length\n return word[start:]\n else:\n return word.rjust(length, \"*\")", "def daily1(word):\n if len(word) <= 2:\n return f'{word} to za krótkie słowo.'\n else:\n return word[::-1]", "def longestAwesome(self, s: str) -> int:\n\n # So we are moving right, and reducing length by 1\n # for every time we move right - we start from the longest substring that can be formed to lowest one\n # So the moment, we find something we can instantly breal\n\n max_length = 0\n\n if s == s[::-1]:\n return len(s)\n\n for i in range(0, len(s)):\n left = i\n right = len(s)\n\n if right - left > max_length:\n\n while right > left:\n\n candidate = s[left:right]\n # print(f\"The candidate is: {candidate}\")\n ctr = Counter(candidate)\n\n # initial base check\n odd_cnt = 0\n fl = False\n for k, v in ctr.items():\n if v & 1:\n odd_cnt += 1\n if odd_cnt > 1:\n fl = True\n break\n\n if not fl:\n if max_length < (right - left):\n max_length = right - left\n # max_length = max(max_length, len(candidate))\n\n right -= 1\n\n return max_length", "def get_max_word_length(self, word_dict):\n max_len = 0\n max_word = \"\"\n for word in word_dict:\n word = \"^\" + word + \"$\"\n if len(word) > max_len:\n max_len = len(word)\n max_word = word\n print(\"Longest word: \" + max_word + \" \" + str(max_len))\n return max_len", "def longest_ORF(dna):\n orfs = find_all_ORFs_both_strands(dna)\n maxorf =orfs[1];\n for s in orfs:\n if len(s)>len(maxorf):\n maxorf=s\n return maxorf", "def get_lcp(s,sa):\n lcp = list()\n lcp.append(0)\n for i in range(1,len(sa)):\n lcp.append( longest_prefix_length(s, sa[i], sa[i-1]) )\n return lcp", "def longest_ORF(dna):\n l = find_all_ORFs_both_strands(dna)\n max_len = 0\n r = \"\"\n # [???] 
what if there are ORFs with same length?\n # this function just get the first one.\n if len(l) == 0:\n return \"\"\n for o in l:\n if len(o) > max_len:\n r = o\n max_len = len(o)\n return r", "def shn_abbreviate(word, size=48):\n\n if word:\n if (len(word) > size):\n word = \"%s...\" % word[:size - 4]\n else:\n return word\n else:\n return word", "def longest_word(text):\r\n try:\r\n # replace special characters in the input sentence\r\n text = re.sub('[^A-Za-z0-9]+', ' ', text).lower()\r\n\r\n # split the text by spaces and put it in a list\r\n text_list = list(set(text.split(' ')))\r\n\r\n # remove blanks from list\r\n if \"\" in text_list:\r\n text_list.remove(\"\")\r\n\r\n # find longest word's length & longest word\r\n matches = []\r\n if len(text_list) != 0:\r\n text_list.sort(key=len, reverse=True)\r\n x = len(text_list[0])\r\n for i in text_list:\r\n if len(i) == x:\r\n matches.append(i)\r\n else:\r\n break\r\n matches.sort()\r\n return (\"Longest word/s: \" + str(matches) + \" & its length is: \" + str(x))\r\n else:\r\n return None\r\n except Exception as e:\r\n print(\"Following exception occured while finding longest word - {}\".format(e))\r\n return None", "def find_longest_word(list):\n x=len(list[0]) #set x be the first length of word in the list\n for i in range(0,len(list)):\n if x<=len(list[i]):#if the second one is longer than the first\n x=len(list[i])#assign x to the bigger one\n else:\n continue#repeat until find the max number\n return x", "def longest_ORF(dna):\n\n\n\n\n #use the find_all_orfs_both_strands function and then compute the max length of these\n ORFs = find_all_ORFs_both_strands(dna)\n return max(ORFs,key=len)", "def longest_ORF(dna):\n ORFs = find_all_ORFs_both_strands(dna)\n longest = ''\n for ORF in ORFs:\n if len(ORF) > len(longest):\n longest = ORF\n return longest", "def most_repeating_word(words):\n return max(words, key=most_repeating_letter_count)", "def part2(data: str = None) -> int:\n polymer: str = getpolymer(data)\n minlength: int = len(polymer)\n for asciichar in range(ord(\"a\"), ord(\"z\") + 1):\n polymermut = re.sub(chr(asciichar), \"\", polymer, flags=re.IGNORECASE)\n minlength = min(minlength, len(react(polymermut)))\n return minlength", "def abbreviator(max_length):\n \n def abbreviate(text):\n if len(text) <= max_length:\n return text\n else:\n return text[: max_length - 3] + \"...\"\n\n return abbreviate", "def longest_word_length(words):\n\n longest = len(word[0])\n\n for word in words:\n if longest < len(word):\n longest = len(word)\n\n return longest", "def strnextling(prefix):\n if not prefix:\n ## all strings start with the null string,\n ## therefore we have to approximate strnextling('')\n ## with the last unicode character supported by python\n ## 0x10ffff for wide (32-bit unicode) python builds\n ## 0x00ffff for narrow (16-bit unicode) python builds\n ## We will not autodetect. 
0xffff is safe enough.\n return unichr(0xffff)\n s = prefix[:-1]\n c = ord(prefix[-1])\n if c >= 0xffff:\n raise RuntimeError\n s += unichr(c + 1)\n return s", "def keyword_length(text):\n text = scrub_string(text)\n a = [fabs(IC(text, ncol) - ENGLISH_IC) for ncol in range(1, MAX_LEN)]\n return a.index(min(a)) + 1", "def longest_token(corpus):\n token_length = max([(len(x), x) for x in corpus])\n long_token = [(len(i), i) for i in corpus if len(i) == token_length[0]]\n return long_token", "def lengthof_lastword(s):\n a = s.split()\n if a:\n return len(a[len(a)-1])\n return 0", "def longest_ORF(dna):\n orfs = find_all_ORFs_both_strands(dna)\n longest = max(orfs, key=len)\n return longest", "def correction(word):\r\n return max(candidates(word), key=P)", "def common_prefix_length(s, u):\n length = 0\n for cs, cu in zip(s, u):\n if cs != cu:\n break\n length += 1\n return length", "def adj_lemma(word):\n if word.endswith(\"er\"):\n return word[:-2].lower()\n elif word != (\"best\") and word.endswith(\"est\"):\n return word[:-3].lower()\n else:\n return word.lower()", "def longest_increasing_suffix(n):\n m, suffix, k = 10, 0, 1\n while n:\n n, last = n // 10, n % 10\n if remainder // 10 < last:\n m, suffix, k = _____________, last, 10 * k\n else:\n return suffix\n return suffix", "def strprevling(prefix):\n if not prefix:\n ## There is no prevling for the null string\n return prefix\n s = prefix[:-1]\n c = ord(prefix[-1])\n if c > 0:\n s += unichr(c - 1) + unichr(0xffff)\n return s", "def _model_string_maxlen():\n # hardcoded for convenience. Could be dynamically set in future.\n # the current longest is: BLOSUM62+I+G+X, i.e. 14 chars.\n # so we just over double it, for safety\n\n return 30", "def length_of_longest_substring(s):\n def compare():\n return max(cur_length, longest_substring)\n\n longest_substring = 0\n cur_substring = \"\"\n for c in s:\n if c in cur_substring:\n cur_length = len(cur_substring)\n longest_substring = compare()\n cur_substring = c\n else:\n cur_substring += c\n return compare()", "def longestCommonPrefix(self, strs: List[str]) -> str:\r\n common = \"\"\r\n if not strs:\r\n return common\r\n shortest_str = min(strs, key=len)\r\n for i in range(len(shortest_str)):\r\n char = shortest_str[i]\r\n for item in strs:\r\n if item[i] != char:\r\n return common\r\n common += char\r\n return common", "def computeMaxWordLength(text):\n return max(text.split(), key=getWordKey) # usually key argument is a function defined by 'def' or 'lambda'", "def longest_palindromic_substring(s):\n longest = s[0] if len(s) > 0 else \"\"\n for i in range(len(s)):\n j = len(s)\n while s[i] in s[i+1:j] and j <= len(s):\n j = s[i + 1:j].rfind(s[i]) + i + 2\n print(i, j)\n if is_palindrome(s[i:j]) and len(longest) < len(s[i:j]):\n longest = s[i:j]\n j = len(s) + 1\n else:\n j -= 1\n if len(s) - len(longest) <= i:\n break\n return longest", "def longestPalindrome(self, s: str) -> str:\n # Basic validations\n self.validator.validate_word_length(s)\n self.validator.validate_word_char_types(s)\n\n start = 0\n end = 0\n for i in range(len(s)):\n left_pos = self.expand_around_center(s, i, i)\n right_pos = self.expand_around_center(s, i, i + 1)\n length = max(left_pos, right_pos)\n if length > (end - start):\n start = i - (length - 1) // 2\n end = i + (length // 2)\n\n max_palindrome = list(s)[start : end + 1]\n return ''.join(max_palindrome)", "def non_repeating_substring(str1: str) -> int:\n max_length = 0\n seen = {}\n window_start = 0\n for window_end in range(len(str1)):\n right_char = 
str1[window_end]\n if right_char in seen:\n window_start = max(window_start, seen[right_char] + 1)\n seen[right_char] = window_end\n max_length = max(max_length, window_end - window_start + 1)\n return max_length", "def reduce_base(word: str) -> str:\n porter = PorterStemmer()\n return porter.stem(word)", "def last(word):\n\treturn word[-1]", "def match_pfx(uni_word, morphs):\n uni_morph = unicode(morphs[0].lex, 'UTF-8')\n if uni_word.startswith(uni_morph): # just one morpheme starts with word\n return len(uni_morph), 1\n if len(morphs) == 1: # only one morpheme is left\n morph_dec = decompose(morphs[0].lex)\n word_dec = decompose(uni_word)\n if word_dec == morph_dec:\n return 1, len(uni_word)\n else:\n return -1, -1\n for i in range(2, len(morphs)+1):\n submorphs = ''.join([morph.lex for morph in morphs[:i]])\n submorphs_dec = decompose(submorphs)\n for k in range(1, len(unicode(submorphs, 'UTF-8'))):\n word_dec = decompose(uni_word[:k])\n # logging.debug(' %s(%s:%s) <- %s(%s:%s)', uni_word[:k].encode('UTF-8'), word_dec, to_hex(word_dec),\n # submorphs, submorphs_dec, to_hex(submorphs_dec))\n if word_dec == submorphs_dec:\n return k, i\n morphs_str = ' + '.join([str(morph) for morph in morphs])\n logging.debug('PFX: %s(%s): %s', uni_word.encode('UTF-8'), decompose(uni_word), morphs_str)\n return -1, -1", "def FindLongestWord(data):\r\n if not data:\r\n return \"\", 0\r\n longest = 0\r\n longest_word = \"\"\r\n words = re.split(\"[\\s,\\n,\\r]\", data)\r\n if words:\r\n for word in words:\r\n length = len(word)\r\n if length > longest:\r\n longest = length\r\n longest_word = word\r\n return longest", "def longest_ORF_noncoding(dna, num_trials):\n longest = ''\n lenl = 0\n for i in range(num_trials):\n shuffle = shuffle_string(dna)\n if len(longest_ORF(shuffle)) > lenl:\n longest = longest_ORF(shuffle)\n lenl = len(longest)\n return len(longest)", "def last(word):\n return word[-1]", "def _get_overlap(word1, word2):\n max_overlap = min(len(word1), len(word2))\n max_found = 0\n for size in range(1, max_overlap+1):\n suffix = word1[-size:]\n prefix = word2[:size]\n if suffix == prefix:\n max_found = size\n return max_found", "def middle(word):\n return word[1:-1]", "def getMaxKey(self):\n if len(self.word_to_freq) == 0:\n return \"\"\n\n tail = self.tail.prev\n while tail is not None:\n if len(tail.words) > 0:\n return next(iter(tail.words))\n else:\n tail = tail.prev\n\n return \"\"", "def compile_word(word):\n \n result = ''\n for i,ltr in enumerate(word):\n result = str(10**(len(word)-i-1)) + '*' + ltr + result\n if i != len(word)-1:\n result = '+' + result\n\n return result", "def LPSubsequenceLength(str):\n return len(LPSubsequence(str))", "def longest_name():\n def foolen(p): # nothing wrong with having a function inside a function\n return len(p['name'])\n\n return sorted(PEOPLE_LIST, key=foolen, reverse=True)", "def primary_language(x: str) -> str:\n s = x.find(\" \")\n if s > -1:\n x = x[:s]\n return x", "def middle(word):\n\treturn word[1:-1]", "def longest_name():\n def foolen(p): # nothing wrong with having a function inside a function\n return len(p['name'])\n return sorted(PEOPLE_LIST, key=foolen, reverse=True)", "def infer_spaces(s):\n s=s.decode('utf8')\n # Find the best match for the i first characters, assuming cost has\n # been built for the i-1 first characters.\n # Returns a pair (match_cost, match_length).\n def best_match(i):\n candidates = enumerate(reversed(cost[max(0, i-maxword):i]))\n return min((c + wordcost.get(s[i-k-1:i], 9e999), k+1) for k,c in candidates)\n\n # 
Build the cost array.\n cost = [0]\n for i in range(1,len(s)+1):\n c,k = best_match(i)\n cost.append(c)\n\n # Backtrack to recover the minimal-cost string.\n out = []\n i = len(s)\n while i>0:\n c,k = best_match(i)\n assert c == cost[i]\n out.append(s[i-k:i])\n i -= k\n\n return \" \".join(reversed(out))", "def longest_ORF(dna):\n longest_ORF = []\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# initialize empty list\n longest_ORF = longest_ORF + find_all_ORFs_both_strands(dna)\t\t\t\t\t\t\t\t# adds all ORFs both strands to list\n\n for n in range(len(longest_ORF)):\t\t\t\t\t\t\t\t\t\t\t\t\t\t# searches through the list\n\t if longest_ORF[n] == max(longest_ORF, key=len):\t\t\t\t\t\t\t\t\t\t# finds longest string\n\t \treturn longest_ORF[n]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# returns longest string", "def get_prefix(self, route):\n bin_nmsk = '.'.join([bin(int(x)+256)[3:] for x in route[NMSK].split('.')])\n return self.len_pref(bin_nmsk)", "def longest_ORF(dna):\n i = 0\n both_ORFs = find_all_ORFs_both_strands(dna)\n longest = both_ORFs[0]\n while i + 1 < len(both_ORFs):\n if len(both_ORFs[i+1]) > len(longest):\n longest = both_ORFs[i+1]\n i+=1\n return longest", "def noun_lemma(word):\n if word.endswith(\"s\"):\n if word.endswith(\"ss\"):\n return word.lower()\n elif word.endswith(\"ies\"):\n return word[:-3].lower() + (\"y\")\n else:\n return word[:-1].lower()\n if word.endswith(\"men\"):\n return word[:-2].lower() + (\"an\")\n else:\n return word.lower()", "def _get_prefix_length(self):\n return self.__prefix_length", "def most_repeating_letter_count(word):\n return Counter(word.lower()).most_common(1)[0][1]", "def lengthOfLastWord(self, s):\n l = len(s)\n c = 0\n i = 1\n while l - i > -1:\n if s[-i] != ' ':\n c += 1\n elif c != 0:\n break\n\n i += 1\n return c", "def longest_ORF_noncoding(dna, num_trials):\n longest=[]\n for i in range(0,num_trials):\n \tshuffled_str=shuffle_string(dna)\n \tlongest.append(longest_ORF(shuffled_str))\n long_ORF=max(longest,key=len)\n return len(long_ORF)", "def computeMaxWordLength(text):\n # BEGIN_YOUR_CODE (our solution is 1 line of code, but don't worry if you deviate from this)\n return(max(sorted(text.split(), reverse = True), key = len))\n # END_YOUR_CODE", "def lengthOfLongestSubstring(s):\n arr = [1] * len(s)\n i = 0\n j = 1\n while j < len(s):\n if s[j] not in s[i:j]:\n arr[i] += 1\n j = j + 1\n else:\n i = i + 1\n j = i + 1\n return max(arr)", "def profanity_word_handler(word):\n return word[0] + ''.join([settings.CENSOR_PROFANITY_REPLACEMENT_CHARACTER for I in range(len(word)-2)]) + word [-1]", "def lengthOfLongestSubstring(self, s):\n longest = 0\n i = 0\n j = 0\n n = len(s)\n seen = {}\n while i < n and j < n:\n c = s[j]\n if c in seen:\n i = seen[c] + 1\n seen[c] = j\n j += 1\n longest = max(longest, j-i)\n\n return longest", "def shorten(string, maxLen, last):\n if len(string) <= maxLen:\n return string\n string = string[:maxLen]\n string = string[::-1]\n found = re.search(re.escape(last), string)\n if found:\n string = string[found.start():]\n string = string[::-1]\n return string", "def getWordKey(word):\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return len(word), word\n # END_YOUR_ANSWER", "def longest_ORF(dna):\n MyList = find_all_ORFs_both_strands(dna)\n if MyList == []:\n maximum_orf = 'none'\n else:\n maximum_orf = max(MyList, key=len)\n return maximum_orf", "def len_of_longest_string(s):\n return len(max(s, key=len))", "def get_experiment_name(hp, prefix_length=-1):\n try:\n removetable = 
str.maketrans('', '', '_')\n words_org = list(hp.keys())\n words = [s.translate(removetable) for s in words_org]\n words_unique = []\n # print(words)\n ####\n root = Node()\n for word in words:\n root.add_word(word)\n # print the trie\n root.print_trie()\n # for word in words:\n for i in range(len(words)):\n ptr = g_leaf_ptrs.get(words[i])\n unique_prefix = ptr.find_unique_prefix(words[i])\n words_unique.append(unique_prefix)\n name_final = check_name(words[i], unique_prefix, prefix_length)\n\n if words_org[i] in hp.keys():\n hp[name_final] = hp.pop(words_org[i])\n ####\n # print(words_unique)\n # output\n res_final = '_'.join([a + '_' + str(b) for (a, b) in hp.items()])\n # print(res_final)\n # print(hp)\n return res_final\n except:\n print('{} : exception, please handle the error'.format('get_experiment_name'))", "def suffix(sequence, l):\n if l > len(sequence):\n return sequence\n else:\n return sequence[-l:]", "def shortest_word(text):\r\n try:\r\n # replace special characters in the input sentence\r\n text = re.sub('[^A-Za-z0-9]+', ' ', text).lower()\r\n\r\n # split the text by spaces and put it in a list\r\n text_list = list(set(text.split(' ')))\r\n\r\n # remove blanks from list\r\n if \"\" in text_list:\r\n text_list.remove(\"\")\r\n\r\n # find longest word's length & longest word\r\n matches = []\r\n if len(text_list) != 0:\r\n text_list.sort(key=len)\r\n x = len(text_list[0])\r\n for i in text_list:\r\n if len(i) == x:\r\n matches.append(i)\r\n else:\r\n break\r\n matches.sort()\r\n return (\"Shortest word/s: \" + str(matches) + \" & its length is: \" + str(x))\r\n else:\r\n return None\r\n except Exception as e:\r\n print(\"Following exception occured while finding shortest word - {}\".format(e))\r\n return None", "def checkio_best(text):\n text = text.lower()\n # text.count为函数,返回指定char的数量\n return max(string.ascii_lowercase, key=text.count)", "def normalize(w):\n\n nfkd = unicodedata.normalize('NFKD', w)\n return ''.join(x for x in nfkd if unicodedata.category(x)[0] == 'L').lower()", "def length_of_longest_substring(arr, k):\n window_start = 0\n max_repeat_times = 0\n frequency_map = {0: 0, 1: 0}\n len_longest = 0\n\n for window_end in range(len(arr)):\n right_char = arr[window_end]\n left_char = arr[window_start]\n frequency_map[right_char] += 1\n max_repeat_times = frequency_map[0]\n\n if max_repeat_times > k:\n frequency_map[left_char] -= 1\n window_start += 1\n len_longest = max(len_longest, window_end - window_start + 1)\n\n return len_longest", "def get_longest_word(lettersGrid, dictionary):\n size = len(lettersGrid)\n maxLen = size*size\n # pruning: filter impossible words in the dictionary\n newDict = filterDict(lettersGrid, dictionary, maxLen)\n\n longestLen = 0\n longestPath = []\n for i in range(size):\n for j in range(size): \n startCoord = (i,j)\n startPath = [startCoord]\n startStack = [(startCoord, startPath)]\n # find longest path with each start cell, and update longest\n # length and path\n currLen, currPath = dfs_on_grid(startStack, size, maxLen,\n lettersGrid, newDict)\n if currLen == maxLen:\n return path_to_word(currPath, lettersGrid)\n if currLen > longestLen:\n longestLen = currLen\n longestPath = currPath\n\n # convert coordinates to letters before returning\n return path_to_word(longestPath, lettersGrid)", "def greco_latin_plural_noun(base_token=None):\n\n output_string = \"\"\n if base_token is not None:\n if base_token.endswith(\"us\"):\n output_string = base_token[:-2] + \"i\"\n elif base_token.endswith(\"ma\"):\n output_string = base_token + 
\"ta\"\n elif base_token.endswith(\"a\"):\n output_string = base_token[:-1] + \"ae\"\n elif base_token.endswith((\"on\", \"um\")):\n output_string = base_token[:-2] + \"a\"\n elif base_token.endswith(\"sis\"):\n output_string = base_token[:-3] + \"ses\"\n elif base_token.endswith(\"is\"):\n output_string = base_token[:-2] + \"ides\"\n elif base_token.endswith(\"men\"):\n output_string = base_token[:-3] + \"mina\"\n elif base_token.endswith(\"ex\"):\n output_string = base_token[:-2] + \"ices\"\n elif base_token.endswith(\"x\"):\n output_string = base_token[:-1] + \"ces\"\n\n return output_string", "def longest_ORF_noncoding(dna, num_trials):\n org = list(dna)\n max_len = 0\n r = \"\"\n for i in range(num_trials):\n l = org\n shuffle(l)\n orf = longest_ORF(collapse(l))\n if len(orf) > max_len:\n r = orf\n max_len = len(orf)\n return r", "def infer_spaces(s):\n\n\t# Find the best match for the i first characters, assuming cost has\n\t# been built for the i-1 first characters.\n\t# Returns a pair (match_cost, match_length).\n\tdef best_match(i):\n\t\tcandidates = enumerate(reversed(cost[max(0, i-maxword):i]))\n\t\treturn min((c + wordcost.get(s[i-k-1:i], 9e999), k+1) for k,c in candidates)\n\n\t# Build the cost array.\n\tcost = [0]\n\tfor i in range(1,len(s)+1):\n\t\tc,k = best_match(i)\n\t\tcost.append(c)\n\n\t# Backtrack to recover the minimal-cost string.\n\tout = []\n\ti = len(s)\n\twhile i>0:\n\t\tc,k = best_match(i)\n\t\tassert c == cost[i]\n\t\tout.append(s[i-k:i])\n\t\ti -= k\n\n\treturn \" \".join(reversed(out))", "def longest(my_list):\r\n\treturn sorted(my_list, key=len)[-1]", "def infer_spaces(s):\n global unfolded\n if s in unfolded:\n return unfolded[s]\n\n # Find the best match for the i first characters, assuming cost has\n # been built for the i-1 first characters.\n # Returns a pair (match_cost, match_length).\n def best_match(i):\n candidates = enumerate(reversed(cost[max(0, i-maxword):i]))\n return min((c + wordcost.get(s[i-k-1:i], 9e999), k+1) for k,c in candidates)\n\n # Build the cost array.\n cost = [0]\n for i in range(1,len(s)+1):\n c,k = best_match(i)\n cost.append(c)\n\n # Backtrack to recover the minimal-cost string.\n out = []\n i = len(s)\n while i>0:\n c,k = best_match(i)\n assert c == cost[i]\n out.append(s[i-k:i])\n i -= k\n \n unfolded[s] = ' '.join(reversed(out))\n return ' '.join(reversed(out))" ]
[ "0.7097997", "0.676997", "0.6517193", "0.6514086", "0.6458476", "0.6410171", "0.63811177", "0.6330309", "0.63036937", "0.6231064", "0.62217957", "0.61553735", "0.6102581", "0.60974884", "0.6072236", "0.6067233", "0.6035597", "0.60004765", "0.5977789", "0.5977576", "0.59734416", "0.5966286", "0.59598714", "0.5953818", "0.59462494", "0.59275156", "0.592631", "0.59169775", "0.59166175", "0.5914048", "0.5901358", "0.5897874", "0.5877523", "0.58690417", "0.58635765", "0.58267164", "0.58247805", "0.5822815", "0.5806859", "0.57991594", "0.5797139", "0.57591397", "0.57580256", "0.57457787", "0.5740324", "0.5727421", "0.57227993", "0.5718411", "0.5711673", "0.5703893", "0.5696896", "0.5682894", "0.5671094", "0.5652412", "0.56285053", "0.5617664", "0.56122047", "0.5610708", "0.55911905", "0.5586089", "0.55847555", "0.5577312", "0.5571474", "0.5567591", "0.55612934", "0.55505866", "0.5549273", "0.5530213", "0.55268085", "0.55262905", "0.5524817", "0.55132806", "0.5511939", "0.5509091", "0.55088025", "0.550693", "0.550584", "0.55004644", "0.5500036", "0.54838693", "0.548224", "0.54749584", "0.5473807", "0.547337", "0.5472679", "0.54689395", "0.5463504", "0.54608077", "0.5456108", "0.54555196", "0.5453713", "0.54388356", "0.54273707", "0.5418953", "0.5417836", "0.5414747", "0.541413", "0.5401764", "0.54015404", "0.5400797" ]
0.66873753
2
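The document field above implements longest-prefix matching over a character trie: it walks the trie one Unicode character at a time and remembers the last position at which a complete segment ended. Below is a minimal stand-alone sketch of the same idea for readers of this entry; the LEAF marker, the build_seg_trie helper, and the example segment inventory are illustrative assumptions, not the original class's API.

LEAF = object()  # marks the end of a valid segment in the nested-dict trie

def build_seg_trie(segments):
    # Build a nested-dict trie from an iterable of IPA segment strings.
    trie = {}
    for seg in segments:
        node = trie
        for ch in seg:
            node = node.setdefault(ch, {})
        node[LEAF] = True
    return trie

def longest_prefix(trie, word):
    # Return the longest prefix of word that is a complete trie entry.
    last_found = 0
    node = trie
    for pos, ch in enumerate(word):
        if ch not in node:
            break
        node = node[ch]
        if LEAF in node:
            last_found = pos + 1
    return word[:last_found]

# "tʃ" wins over "t" because the walk keeps extending the match as long as
# the trie has a continuation, exactly as in longest_one_seg_prefix above.
seg_trie = build_seg_trie(["t", "tʃ", "a", "aː"])
assert longest_prefix(seg_trie, "tʃaː") == "tʃ"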
Returns a list of segments from a word
def ipa_segs(self, word, normalize=True):
    if normalize:
        word = FeatureTable.normalize(word)
    return self._segs(word, include_invalid=False, normalize=normalize)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
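The ipa_segs document above delegates to a private _segs helper that is not part of this entry, so only the normalization step is visible. The following is a minimal sketch of how greedy longest-prefix segmentation of this kind typically proceeds, reusing seg_trie and longest_prefix from the previous sketch; skipping characters that have no trie match is an assumption, not necessarily what _segs does with invalid input.

def greedy_segments(trie, word):
    # Split word into known segments, always taking the longest match first.
    segs = []
    pos = 0
    while pos < len(word):
        seg = longest_prefix(trie, word[pos:])
        if not seg:
            pos += 1  # unknown character: drop it (assumption)
            continue
        segs.append(seg)
        pos += len(seg)
    return segs

assert greedy_segments(seg_trie, "tʃaːt") == ["tʃ", "aː", "t"]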
[ "def segment(text: str) -> List[str]:\n\n if not text or not isinstance(text, str):\n return []\n\n return _cut_subword(_cut_etcc.word_tokenize(text))", "def segment(text, WORDS) -> List[Word]:\n Pword = Bag(WORDS)\n if not text: \n return []\n else:\n candidates = ([first] + segment(rest, WORDS)\n for (first, rest) in splits(text, 1))\n return max(candidates, key=lambda x: Pwords(x, Pword))", "def find_segment(bv: binaryninja.binaryview.BinaryView, name: str) -> List[Tuple[int, int]]:\n result = []\n for sn in bv.sections:\n sec = bv.get_section_by_name(sn)\n if sec.name == name:\n result.append((sec.start, sec.end))\n return result", "def segment(text: str, model: str = \"attacut-sc\") -> List[str]:\n if not text or not isinstance(text, str):\n return []\n\n _tokenizer = AttacutTokenizer(model)\n\n return _tokenizer.tokenize(text)", "def get_segments(input_path):\n with open(input_path, 'r') as segments_file:\n segments = []\n for line in segments_file:\n words = line.split('\\t')\n sg_dict = {}\n sg_dict['start'] = float(words[0].replace(',', '.'))\n sg_dict['end'] = float(words[1].replace(',', '.'))\n sg_dict['class'] = words[2][:-1]\n segments.append(sg_dict)\n return segments", "def segment(text):\n if not text: return []\n candidates = ([first]+segment(rest) for first,rest in splits(text))\n return max(candidates, key=Pwords)", "def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" \")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents", "def segment_words(self, string):\n words = []\n\n word_begin = 0\n while word_begin < len(string):\n word_options = self.find_prefixes(string[word_begin:])\n if len(word_options) > 0:\n best_word = self.unigram_provider.get_most_frequent_word(word_options)\n else:\n best_word = string[word_begin:word_begin+1]\n words.append(best_word)\n word_begin += len(best_word)\n\n return words", "def slice(self, word):\n # Short words aren't hyphenated.\n if len(word) <= 4:\n return [word]\n # If the word is an exception, get the stored points.\n if word.lower() in self.exceptions:\n points = self.exceptions[word.lower()]\n else:\n work = '.' 
+ word.lower() + '.'\n points = [0] * (len(work) + 1)\n for i in range(len(work)):\n t = self.tree\n for c in work[i:]:\n if c in t:\n t = t[c]\n if None in t:\n p = t[None]\n for j in range(len(p)):\n points[i + j] = max(points[i + j], p[j])\n else:\n break\n # No hyphens in the first two chars or the last two.\n points[1] = points[2] = points[-2] = points[-3] = 0\n\n # Examine the points to build the pieces list.\n pieces = ['']\n for c, p in zip(word, points[2:]):\n pieces[-1] += c\n if p % 2:\n pieces.append('')\n return pieces", "def segs_safe(self, word, normalize=True):\n if normalize:\n word = FeatureTable.normalize(word)\n return self._segs(word, include_invalid=True, normalize=normalize)", "def split_word(word):\n return [(word[:i], word[i:]) for i in range(len(word) + 1)]", "def subword_tokenize(self, word: str) -> List[str]:\r\n end_idx = min([len(word), self.ngram_max])\r\n sw_tokens = [self.SOW]\r\n start_idx = 0\r\n\r\n while start_idx < len(word):\r\n subword = word[start_idx:end_idx]\r\n if subword in self.bpe_vocab:\r\n sw_tokens.append(subword)\r\n start_idx = end_idx\r\n end_idx = min([len(word), start_idx + self.ngram_max])\r\n elif len(subword) == 1:\r\n sw_tokens.append(self.UNK)\r\n start_idx = end_idx\r\n end_idx = min([len(word), start_idx + self.ngram_max])\r\n else:\r\n end_idx -= 1\r\n\r\n sw_tokens.append(self.EOW)\r\n return sw_tokens", "def get_seg_features(string):\n seg_feature = []\n\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def _get_seg_features(string):\n seg_feature = []\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))", "def lists_and_segments(self):\n response = self._get(self.uri_for(\"listsandsegments\"))\n return json_to_py(response)", "def sentence_segment(self, doc, candidate_pos, lower):\n sentences = []\n for sent in doc.sents:\n selected_words = []\n for token in sent:\n # Store words only with cadidate POS tag\n if token.pos_ in candidate_pos and token.is_stop is False:\n if lower is True:\n selected_words.append(token.text.lower())\n else:\n selected_words.append(token.text)\n sentences.append(selected_words)\n return sentences", "def sub_words(word):\n sub_words_lst = []\n for i in range(len(word)):\n sub_word = word[:i]+word[i+1:]\n sub_words_lst.append(sub_word)\n return sub_words_lst", "def getWords(speech):\r\n return speech.split()", "def split_at(words, verb):\n if verb in words:\n i = words.index(verb)\n first_half = words[0:i]\n second_half = words[i+1:]\n return [first_half, second_half]\n else:\n return -1", "def getSegments(self) -> List[int]:\n ...", "def _process(self, word: str) -> List[str]:\n # if a blank arrives from splitting, just return an empty list\n if len(word.strip()) == 0:\n return []\n word = self.convert_consonantal_i(word)\n my_word = \" \" + word + \" \"\n letters = list(my_word)\n positions = []\n for dipth in self.diphthongs:\n if dipth in my_word:\n dipth_matcher = re.compile(\"{}\".format(dipth))\n matches = dipth_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n matches = self.kw_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n 
letters = string_utils.merge_next(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions.clear()\n if not self._contains_vowels(\"\".join(letters)):\n return [\n \"\".join(letters).strip()\n ] # occurs when only 'qu' appears by ellision\n positions = self._starting_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_right(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._starting_consonants_only(letters)\n positions = self._ending_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_left(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._ending_consonants_only(letters)\n positions = self._find_solo_consonant(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_solo_consonant(letters)\n positions = self._find_consonant_cluster(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_consonant_cluster(letters)\n return letters", "def segments(self):\n return self._segments", "def segment_spanish(input_text):\n processed_document = nlp(input_text)\n\n tokens = drop_punctuation_and_numbers([word for word in processed_document])\n\n unique_tokens = set(tokens)\n return list(unique_tokens)", "def sentence_segment(self, doc, candidate_pos, lower,bigrams,trigrams):\n sentences = []\n for sent in doc.sents:\n selected_words = []\n bigram_words=[]\n for token in sent:\n bigram_words.append(token.text)\n # Store words only with cadidate POS tag\n if token.pos_ in candidate_pos and token.is_stop is False:\n if lower is True:\n selected_words.append(token.text.lower())\n else:\n selected_words.append(token.text)\n if bigrams==True:\n for i in range(len(sent)-1):\n if sent[i].pos_ in candidate_pos and sent[i].is_stop is False and sent[i+1].pos_ in candidate_pos and sent[i+1].is_stop is False:\n if lower is True:\n selected_words.append(sent[i].text.lower())\n else:\n selected_words.append(str(sent[i].text+\" \"+sent[i+1].text))\n if trigrams==True:\n for i in range(len(sent)-2):\n if sent[i].pos_ in candidate_pos and sent[i].is_stop is False and sent[i+1].pos_ in candidate_pos and sent[i+1].is_stop is False and sent[i+2].pos_ in candidate_pos and sent[i+2].is_stop is False:\n if lower is True:\n selected_words.append(sent[i].text.lower())\n else:\n selected_words.append(str(sent[i].text+\" \"+sent[i+1].text+\" \"+sent[i+2].text))\n sentences.append(selected_words)\n return sentences", "def twowords(word: str) -> Iterator[List[str]]:\n\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]", "def word_fts(self, word, normalize=True):\n return [self.fts(ipa, False) for ipa in self.ipa_segs(word, normalize)]", "def get_segments(file_name):\n count = 1\n total_num_lines = num_lines_in_file(file_name)\n with open(file_name, 'r') as file_in:\n pre_segment = file_in.readline().split()[0]\n segments = [pre_segment]\n num_lines = []\n for line in file_in:\n line = line.split()\n if line[0].startswith(';;'):\n count += 1\n else:\n if len(line) >= LINE_LEN:\n if line[0] == pre_segment:\n count += 1\n else:\n segments.append(line[0])\n pre_segment = line[0]\n num_lines.append(count)\n count = 1\n else:\n count += 1\n last_num_lines_entry = total_num_lines - sum(num_lines)\n num_lines.append(last_num_lines_entry)\n 
assert len(segments) == len(num_lines), \"%i != %i\" %(len(segments), len(num_lines))\n return segments, num_lines", "def __init__(self, word_string, feature_table):\n self.word_string = word_string\n self.feature_table = feature_table\n self.segments = [Segment(char, self.feature_table) for char in self.word_string]", "def get_word_list(sentence):\n sentence = space1.sub(r'\\1 \\2', sentence)\n sentence = space2.sub(r\"\\1 \\2\", sentence)\n sentence = space3.split(sentence)\n sentence = \" \".join(sentence)\n wordlist = [i for i in sentence.split()]\n return \" \".join(wordlist)", "def split(text):\n doc = nlp(text)\n sentences = [x.text_with_ws for x in doc.sents]\n return sentences", "def segments(self):\r\n return Segments(self)", "def get_word(self,line):\r\n return line.split('.')", "def segment(self):\n start = self.alignment.matching_function_startpoint(self.idx)\n end = self.alignment.matching_function_endpoint(self.idx)\n return [start, end]", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def segments(self):\n L = len(self.vertices)\n return itertools.chain((self._subset((i,i+1)) for i in range(len(self)-1)),\n (self._subset((L-1,0)),))", "def getSegments(self):\n l = len(self.points)\n return [Segment(self.points[i % l], self.points[(i + 1) % l], \\\n color=self.side_color, width=self.side_width) for i in range(l)]", "def split_words(self, position=None):\r\n if position is None:\r\n position = self.offset\r\n text = self.source_code[:position]\r\n return re.findall(self.id_regex, text)", "def getSegments(points):\n return _identifyStrokes(points)[1]", "def make_word_list(start, lines, excluded):\r\n words = []\r\n for line in lines:\r\n word = line.rstrip()\r\n if len(word) == len(start):\r\n if (word == start) or (word not in excluded):\r\n words.append(word)\r\n return words", "def split_word_in_all_comps(self, term: str) -> List[str]:\n all_stems = []\n\n words = term.split()\n for word in words:\n stems = self.decompound(word)\n all_stems.extend(stems)\n\n for stem in stems:\n more_stems = self.split_word_in_all_comps(stem)\n all_stems.extend(more_stems)\n\n return all_stems", "def splitInSentence(self,text):\n return self._support.splitInPhrase(text)", "def stokenize(txt, StopWords):\n Tokens = tokenize(txt)\n UnStopped = [t for t in Tokens if t not in StopWords]\n Stokens = [ps.stem(w) for w in UnStopped] # Stokens = Stemmed Tokens, list of all stokens in the txt\n \n return Stokens", "def splits(text, start=0, end=20) -> Tuple[str, str]:\n return [(text[:i], text[i:]) \n for i in range(start, min(len(text), end)+1)]", "def next_word(word, string, start=0, sensitive=True):\n if start in range(0, len(string)):\n ls = SE.full_words(word, string[start:], sensitive)\n if ls:\n return [ls[0][0] + start, ls[0][1] + start]\n return []", "def similar_words(word, morph_model, lm, lm_segmented):\n similar_words = []\n nbest = min(5, len(word))\n segmentations = [morph_model.viterbi_nbest(word, nbest)[i][0]\n for i in range(nbest)]\n for segmented_word in segmentations:\n word_to_search = ''.join(segmented_word[:-1]) #word without last segment\n #word_to_search = max(segmented_word, key=len) #the longest segment \n #(does not work for Finnish as well as for Swedish)\n if word_to_search in lm.vocab:\n if word_to_search not in similar_words:\n similar_words.append(word_to_search)\n \n # If the word without its 
last morpheme is not found from the\n # vocabulary, search for possible word continuations using the \n # the language model trained on word morphemes\n else:\n possible_continuations = [next_morph[0] for next_morph in\n lm_segmented.counts.__getitem__\n (segmented_word[:-1]).most_common(3)]\n for morph in possible_continuations:\n similar_word = word_to_search + morph\n if similar_word not in similar_words:\n if similar_word in lm.vocab:\n similar_words.append(similar_word)\n return similar_words", "def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs", "def get_sentences(text):\n \n return text.split('.')", "def parse_segments(self):\n segs = self.unixtext.split(\"$$\")\n for seg in segs:\n self.segments.append(TextProductSegment(seg, self))", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def parse_sentence(self, text):\n l = []\n tokens = word_tokenize(text)\n print(tokens)\n skip = 0\n i = -1 # index of token in tokens list\n for token in tokens:\n i += 1\n if skip:\n skip -= 1\n # CORONA TERMS:\n elif token.lower() in corona_words:\n l.append('covid')\n elif is_flag_emoji(token):\n try:\n l.append(flag.ISO3166[flag.dflagize(token)[1:3]])\n except:\n continue\n # HASHTAGS:\n elif token == '#' and i+1 < len(tokens):\n parse_hashtage(tokens[i+1], l, tokens)\n skip += 1\n # TAGS:\n elif token == '@' and i+1 < len(tokens):\n parst_tag(tokens[i+1], l)\n skip = True\n # Size AS A WORD:\n elif token.lower() in sizes.keys():\n l.append(parse_number('1', token))\n elif check_if_term_is_fraction(token):\n if i < len(tokens)-1 and tokens[i+1].lower() in percent:\n l.append(token + '%')\n skip += 1\n else:\n l.append(token)\n # NUMBERS:\n elif isNumber(token):\n token = clean_number(token)\n if (i < len(tokens) - 2) and (tokens[i+1].lower() in sizes.keys()) and (tokens[i+2].lower() in percent):\n l.append(parse_number(token, tokens[i+1]) + '%')\n skip += 2\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in percent:\n l.append(parse_number(token) + '%')\n skip += 1\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in sizes.keys():\n l.append(parse_number(token, tokens[i+1]))\n skip += 1\n elif (i < len(tokens) - 1) and check_if_term_is_fraction(tokens[i+1]):\n l.append(token +' '+ tokens[i+1])\n skip += 1\n else:\n l.append(parse_number(token))\n elif isNumber(token[0:len(token) - 1]) and token[len(token)-1].lower() in sizes:\n tokens.append(token[0:len(token) - 1])\n tokens.append(token[len(token)-1])\n # OTHER TOKENS:\n else:\n cleaning(token, tokens, l)\n\n text_tokens_without_stopwords = [w for w in l if w.lower() not in stop_words]\n print(text_tokens_without_stopwords)\n return text_tokens_without_stopwords", "def list_segment_names(self) -> PagingList[str]:\n return PagingList(self._generate_segment_names, 128)", "def words(self):\n return 
self.text.split()", "def segment_to_vector(self, seg, normalize=True):\n return self.fts(seg, normalize).strings()", "def lookup(self, word):\n word = word.lower()\n if self.stemmer:\n word = self.stemmer.stem(word)\n \n return [self.documents.get(id, None) for id in self.index.get(word)]", "def tag_words (lx, wds):\n if (wds == []):\n return [[]]\n else:\n tag_first = tag_word (lx, wds[0])\n tag_rest = tag_words (lx, wds[1:])\n return [[fst] + rst for fst in tag_first for rst in tag_rest]", "def get_word_list(file_name):\n\tbook = get_file_text(file_name)\n\tbook = strip_header(book)\n\tbook = strip_punctuation(book)\n\tbook = book.lower()\n\twords = re.split(r'\\s+', book)\n\treturn words", "def get_phrase_list(self, words, length):\n\n if len(words) >= length:\n return [words[i:i+length] for i in range(len(words) - length + 1)]\n else:\n return None", "def get_word_list(file_name):\n file_ = open(file_name, 'r')\n lines = file_.readlines()\n\n start_line = 0\n while lines[start_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n start_line += 1\n\n lines = lines[start_line+1:]\n\n end_line = 0\n while lines[end_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1:\n end_line += 1\n\n lines = lines[:end_line-3]\n\n list_ = ' '.join(lines)\n list_ = str.lower(list_)\n list_ = list_.translate(None, string.punctuation)\n list_ = list_.split()\n\n return list_", "def _segment(self, string: str) -> Generator:\n buff: List = []\n segment_start = 1\n type_: Optional[Types] = None\n for i, line in enumerate(string.split(\"\\n\"), start=1):\n line_type = self._parse_segment_type(line)\n if line_type is not None:\n if type_ is not None:\n yield type_, buff\n segment_start = i + 1\n buff = []\n type_ = line_type\n buff.append((line + \"\\n\", i))\n if buff:\n if type_ is None:\n raise ValueError(\n f\"Most likely missing Var name at \" f\"line {segment_start}\"\n )\n yield type_, buff", "def get_results_from_segmentation(doc_id: int, project_id: int) -> List[dict]:\n session = konfuzio_session()\n\n segmentation_url = get_document_segmentation_details_url(doc_id, project_id, action='segmentation')\n segmentation_result = retry_get(session, segmentation_url)\n segmentation_result = segmentation_result.json()\n\n return segmentation_result", "def label_to_segments(utters, labels):\n segment_list = []\n for i, utterence in enumerate(utters):\n segments = []\n seg = \"\"\n for j, char in enumerate(utterence):\n if labels[i][j] >= 0.5:\n if len(seg) > 0:\n segments.append(seg)\n seg = \"\"\n seg = seg + char\n else:\n seg = seg + char\n if j == (len(utterence) - 1):\n segments.append(seg)\n segment_list.append(segments)\n return segment_list", "def process_sentence(sentence: str) -> list:\r\n return [process_word(word) for word in sentence.split()][:-1]", "def get_all(self):\n return self._segments", "def get_words():\n # words\n words_list = list()\n for i in range(1, 114+1):\n sura = quran.get_sura(i)\n for aya in sura:\n wordsList = aya.split(' ')\n for word in wordsList:\n words_list.append(word)\n\n return words_list", "def _get_segments(img, mean_scale=1000, num_samples=16, return_enough_segments=False):\n # randomly choose the segmentation scale\n scale = np.random.uniform(0.5*mean_scale, 1.5*mean_scale)\n # run heuristic segmentation\n segments = skimage.segmentation.felzenszwalb(img, scale=scale,\n min_size=int(scale))\n # sample a set of segmentations to use; bias toward larger ones\n max_segment = segments.max()\n indices = np.arange(max_segment+1)\n seg_count = 
np.array([(segments == i).sum()+1 for i in indices])\n p = seg_count/seg_count.sum()\n # try this for error correction?\n if num_samples <= max_segment:\n sampled_indices = np.random.choice(indices, p=p, size=num_samples,\n replace=False)\n else:\n warnings.warn(\"not enough unique segments; sampling WITH replacement\")\n sampled_indices = np.random.choice(indices, size=num_samples, replace=True)\n # build normalized segment occupancy masks for each segment we choose\n seg_tensor = np.stack([(segments == i)/seg_count[i] for i in sampled_indices],\n -1).astype(np.float32)\n\n if return_enough_segments:\n enough_segs = num_samples <= max_segment\n return seg_tensor, enough_segs\n return seg_tensor", "def get_synsets_rt(word: str) -> List:\n return rt.categories(word)", "def generatorToList(generator):\n # segs, postags, nertags\n\n\n '''\n words = []\n i = 0\n while i < len(segs):\n\n seg, postag, nertag = segs[i], postags[i], nertags[i]\n if postag == 'ws':\n currWord = seg\n while (i+1) < len(segs) and postags[i+1] == 'ws':\n currWord += segs[i+1]\n i += 1\n words.append((currWord, 'eng'))\n\n elif nertag == 'O':\n words.append((seg, postag))\n i += 1\n else:\n words.append((seg, nertag))\n i += 1\n return words\n '''\n words = []\n for word, flag in generator:\n words.append([word, flag])\n return words", "def index_word_pairs(word, seq):\n indices = [i for i, x in enumerate(seq) if x == word]\n res = []\n for i in indices:\n res += [(word, i)]\n return res", "def get_segments(cst):\n assert isinstance(cst, ChromStruct)\n\n # create a set of coordinates for the start and end of segments\n segs = np.load(cst.sg_files)['sg']\n end = np.cumsum(segs)\n start = np.concatenate(([0], end[:-1]))\n\n return np.column_stack((start, end)).astype(int)", "def fetch_index(self, word):\n files_ = []\n sents_ = []\n # pull dictionaries specific to the token\n for fname in self._index[word]:\n # preserve filename\n files_.append(fname)\n\n # format tokens for output\n for i, j in self._index[word][fname]:\n s = self._reader.sents(fname)[i] # list\n s[j] = '*' + s[j] + '*'\n sents_.append(' '.join(s))\n\n return (files_, sents_)", "def words(self, word):\n pass", "def segment(data):", "def get_word_pos_list(self, raw_text):\n raw_text = raw_text.strip()\n word_list = []\n pos_list = []\n # pdb.set_trace()\n seg_list = jieba.posseg.cut(raw_text,HMM=False) # 默认是精确模式\n for word, flag in seg_list:\n # remove the punctuation, we will keep punctuation as prosodic boundary\n if word in ['「', '」', '.', '-' , '', ' ', '。' , '—' , '?', ':', '、', '…',';',',',',','!']:\n continue\n word_list.append(word)\n pos_list.append(flag)\n return word_list, pos_list", "def Segments():\n for n in range(ida_segment.get_segm_qty()):\n seg = ida_segment.getnseg(n)\n if seg:\n yield seg.start_ea", "def split_words(text: str) -> typing.Iterable[Expression]:\n for token in re.split(r\"\\s+\", text):\n if token.startswith(\"$\"):\n if \":\" in token:\n # Slot with substitutions\n lhs, rhs = token[1:].split(\":\", maxsplit=1)\n yield SlotReference(text=token, slot_name=lhs, substitution=rhs)\n else:\n # Slot without substitutions\n yield SlotReference(text=token, slot_name=token[1:])\n elif \":\" in token:\n # Word with substitution\n lhs, rhs = token.split(\":\", maxsplit=1)\n yield Word(text=lhs, substitution=rhs)\n else:\n # With without substitution\n yield Word(text=token)", "def get_segments(self, sets=None):\n if sets is None:\n if self.sets is not None:\n sets = self.sets\n else:\n raise ValueError(\"sets and self.sets 
attributes are None, \\\n you need either to pass an origin argument to get_segments or \\\n to use get_filtration method before\")\n segments = []\n for s in sets:\n if self.epsilon <= s.getRelevance():\n t, a, b = s.getPosition()\n for i, seg in enumerate(segments):\n tp, ap, bp = seg\n if t >= tp and bp > a:\n bp = a\n elif t <= tp and ap < b:\n ap = b\n segments[i] = (tp, ap, bp)\n segments.append((t, a, b))\n return segments", "def text_to_parts(text: str) -> list:\n parts = []\n first_block_start, first_block_end, typee = find_first_block(text)\n parts.append(text[first_block_start : first_block_end + 1])\n if len(text) == first_block_end + 1:\n return [text]\n parts.append(text[first_block_end + 1])\n parts += text_to_parts(text[first_block_end + 2 : ])\n return parts", "def full_words(word, string, sensitive=True):\n temp_word = ''\n o = []\n start = 0\n if not sensitive:\n word = word.lower()\n string = string.lower()\n for i, char in enumerate(string):\n if char != ' ':\n temp_word += char\n if i == 0:\n start = 0\n else:\n if string[i - 1] == ' ':\n start = i\n if i == len(string) - 1:\n if temp_word == word:\n o.append([start, start + len(word)])\n else:\n if temp_word == word:\n o.append([start, start + len(word)])\n temp_word = ''\n return o", "def extract_segments(results):\n tt = [ ( parse_date(x[\"t1\"]), parse_date(x[\"t2\"]) ) for x in results[\"labels\"]+results[\"detected\"] ]\n ts = sorted(itertools.chain.from_iterable( tt ))\n t1 = parse_date(results[\"t1\"])\n if t1 < ts[0]:\n ts.insert(0, t1)\n t2 = parse_date(results[\"t2\"])\n if t2 > ts[-1]:\n ts.append(t2)\n return [ dict(t1=x[0].isoformat(), t2=x[1].isoformat()) for x in list(sliding_window(ts, 2)) ]", "def loadStopWordList(swFile):\n f = open(swFile, 'r')\n lines = f.readlines()\n f.close()\n result = list()\n for line in lines:\n sWord = line.strip('\\n')\n result.append(sWord)\n return result", "def getTokens(self):\n list = []\n for i in range(self.startIdx, self.endIdx + 1):\n token = self.sentence[i]\n list.append(token)\n return list", "def txt_to_word_list(text):\r\n return [w for w in text.split()]", "def get_word_list(file_name):\n # Read the file specified\n f = open(file_name,'r')\n lines = f.readlines()\n \n # Remove header text from lines\n curr_line = 0\n while lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line += 1\n lines = lines[curr_line + 1:]\n\n # Remove footer text from lines\n curr_line = -1\n while lines[curr_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line -= 1\n lines = lines[: curr_line]\n\n # Strip lines into words\n words = []\n for i in range(len(lines)):\n # Remove punctuation\n next_line = lines[i].translate(string.maketrans(\"\",\"\"), string.punctuation)\n next_line = next_line.lower()\n words += next_line.split()\n \n return words", "def get_segmentations(self, aids):\n sseg_list = []\n for aid in aids:\n ann = self.dset.anns[aid]\n coco_sseg = ann.get('segmentation', None)\n if coco_sseg is None:\n sseg = None\n else:\n sseg = kwimage.MultiPolygon.coerce(coco_sseg)\n sseg_list.append(sseg)\n return sseg_list", "def make_a_list_from(stuff):\n words_list = stuff.split(' ')\n return words_list", "def parts_of_speech_tags(self, tokenized_doc):\n return [(token.text, token.pos_) for token in self.parser(\n tokenized_doc)]", "def list_every_word(file_name): #considers file_name is valid\n file = open(file_name,\"r\")\n words = []\n lines = file.readlines()\n for line in lines:\n line = line.strip()\n line = line.split(\" \")\n for 
word in line:\n words.append(word)\n return words", "def getPrefixesForWord(self, word):\n prefixes = self.word_prefixes.get(word, False)\n if prefixes is not False:\n return prefixes\n prefixes = []\n if word.isalpha():\n boundary = min(5, len(word))\n for i in range(2, boundary):\n prefixes.append(word[:i])\n prefixes = tuple(prefixes)\n self.word_prefixes[word] = prefixes\n return prefixes", "def span(self, raw):\n if raw.lower() in ('*', '-', 'all'):\n raw = ':'\n results = self.spanpattern.search(raw)\n if not results:\n raise IndexError\n if not results.group('separator'):\n return [self[self._to_index(results.group('start'))]]\n start = self._to_index(results.group('start')) or 0 # Ensure start is not None\n end = self._to_index(results.group('end'))\n reverse = False\n if end is not None:\n if end < start:\n (start, end) = (end, start)\n reverse = True\n end += 1\n result = self[start:end]\n if reverse:\n result.reverse()\n return result", "def make_a_list_out_of_this(phrase):\n # below splits the phrase, which defaults at space \" \" and returns the individual elements\n # as elements within a list\n return phrase.split()", "def find(self, word):\n\n curr = self.head\n words = []\n # Do we at least contain the whole word?\n for letter in word:\n if letter in curr.children:\n curr = curr.children[letter]\n else:\n return words\n\n queue = [curr]\n\n while len(queue):\n curr = queue.pop()\n\n if \"_end\" in curr.children:\n words.append(curr.data)\n\n queue = [node\n for _, node in\n curr.children.iteritems()] + queue\n\n return words", "def _gen_segments(message):\n max_size = constants.UDP_SAFE_SEGMENT_SIZE\n count = (len(message) + max_size - 1) // max_size\n segments = (\n (count - i - 1, message[i * max_size: (i + 1) * max_size])\n for i in range(count)\n )\n return segments", "def part_of_speech(text):\n temp = nltk.pos_tag(text)\n return [word for word, tag in temp if \n (tag == \"NN\") or \n (tag == \"NNS\") or\n (tag == \"NNP\") or \n (tag == \"NNPS\")]", "def extrachar(word: str) -> Iterator[str]:\n if len(word) < 2:\n return\n\n for i in range(0, len(word)):\n yield word[:i] + word[i+1:]", "def subsentence(sentence, start, end):\r\n \r\n sentence_list=re.split(' ', sentence)\r\n return sentence_list[start:end]" ]
[ "0.7337489", "0.685515", "0.6529718", "0.6501038", "0.6476964", "0.6460025", "0.6404536", "0.63770807", "0.6358619", "0.6294323", "0.6254505", "0.6179965", "0.6169267", "0.61554486", "0.6085695", "0.6055628", "0.60535055", "0.60130686", "0.59985685", "0.5980995", "0.59705424", "0.5944051", "0.587978", "0.58674085", "0.5838219", "0.58184654", "0.5812262", "0.5794091", "0.5744386", "0.5713731", "0.5709779", "0.57096773", "0.56741655", "0.5667663", "0.56608886", "0.56608886", "0.56442314", "0.5617376", "0.5611592", "0.55925983", "0.5587635", "0.55498344", "0.553783", "0.5537318", "0.55368924", "0.5529799", "0.5528228", "0.55217475", "0.55146", "0.5507301", "0.5494833", "0.5494833", "0.5494833", "0.5494833", "0.54933465", "0.5474125", "0.54735506", "0.54414177", "0.54388523", "0.54367405", "0.5427593", "0.54133564", "0.5410876", "0.5407874", "0.54052097", "0.5400774", "0.53981626", "0.53909224", "0.5387064", "0.5386131", "0.5383627", "0.53818256", "0.53771365", "0.53653073", "0.5364933", "0.53597665", "0.53464323", "0.534414", "0.5343429", "0.5338928", "0.5337976", "0.5336225", "0.5328012", "0.5312401", "0.53104496", "0.53085375", "0.5305295", "0.53032774", "0.5302712", "0.5296613", "0.5295306", "0.5294636", "0.52903926", "0.52898514", "0.5285309", "0.5273177", "0.5270074", "0.5269049", "0.5266453", "0.52651536" ]
0.63645136
8
Returns True if `word` consists exhaustively of valid IPA segments
def validate_word(self, word, normalize=True): return not self._segs(word, include_valid=False, include_invalid=True, normalize=normalize)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_word(self, word):\n first_letter, rest = word[0], word[1:]\n\n for possible_start in self._find_letter(first_letter):\n if self._check_word(possible_start, rest):\n return True\n\n return False", "def is_valid(text):\n return is_all_word_segment_in_text(WORDS, text)", "def isShortWord(self, word):\n return len(word) < 4 and word not in self.valid_short_words", "def isWordPartOf(self,word,wordlist):\n\t\tfor w in wordlist:\n\t\t\tif w in self._part_of_badword: \n\t\t\t\treturn True \t \n\t\t\t\tif w.startswith(word) or w.endswith(word):\n\t\t\t\t\tself._part_of_badword[w] = True \n\t\t\t\t\treturn True\n\t\treturn False", "def valid(phrase):\n words = []\n series_of_words = phrase.split(' ')\n words.append(series_of_words.pop())\n for word in series_of_words:\n if word in words:\n return False\n words.append(word)\n return True", "def __contains__(self, word):\n if word in self.vocab:\n return True\n else:\n char_ngrams = compute_ngrams(word, self.min_n, self.max_n)\n return any(ng in self.ngrams for ng in char_ngrams)", "def checkWord(self, word):\n toFill = int(tapeLimit - len(word) / 2)\n tape = [self.blank] * toFill + list(word) + [self.blank] * toFill\n\n state = self.initialState\n pos = toFill\n\n while state != self.acceptState and state != self.rejectState:\n transition = self.transitions[(state, tape[pos])]\n state = transition[0]\n tape[pos] = transition[1]\n\n if transition[2] == '>':\n pos += 1\n elif transition[2] == '<':\n pos -= 1\n\n return state == self.acceptState", "def _validate_word(self, word):\n return type(word) == type('a') and set(self._letters) == set(list(word))", "def is_word(self, sequence: str) -> bool:\n\n for non_terminal in self._non_terminals:\n if non_terminal in sequence:\n return False\n\n return True", "def check(word):\n for i in range(1, target_length):\n if word[0:i] in fragments and word[i:] in fragments:\n print(\"%s + %s => %s\" % (word[0:i], word[i:], word))", "def check_word(self, word):\n\n return self.graph.is_in(word)", "def check_word(self, word):\n word = word.lower().strip()\n return not word or word in self.dictionary", "def is_stop_word(word):\n return word in final_stop_words", "def is_word(self, word):\r\n\r\n return self.data(word) is not None", "def check_words(dictionary_, start_word, stop_word):\n if dictionary_.is_real_word(start_word) is False:\n print(\"Word {} not found in the dictionary\".format(start_word))\n return False\n if dictionary_.is_real_word(stop_word) is False:\n print(\"Word {} not found in the dictionary\".format(stop_word))\n return False\n return True", "def viableWord(self, word, filledInSpaces):\r\n \r\n # Check if it fits blanks\r\n for (index, letter) in filledInSpaces:\r\n if letter != word[index]:\r\n return False\r\n\r\n # Check if it fits unused\r\n for letter in word:\r\n if letter in self.wrongLetters:\r\n return False\r\n\r\n return True", "def contains(self, word: Iterable[Terminal]) -> bool:\n return self._get_final_state(word) is not None", "def check_word(words, word):\r\n if word in words:\r\n return True\r\n else:\r\n return False", "def trifeca(word):\n if not word:\n return False\n\n for i in range(len(word)-1):\n if word[i]==word[i+1]:\n if len(word[i:])>=6:\n if word[i+2:i+6:2]==word[i+3:i+7:2]:\n return True \n return False", "def is_a_word(self, word):\n word = word.lower()\n if word in self.data:\n return True\n else:\n # for char in word:\n # if char.isnumeric():\n # return True\n word = list(word)\n numbers = len([x for x in word if x.isnumeric()])\n # # letters = len([x 
for x in word if x.isalpha()])\n if numbers >= 2 or numbers/len(word) > 0.4:\n return True\n return False", "def search(self, word: str) -> bool:\n curr_chars = self.chars\n for c in list(word):\n if c not in curr_chars:\n return False\n curr_chars = curr_chars[c]\n return self.end_of_word in curr_chars", "def valid_word(self, word, pos_tag=None):\n return (\n word not in self.stopwords\n and (self.max_word_len > len(word) > self.min_word_len)\n and (pos_tag==None or self.allowed_pos_tags==None or pos_tag not in self.allowed_pos_tags)\n )", "def search(self, word):\n if len(word) not in self.length_set:\n return False\n for i in self.mutate(word):\n if i in self.s:\n return True\n return False", "def accepts(self, word: Iterable[str]) -> bool:\n if self._enfa is None:\n self._enfa = self.to_epsilon_nfa()\n return self._enfa.accepts(word)", "def check(self, s: str, mem: dict):\n dp = [False for _ in range(len(s)+1)]\n dp[0] = True\n for i in range(1, len(s)+1):\n for j in range(i):\n if dp[j] and s[j:i] in mem:\n dp[i] = True\n return dp[-1]", "def search(self, word: str) -> bool:\n tries = [self.trie]\n for c in word:\n if c != '.':\n tries = [\n trie[c] for trie in tries if c in trie\n ]\n else:\n tries = [\n v for trie in tries for v in trie.values() if v\n ]\n\n if not tries:\n return False\n\n return any(None in trie for trie in tries)", "def word_check(word):\n word1 = word[1:]\n if word1 not in word_dict: return False\n if not homophones (word, word1): return False\n \n \n word2 = word[0] + word[2:]\n if word2 not in word_dict: return False\n if not homophones(word, word2): return False\n\n return True", "def checkWord(self, word):\n\t\treturn self.root.checkString(u' ' + word);", "def is_word(trie, string: str) -> bool:\n return any(w == string for w in trie)", "def _check_pauli_word(pauli_word):\n return all(pauli in PauliRot._ALLOWED_CHARACTERS for pauli in set(pauli_word))", "def search(self, word: str) -> bool:\n\n temp = self.start\n\n for i in range(len(word)):\n \n if temp.children[ord(word[i]) - ord('a')] is None:\n return False\n temp = temp.children[ord(word[i])-ord('a')]\n if i+1 == len(word) and temp.end == True:\n return True\n\n return False", "def check_word(word):\r\n if word in word_master:\r\n valid = True\r\n else:\r\n valid = False\r\n return valid", "def hasAnagramPlusOne(word):\n c = AnagramDB.charcount(word)\n ref = np.array(c)\n mDiff = AnagramDB._charmatrix - ref\n gt = np.all(mDiff >= 0, 1)\n sm = np.sum(mDiff, 1) == 1\n return np.any(gt & sm)", "def search(self, word):\n for i in xrange(len(word)):\n w = word[:i] + '*' + word[i+1:]\n if w in self.dict and (len(self.dict[w]) > 1 or word[i] not in self.dict[w]): return True \n return False", "def has_word(self, word)->bool:\n if len(word) == 1:\n chars = word + GNode.CHAR_EOW\n else:\n chars = word[0] + GNode.CHAR_REV + word[1:] + GNode.CHAR_EOW\n cursor = self.root\n for c in chars.lower():\n if c not in cursor.children:\n return False\n else:\n cursor = cursor.children[c]\n return True", "def isUnique(self, word):\n if len(word) < 3:\n abbrev = word\n else:\n abbrev = word[0] + str(len(word) - 2) + word[-1]\n if not abbrev in self.abbrev_dict:\n return True\n elif word in self.abbrev_dict[abbrev] and len(self.abbrev_dict[abbrev]) == 1:\n return True\n else:\n return False", "def avoids(word, forbidden):\n for letter in word:\n if letter in forbidden:\n return False\n return True", "def isWordSet(self):\n return len(self.getWord()) != 0", "def check_word(word):\n\n return bool(re.match(r'^[a-z]+$', word))", 
"def check(self, word: str) -> bool:\n for s in (word, word.lower(), word.capitalize()):\n if s in self.words or s in self.ignored_words:\n return True\n return False", "def basic_check(word):\n if word[-1] == \"b\" or word[-1] == \"g\":\n return False\n consonant_counter = 0\n for char in word:\n if char in VOWELS:\n consonant_counter = 0\n else:\n consonant_counter += 1\n if consonant_counter >= 3:\n return False\n return True", "def has_word(self, word):\n return word in self.word_set", "def has_word(self, word):\n try:\n iterator = iter(str(word))\n for symbol in iterator:\n if symbol not in self:\n raise Exception(symbol + \" is not in alphabet\")\n except Exception as error:\n print('Alphabet Error:', error)\n return False\n else:\n return True", "def isUnique(self, word):\n if len(word) <= 1:\n n = word\n else:\n n = word[0] + str(len(word) - 2) + word[-1] #Get the abbrviation.\n if n not in self.abbrdict or (self.abbrdict[n] == 1 and word in self.origdict): #If it is not in abbrdict or the abbrevation count is 1 and the word has appeared in dictionary, return true.\n return True\n else: #Otherwise, return false.\n return False", "def isPresent(self, word):\n\t\treturn word in self.link_words", "def can_make_word(word, block_collection=blocks):\n if not word:\n return False\n\n blocks_remaining = block_collection[:]\n for char in word.upper():\n for block in blocks_remaining:\n if char in block:\n blocks_remaining.remove(block)\n break\n else:\n return False\n return True", "def stopword(wstr):\n w = wstr.strip()\n if len(w) < 4:\n return True\n return False", "def hasAnagramPlusTwo(word):\n c = AnagramDB.charcount(word)\n ref = np.array(c)\n mDiff = AnagramDB._charmatrix - ref\n gt = np.all(mDiff >= 0, 1)\n sm = np.sum(mDiff, 1) == 2\n return np.any(gt & sm)", "def search(self, word):\n length = len(word)\n if length not in self.dic:\n return False\n else:\n candidateList = self.dic[length]\n for candidate in candidateList:\n for i in xrange(length):\n if candidate[i] != word[i]:\n if candidate[i+1:] == word[i+1:]:\n return True\n else:\n break\n return False", "def filter1(word):\n if not word: return False\n w = word.lower()\n if w in STOPWORDS: return False\n return True", "def search(self, word):\n if not word:\n return False\n if word[0] not in self.trie:\n return False\n cur = self.trie[word[0]]\n for char in word[1:]:\n if char not in cur.nexts:\n return False\n cur = cur.nexts[char]\n return (cur and cur.isTerm) == True", "def isAmbiguous(self, word):\n\t\treturn word in disambig_const.DISAMBIGUATATION_TABLE;", "def validate(self, word):\n\n return self.valid_word(word)", "def validate(self, word):\n\n return self.valid_word(word)", "def search(self, word):\n length = len(word)\n if length == 1:\n for letter in string.ascii_lowercase:\n key = \"{}/{}\".format(1, letter)\n if key in self.origin and letter != word:\n return True\n return False\n\n key = \"{}/{}\".format(len(word), word[0])\n ls = self.origin.get(key, [])\n if len(ls) == 0:\n return False\n\n for origin in ls:\n if self.only_modify_one_char(word, origin):\n return True\n return False", "def validate_word(word: str) -> bool:\n if word:\n url = f'{OXFORD_DICT_BASE_URL}{OXFORD_DICT_ENTRY_URL}/en-us/{word.lower()}'\n headers = {\n 'app_id': settings.OXFORD_APP_ID,\n 'app_key': settings.OXFORD_API_KEY,\n }\n\n logger.info(f'validating {word} against oxford dictionary...')\n response = requests.get(\n url,\n headers=headers,\n )\n\n if response.status_code == status.HTTP_200_OK:\n return True\n else:\n return 
False\n\n return False", "def valid_anagram(phrase):\n words = []\n series_of_words = phrase.split(' ')\n words.append(''.join(sorted(series_of_words.pop())))\n for word in series_of_words:\n word = ''.join(sorted(word))\n if word in words:\n return False\n words.append(word)\n return True", "def search(self, word: str) -> bool:\n tmp = self.root\n for i in range(len(word)): \n if word[i] == \".\":\n valid = False\n for nxt in tmp.seq.keys():\n valid = valid or self.search(word[:i] + nxt + word[i+1:])\n \n if valid:\n return True\n return False\n \n if word[i] not in tmp.seq:\n return False \n tmp = tmp.seq[word[i]]\n \n return tmp.value == word", "def is_stop_word(self, word):\n pass", "def is_stopword(self, word, language):", "def is_isogram(word):\n word = [char.lower() for char in word if char.isalpha()]\n for char in word:\n if word.count(char) > 1:\n return False\n return True", "def search(self, word):\n level = self.trie\n for c in word:\n if c in level:\n level = level[c]\n else:\n return False\n return self.end in level", "def check_present_and_add(self, word):\n\n current_node = self.root_node\n is_new_word = False\n\n # iterate through trie adding missing notes\n for char in word:\n if char not in current_node:\n is_new_word = True\n current_node[char] = {}\n current_node = current_node[char]\n \n # mark end of word so that words that are prefixes of present words are not\n # returned - i.e. each word must have an explicit \"End of Word\" marker\n if \"End of Word\" not in current_node:\n is_new_word = True\n current_node[\"End on Word\"] = {}\n\n return is_new_word", "def search(self, word: str) -> bool:\n node = self.root\n for char in word:\n if char not in node:\n return False\n node = node[char]\n return self.end_of_word in node", "def is_abecedarian(word):\n pass", "def read(self, word: str) -> Union[bool, str]:\n start_sets = self._sets_start()\n try:\n casuistic = [self._read(word, i.state) for i in start_sets]\n except ValueError:\n return \"No Has intraducido una cadena valida\"\n return any(casuistic)", "def contains (self,phrase,chars):\r\n\r\n for x in chars:\r\n\r\n if x in phrase:\r\n return True\r\n return False", "def search(self, word: str) -> bool:\n\n # # for candidate in self.buckets[len(word)]:\n # # for a, b in zip(word, candidate):\n # # result = any(sum(a!=b))\n return any(sum(a!=b for a, b in zip(word, candidate)) == 1\n for candidate in self.buckets[len(word)])\n #\n # for candidate in self.buckets[len(word)]:\n # sum = 0\n # for a, b in zip(word, candidate):\n # sum += (a!=b)\n # if sum == 0:\n # return True\n # return False", "def is_legit_peptide_sequence(record_seq: str) -> bool:\n aas = {\n \"A\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n \"G\",\n \"H\",\n \"I\",\n \"K\",\n \"L\",\n \"M\",\n \"N\",\n \"P\",\n \"Q\",\n \"R\",\n \"S\",\n \"T\",\n \"V\",\n \"W\",\n \"Y\",\n \"*\",\n }\n seq_symbols = {s.upper() for s in record_seq}\n return seq_symbols.issubset(aas)", "def include_word(word, chardict):\n if (all(char in chardict.keys() for char in word)) & (len(word)<=25):\n # Some word2vec entries are all capitals and generally are acronyms.\n # This is unlikely to be learnable\n if not word.isupper():\n return True\n\n return False", "def onlyuse(word, letters):\r\n truth = True\r\n for letter in word:\r\n truth = letter in letters and truth\r\n return truth", "def _is_real_word(self, token):\n return not (token in self._non_words)", "def check_in_dictionary(text):\n # check if text is printable\n if not text.isprintable():\n return False\n\n # if 
there are all complete words in the text\n if text[-1] == ' ':\n # check if all words exist in the dictionary\n if not words_in_dictionary(text.split()):\n return False\n\n # if the last word is incomplete\n else:\n # check if all words but the last exists in the dictionary\n text = text.split()\n if not words_in_dictionary(text[:-1]):\n return False\n\n # checks if there is any word in the dictionary which starts with the\n # last word in the plaintext\n word = text[-1].lower()\n raw_word = word.replace(\"'\", '').replace('.', '')\n return any(a for a in DICTIONARY_LOWER if a.startswith(word)) or \\\n any(a for a in DICTIONARY_LOWER if a.startswith(raw_word))\n\n return True", "def search(self, word):\n lenw = len(word)\n if lenw not in self.bag: return False\n return any([self.equal_to(word, item) for item in self.bag[lenw]])", "def negative(word: str) -> bool:\n\n negatives = ['no', 'negative', 'nah']\n return negatives.__contains__(word)", "def check_word(self, word):\n\n if not self.words:\n return None\n word = ''.join(word)\n return next((True for w in self.words if w == word), False)", "def is_valid_lemma(self, word):\n # Expression for finding links. Return false if one is found\n # Expression found at https://stackoverflow.com/questions/27515969/regular-expression-validation-php/27516155\n expression = re.compile(r'(http|https|ftp|ftps)\\:\\/\\/[a-zA-Z0-9\\-\\.]+\\.[a-zA-Z]{2,3}(\\/\\S*)?')\n if expression.search(word):\n return False\n # Remove anything but alphanumeric, spaces, and '\n word = re.sub('[^a-zA-Z\\'\\d\\s]', '', word)\n word = word.lower()\n for stop_word in stop_words:\n # If input word matches a stop word, return false\n if stop_word == word:\n return False\n return True", "def search(self, word: str) -> bool:\n m = len(word)\n\n for dict_word in self.dict[m]:\n i = 0\n while i < m:\n if (word[i] == dict_word[i]) or (word[i] == '.'):\n i += 1\n else:\n break\n\n if i == m:\n return True\n\n return False", "def search(self, word: str) -> bool:\n temp=self.root\n \n for char in word:\n if(not temp.children[ord(char)-ord('a')]):\n return False\n temp=temp.children[ord(char)-ord('a')]\n \n if(temp and temp.endOfWord==True):\n return True\n \n return False", "def uses_only(word, available):\n for letter in word:\n if letter not in available:\n return False\n return True", "def contains(self, word: Iterable[Terminal]) -> bool:\n # Remove epsilons\n word = [to_terminal(x) for x in word if x != Epsilon()]\n if not word:\n return self.generate_epsilon()\n cyk_table = CYKTable(self, word)\n return cyk_table.generate_word()", "def validate(self, word):\n\n # Strip unwanted characters\n clean = re.sub(r\"[^a-zA-Z- ]+\", \"\", word).strip().lower()\n if len(clean) <= 1:\n return None # Word too short\n\n # Generate candidates for possible compound words\n # \"valid\" -> [\"valid\"]\n # \"cul de sac\" -> [\"cul-de-sac\", \"culdesac\"]\n # \"top-hat\" -> [\"top-hat\", \"tophat\"]\n candidates = []\n if \" \" in clean:\n candidates.append(re.sub(r\" +\", \"-\", clean))\n candidates.append(re.sub(r\" +\", \"\", clean))\n else:\n candidates.append(clean)\n if \"-\" in clean:\n candidates.append(re.sub(r\"-+\", \"\", clean))\n for cand in candidates:\n if cand in self.vectors:\n return cand # Return first word that is in model\n return None # Could not find valid word", "def check_if_replacable(self, word):\n word_tag = pos_tag([word])\n if 'NN' in word_tag[0][1] or 'JJ' in word_tag[0][1] or 'VB' in word_tag[0][1]:\n return True\n else:\n return False", "def search(self, word):\n 
for wc in self.get_wildcards(word):\n # Don't forget word not in self.all_words\n if wc in self.wc_dict and (self.wc_dict[wc] > 1 or word not in self.all_words) :\n return True\n return False", "def is_valid(line):\n word_list = line.split()\n init_word_count = len(word_list)\n\n # a set to hold the words\n non_dup_words = set(word_list)\n non_dup_count = len(non_dup_words)\n\n return (init_word_count == non_dup_count)", "def search(self, word: str) -> bool:\r\n node=self.root\r\n for c in word:\r\n if c not in node:\r\n return False\r\n else:\r\n node = node[c]\r\n if self.end_of_words in node:\r\n return True\r\n else:\r\n return False", "def search(self, word):\n if not word:\n return False\n if '.' not in word:\n return word in self.word_dict[len(word)]\n for v in self.word_dict[len(word)]:\n for i, ch in enumerate(word):\n if ch != v[i] and ch != '.':\n break\n else:\n return True\n return False", "def is_valid_word(word, hand, word_list):\n failure=True\n word=word.lower()\n if word not in word_list:\n failure=False\n for i in word:\n w=hand.get(i,0)\n if w==0:\n failure=False\n break\n return failure", "def check_word_format(word):\n pattern = re.compile(\"[a-z]+\")\n if not(1 <= len(word) <= 5):\n print(\"BAD FORMAT: Word must be 1-5 letters long.\")\n return False\n if not(pattern.fullmatch(word)):\n print(\"BAD FORMAT: Word must be only letters (a-z, A-Z)\")\n return False\n return True", "def isValid(text):\n return bool(re.search(r'\\b(start|stop) (look|watch|guard)ing\\b', text, re.IGNORECASE))", "def isExcluded(self, word):\n #print word\n return ((self.isExcludedWord(word) != False) \n or (self.isMeasure(word) != False) \n or (self.isAllDigits(word) != False) \n or (self.isShortWord(word) != False))", "def and_sum (self, phrase):\r\n for x in phrase:\r\n if not x:\r\n return False\r\n return True", "def search(self, word: str) -> bool:\n node = self.head\n for c in word:\n if c not in node.next:\n return False\n node = node.next[c]\n return node.valid", "def isExcludedFromMerge(self, word):\n #print word\n return ((self.isExcludedWord(word) != False) \n or (self.isMeasure(word) != False) \n or (self.isShortWord(word) != False))", "def avoids (word, frbdn_letters):\n for letter in frbdn_letters:\n if letter in word: \n return False\n return True", "def has_letter(word):\r\n for char in word:\r\n if char.isalpha():\r\n return True\r\n return False", "def search(self, word: str) -> bool:\n return self.trie.search(word + '#', self.trie.trie)", "def search(self, word):\r\n t = self.trie\r\n for w in word: \r\n if w not in t: \r\n return False\r\n t = t[w]\r\n if '#' in t:\r\n return True\r\n return False", "def is_unimportant(word):\n return word in ['.', '!', ',', ] or '\\'' in word or word in stop_words", "def search(self, word):\n for candidate in self.candidates(word):\n if self.neighbors[candidate] > 1:\n return True\n elif self.neighbors[candidate] == 1 and word not in self.word_set:\n return True\n return False" ]
[ "0.7004589", "0.6847744", "0.6789878", "0.6735888", "0.6684416", "0.6627616", "0.651143", "0.6505042", "0.6428736", "0.6355093", "0.63382703", "0.63290393", "0.63254887", "0.63188684", "0.631191", "0.6290618", "0.62664974", "0.626073", "0.6243883", "0.62189406", "0.6189003", "0.61609536", "0.6149941", "0.6140094", "0.61359596", "0.6135147", "0.6133048", "0.6120673", "0.61062187", "0.6092864", "0.60831356", "0.6082807", "0.6080772", "0.6063288", "0.6053527", "0.6000352", "0.5992046", "0.59916836", "0.598426", "0.5981388", "0.5973514", "0.59667915", "0.596316", "0.59626865", "0.5934249", "0.5931766", "0.59201634", "0.59014654", "0.5867315", "0.5844014", "0.58267605", "0.5818745", "0.5803575", "0.5803575", "0.58031636", "0.57982147", "0.57942796", "0.5792327", "0.5791508", "0.5784517", "0.57826865", "0.5778396", "0.5764454", "0.57396644", "0.57392603", "0.5731233", "0.5706396", "0.5697645", "0.5697073", "0.56838685", "0.56836677", "0.5679131", "0.5678532", "0.5677395", "0.5673852", "0.56716496", "0.56668335", "0.56667393", "0.56654006", "0.5656836", "0.5649446", "0.56457597", "0.56456906", "0.5643023", "0.5642043", "0.564183", "0.56263894", "0.5613117", "0.560739", "0.55968064", "0.55950236", "0.55933964", "0.559236", "0.55892086", "0.55866015", "0.55865204", "0.558472", "0.5575714", "0.55754054", "0.5569614" ]
0.69290006
1
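The `validate_word` entry above relies on a private `_segs` helper that is not shown in this row. As an illustration only — a minimal self-contained sketch, not the original library's implementation; the toy inventory, helper names, and the greedy longest-match strategy are all assumptions — a standalone Python version of "the word consists exhaustively of valid IPA segments" could look like this:

# Toy IPA inventory (an assumption for illustration; a real feature table would load one from data).
INVENTORY = {"p", "pʰ", "t", "k", "a", "e", "i", "o", "u", "n", "s"}
MAX_SEG_LEN = max(len(s) for s in INVENTORY)

def toy_segments(word):
    """Greedy longest-match split of `word`; characters outside the inventory stay as singletons."""
    i, out = 0, []
    while i < len(word):
        for span in range(min(MAX_SEG_LEN, len(word) - i), 0, -1):
            if word[i:i + span] in INVENTORY:
                out.append(word[i:i + span])
                i += span
                break
        else:
            out.append(word[i])  # not a valid segment
            i += 1
    return out

def toy_validate_word(word):
    """True only if every greedy segment is a known IPA segment."""
    return all(seg in INVENTORY for seg in toy_segments(word))

print(toy_segments("pʰone"))       # ['pʰ', 'o', 'n', 'e']
print(toy_validate_word("pʰone"))  # True
print(toy_validate_word("xylo"))   # False: 'x', 'y', 'l' are outside the toy inventory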
Return a list of Segment objects corresponding to the segments in word.
def word_fts(self, word, normalize=True): return [self.fts(ipa, False) for ipa in self.ipa_segs(word, normalize)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def segment(text: str) -> List[str]:\n\n if not text or not isinstance(text, str):\n return []\n\n return _cut_subword(_cut_etcc.word_tokenize(text))", "def ipa_segs(self, word, normalize=True):\n if normalize:\n word = FeatureTable.normalize(word)\n return self._segs(word, include_invalid=False, normalize=normalize)", "def get_segments(input_path):\n with open(input_path, 'r') as segments_file:\n segments = []\n for line in segments_file:\n words = line.split('\\t')\n sg_dict = {}\n sg_dict['start'] = float(words[0].replace(',', '.'))\n sg_dict['end'] = float(words[1].replace(',', '.'))\n sg_dict['class'] = words[2][:-1]\n segments.append(sg_dict)\n return segments", "def segment(text, WORDS) -> List[Word]:\n Pword = Bag(WORDS)\n if not text: \n return []\n else:\n candidates = ([first] + segment(rest, WORDS)\n for (first, rest) in splits(text, 1))\n return max(candidates, key=lambda x: Pwords(x, Pword))", "def segs_safe(self, word, normalize=True):\n if normalize:\n word = FeatureTable.normalize(word)\n return self._segs(word, include_invalid=True, normalize=normalize)", "def lists_and_segments(self):\n response = self._get(self.uri_for(\"listsandsegments\"))\n return json_to_py(response)", "def find_segment(bv: binaryninja.binaryview.BinaryView, name: str) -> List[Tuple[int, int]]:\n result = []\n for sn in bv.sections:\n sec = bv.get_section_by_name(sn)\n if sec.name == name:\n result.append((sec.start, sec.end))\n return result", "def segment(text: str, model: str = \"attacut-sc\") -> List[str]:\n if not text or not isinstance(text, str):\n return []\n\n _tokenizer = AttacutTokenizer(model)\n\n return _tokenizer.tokenize(text)", "def __init__(self, word_string, feature_table):\n self.word_string = word_string\n self.feature_table = feature_table\n self.segments = [Segment(char, self.feature_table) for char in self.word_string]", "def segments(self):\r\n return Segments(self)", "def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" \")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents", "def segments(self):\n return self._segments", "def getSegments(self):\n l = len(self.points)\n return [Segment(self.points[i % l], self.points[(i + 1) % l], \\\n color=self.side_color, width=self.side_width) for i in range(l)]", "def get_segments(self):\n\t\tos.chdir(self.segment_path)\n\t\tfor path in glob.glob(\"%s/*.seg\" % self.segment_path):\n\t\t\t_file = os.path.split(path)[1]\n\t\t\tdae = DiscreetArchiveElement(self,_file,element_type='segment')\n\t\t\tself.elements.append(dae)\n\t\treturn True", "def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))", "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def get_results_from_segmentation(doc_id: int, project_id: int) -> List[dict]:\n session = konfuzio_session()\n\n segmentation_url = 
get_document_segmentation_details_url(doc_id, project_id, action='segmentation')\n segmentation_result = retry_get(session, segmentation_url)\n segmentation_result = segmentation_result.json()\n\n return segmentation_result", "def get_all(self):\n return self._segments", "def get_segmentations(self, aids):\n sseg_list = []\n for aid in aids:\n ann = self.dset.anns[aid]\n coco_sseg = ann.get('segmentation', None)\n if coco_sseg is None:\n sseg = None\n else:\n sseg = kwimage.MultiPolygon.coerce(coco_sseg)\n sseg_list.append(sseg)\n return sseg_list", "def sentence_segment(self, doc, candidate_pos, lower):\n sentences = []\n for sent in doc.sents:\n selected_words = []\n for token in sent:\n # Store words only with cadidate POS tag\n if token.pos_ in candidate_pos and token.is_stop is False:\n if lower is True:\n selected_words.append(token.text.lower())\n else:\n selected_words.append(token.text)\n sentences.append(selected_words)\n return sentences", "def segment(text):\n if not text: return []\n candidates = ([first]+segment(rest) for first,rest in splits(text))\n return max(candidates, key=Pwords)", "def __load_segments(self):\r\n self.__segments = []\r\n if len(self.points) > 1:\r\n s = self.points[0]\r\n k = 1\r\n while k < len(self.points):\r\n e = self.points[k]\r\n self.__segments.append(Segment(s, e))\r\n s = e \r\n k += 1\r\n e = self.points[0]\r\n self.__segments.append(Segment(s, e))", "def segment_words(self, string):\n words = []\n\n word_begin = 0\n while word_begin < len(string):\n word_options = self.find_prefixes(string[word_begin:])\n if len(word_options) > 0:\n best_word = self.unigram_provider.get_most_frequent_word(word_options)\n else:\n best_word = string[word_begin:word_begin+1]\n words.append(best_word)\n word_begin += len(best_word)\n\n return words", "def getSegments(self) -> List[int]:\n ...", "def list_segment_names(self) -> PagingList[str]:\n return PagingList(self._generate_segment_names, 128)", "def sentence_segment(self, doc, candidate_pos, lower,bigrams,trigrams):\n sentences = []\n for sent in doc.sents:\n selected_words = []\n bigram_words=[]\n for token in sent:\n bigram_words.append(token.text)\n # Store words only with cadidate POS tag\n if token.pos_ in candidate_pos and token.is_stop is False:\n if lower is True:\n selected_words.append(token.text.lower())\n else:\n selected_words.append(token.text)\n if bigrams==True:\n for i in range(len(sent)-1):\n if sent[i].pos_ in candidate_pos and sent[i].is_stop is False and sent[i+1].pos_ in candidate_pos and sent[i+1].is_stop is False:\n if lower is True:\n selected_words.append(sent[i].text.lower())\n else:\n selected_words.append(str(sent[i].text+\" \"+sent[i+1].text))\n if trigrams==True:\n for i in range(len(sent)-2):\n if sent[i].pos_ in candidate_pos and sent[i].is_stop is False and sent[i+1].pos_ in candidate_pos and sent[i+1].is_stop is False and sent[i+2].pos_ in candidate_pos and sent[i+2].is_stop is False:\n if lower is True:\n selected_words.append(sent[i].text.lower())\n else:\n selected_words.append(str(sent[i].text+\" \"+sent[i+1].text+\" \"+sent[i+2].text))\n sentences.append(selected_words)\n return sentences", "def segment_to_vector(self, seg, normalize=True):\n return self.fts(seg, normalize).strings()", "def _get_seg_features(string):\n seg_feature = []\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def segments(seg_type=None):\n\n for index in 
xrange(idaapi.get_segm_qty()):\n seg = Segment(index=index)\n if (seg_type is None) or (seg.type == seg_type):\n yield Segment(index=index)", "def lookup(self, word):\n word = word.lower()\n if self.stemmer:\n word = self.stemmer.stem(word)\n \n return [self.documents.get(id, None) for id in self.index.get(word)]", "def get_seg_features(string):\n seg_feature = []\n\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def subword_tokenize(self, word: str) -> List[str]:\r\n end_idx = min([len(word), self.ngram_max])\r\n sw_tokens = [self.SOW]\r\n start_idx = 0\r\n\r\n while start_idx < len(word):\r\n subword = word[start_idx:end_idx]\r\n if subword in self.bpe_vocab:\r\n sw_tokens.append(subword)\r\n start_idx = end_idx\r\n end_idx = min([len(word), start_idx + self.ngram_max])\r\n elif len(subword) == 1:\r\n sw_tokens.append(self.UNK)\r\n start_idx = end_idx\r\n end_idx = min([len(word), start_idx + self.ngram_max])\r\n else:\r\n end_idx -= 1\r\n\r\n sw_tokens.append(self.EOW)\r\n return sw_tokens", "def get_segments(self, sets=None):\n if sets is None:\n if self.sets is not None:\n sets = self.sets\n else:\n raise ValueError(\"sets and self.sets attributes are None, \\\n you need either to pass an origin argument to get_segments or \\\n to use get_filtration method before\")\n segments = []\n for s in sets:\n if self.epsilon <= s.getRelevance():\n t, a, b = s.getPosition()\n for i, seg in enumerate(segments):\n tp, ap, bp = seg\n if t >= tp and bp > a:\n bp = a\n elif t <= tp and ap < b:\n ap = b\n segments[i] = (tp, ap, bp)\n segments.append((t, a, b))\n return segments", "def segments(self):\n L = len(self.vertices)\n return itertools.chain((self._subset((i,i+1)) for i in range(len(self)-1)),\n (self._subset((L-1,0)),))", "def get_synsets_rt(word: str) -> List:\n return rt.categories(word)", "def fetch_index(self, word):\n files_ = []\n sents_ = []\n # pull dictionaries specific to the token\n for fname in self._index[word]:\n # preserve filename\n files_.append(fname)\n\n # format tokens for output\n for i, j in self._index[word][fname]:\n s = self._reader.sents(fname)[i] # list\n s[j] = '*' + s[j] + '*'\n sents_.append(' '.join(s))\n\n return (files_, sents_)", "def parse_segments(self):\n segs = self.unixtext.split(\"$$\")\n for seg in segs:\n self.segments.append(TextProductSegment(seg, self))", "def _process(self, word: str) -> List[str]:\n # if a blank arrives from splitting, just return an empty list\n if len(word.strip()) == 0:\n return []\n word = self.convert_consonantal_i(word)\n my_word = \" \" + word + \" \"\n letters = list(my_word)\n positions = []\n for dipth in self.diphthongs:\n if dipth in my_word:\n dipth_matcher = re.compile(\"{}\".format(dipth))\n matches = dipth_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n matches = self.kw_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n letters = string_utils.merge_next(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions.clear()\n if not self._contains_vowels(\"\".join(letters)):\n return [\n \"\".join(letters).strip()\n ] # occurs when only 'qu' appears by ellision\n positions = self._starting_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_right(letters, positions)\n letters = 
string_utils.remove_blanks(letters)\n positions = self._starting_consonants_only(letters)\n positions = self._ending_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_left(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._ending_consonants_only(letters)\n positions = self._find_solo_consonant(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_solo_consonant(letters)\n positions = self._find_consonant_cluster(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_consonant_cluster(letters)\n return letters", "def create_segments(data, bbox):\n\n nb, sb, eb, wb = bbox\n G = ox.graph_from_bbox(nb, sb, eb, wb)\n dist = 0.0001\n edges = ox.utils_graph.graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)\n edges['index'] = range(1, len(edges)+1)\n\n # Get closest point on each segment\n lng = data['long']\n lat = data['lat']\n ne, dist = ox.distance.nearest_edges(G, lng, lat, return_dist=True)\n\n # Format edges to upload to mapbox\n all_edges = []\n all_lines = []\n for n in ne:\n u, v, key = n\n edge = edges.loc[(u, v, key), \"geometry\"]\n index = edges.loc[(u, v, key), \"index\"]\n if edge not in all_edges:\n feature = Feature(id=int(index), geometry=edge)\n all_edges.append(edge)\n all_lines.append(feature)\n all_lines = FeatureCollection(all_lines)\n\n return all_lines", "def similar_words(word, morph_model, lm, lm_segmented):\n similar_words = []\n nbest = min(5, len(word))\n segmentations = [morph_model.viterbi_nbest(word, nbest)[i][0]\n for i in range(nbest)]\n for segmented_word in segmentations:\n word_to_search = ''.join(segmented_word[:-1]) #word without last segment\n #word_to_search = max(segmented_word, key=len) #the longest segment \n #(does not work for Finnish as well as for Swedish)\n if word_to_search in lm.vocab:\n if word_to_search not in similar_words:\n similar_words.append(word_to_search)\n \n # If the word without its last morpheme is not found from the\n # vocabulary, search for possible word continuations using the \n # the language model trained on word morphemes\n else:\n possible_continuations = [next_morph[0] for next_morph in\n lm_segmented.counts.__getitem__\n (segmented_word[:-1]).most_common(3)]\n for morph in possible_continuations:\n similar_word = word_to_search + morph\n if similar_word not in similar_words:\n if similar_word in lm.vocab:\n similar_words.append(similar_word)\n return similar_words", "def _get_segments(img, mean_scale=1000, num_samples=16, return_enough_segments=False):\n # randomly choose the segmentation scale\n scale = np.random.uniform(0.5*mean_scale, 1.5*mean_scale)\n # run heuristic segmentation\n segments = skimage.segmentation.felzenszwalb(img, scale=scale,\n min_size=int(scale))\n # sample a set of segmentations to use; bias toward larger ones\n max_segment = segments.max()\n indices = np.arange(max_segment+1)\n seg_count = np.array([(segments == i).sum()+1 for i in indices])\n p = seg_count/seg_count.sum()\n # try this for error correction?\n if num_samples <= max_segment:\n sampled_indices = np.random.choice(indices, p=p, size=num_samples,\n replace=False)\n else:\n warnings.warn(\"not enough unique segments; sampling WITH replacement\")\n sampled_indices = np.random.choice(indices, size=num_samples, replace=True)\n # build 
normalized segment occupancy masks for each segment we choose\n seg_tensor = np.stack([(segments == i)/seg_count[i] for i in sampled_indices],\n -1).astype(np.float32)\n\n if return_enough_segments:\n enough_segs = num_samples <= max_segment\n return seg_tensor, enough_segs\n return seg_tensor", "def surface_labelled_data_preparation_pipeline(word_list: [str]):\n X = []\n\n for word in word_list:\n segments = word.split('-')\n segment_features = []\n for i in range(len(segments)):\n features = {}\n\n segment_length = len(segments[i])\n features['length'] = segment_length\n\n features['segment.lower()'] = segments[i].lower()\n features['pos_in_word'] = i\n\n if segment_length % 2 == 0:\n features['even'] = 1\n else:\n features['odd'] = 1\n\n features['begin'] = segments[i][0]\n features['end'] = segments[i][len(segments[i]) - 1]\n\n try:\n features['prev_segment'] = segments[i - 1]\n except IndexError:\n features['prev_segment'] = ''\n # continue\n\n try:\n features['next_segment'] = segments[i + 1]\n except IndexError:\n features['next_segment'] = ''\n\n if segments[0].isupper():\n features['start_upper'] = 1\n else:\n features['start_lower'] = 1\n\n if segments[0] in 'aeiou':\n features['first_vowel'] = 1\n else:\n features['first_const'] = 1\n\n segment_features.append(features)\n\n X.append(segment_features)\n\n return X", "def get_trees(self, word): # -> list:\r\n raise NotImplementedError", "def slice(self, word):\n # Short words aren't hyphenated.\n if len(word) <= 4:\n return [word]\n # If the word is an exception, get the stored points.\n if word.lower() in self.exceptions:\n points = self.exceptions[word.lower()]\n else:\n work = '.' + word.lower() + '.'\n points = [0] * (len(work) + 1)\n for i in range(len(work)):\n t = self.tree\n for c in work[i:]:\n if c in t:\n t = t[c]\n if None in t:\n p = t[None]\n for j in range(len(p)):\n points[i + j] = max(points[i + j], p[j])\n else:\n break\n # No hyphens in the first two chars or the last two.\n points[1] = points[2] = points[-2] = points[-3] = 0\n\n # Examine the points to build the pieces list.\n pieces = ['']\n for c, p in zip(word, points[2:]):\n pieces[-1] += c\n if p % 2:\n pieces.append('')\n return pieces", "def split_word_in_all_comps(self, term: str) -> List[str]:\n all_stems = []\n\n words = term.split()\n for word in words:\n stems = self.decompound(word)\n all_stems.extend(stems)\n\n for stem in stems:\n more_stems = self.split_word_in_all_comps(stem)\n all_stems.extend(more_stems)\n\n return all_stems", "def iter_segments(self):\n return\n yield", "def Segments():\n for n in range(ida_segment.get_segm_qty()):\n seg = ida_segment.getnseg(n)\n if seg:\n yield seg.start_ea", "def extract_segments(results):\n tt = [ ( parse_date(x[\"t1\"]), parse_date(x[\"t2\"]) ) for x in results[\"labels\"]+results[\"detected\"] ]\n ts = sorted(itertools.chain.from_iterable( tt ))\n t1 = parse_date(results[\"t1\"])\n if t1 < ts[0]:\n ts.insert(0, t1)\n t2 = parse_date(results[\"t2\"])\n if t2 > ts[-1]:\n ts.append(t2)\n return [ dict(t1=x[0].isoformat(), t2=x[1].isoformat()) for x in list(sliding_window(ts, 2)) ]", "def stokenize(txt, StopWords):\n Tokens = tokenize(txt)\n UnStopped = [t for t in Tokens if t not in StopWords]\n Stokens = [ps.stem(w) for w in UnStopped] # Stokens = Stemmed Tokens, list of all stokens in the txt\n \n return Stokens", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n 
points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def get_searched_pair_words(self):\n words = [word for word in self.searched_words if \" \" in word]\n return [Word(word).singularize() for word in words]", "def get_from_word_edges(self, word: str) -> Set[str]:\n all_edges = set()\n\n for def_dict in self.word_dictionary[word]:\n processed_def = self.get_filtered_set_tokens(\n definition=def_dict[\"definition\"]\n )\n\n if self.drop_self_cycles:\n if word not in processed_def:\n all_edges = all_edges.union(processed_def)\n else:\n all_edges = all_edges.union(processed_def)\n\n return all_edges", "def get_words(self):\n return [self.id2word[idx] for idx in range(len(self))]", "def find(self, word):\n\n curr = self.head\n words = []\n # Do we at least contain the whole word?\n for letter in word:\n if letter in curr.children:\n curr = curr.children[letter]\n else:\n return words\n\n queue = [curr]\n\n while len(queue):\n curr = queue.pop()\n\n if \"_end\" in curr.children:\n words.append(curr.data)\n\n queue = [node\n for _, node in\n curr.children.iteritems()] + queue\n\n return words", "def segment_spanish(input_text):\n processed_document = nlp(input_text)\n\n tokens = drop_punctuation_and_numbers([word for word in processed_document])\n\n unique_tokens = set(tokens)\n return list(unique_tokens)", "def random_segs(cls, shape, lemma = None, case = None):\n # For each C or V segment in `shape`, initialize a random Segment of the\n # appropriate type. 
Initialize a new WordForm with all these Segments.\n return cls([Segment(seg_type = seg) for seg in shape], lemma, case)", "def get_segments(self, start=False):\n cmd = [\"ipcs\", \"-a\"]\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n output, err = process.communicate()\n output = output.decode(\"utf-8\").split(\"\\n\")\n\n keys = ['key', 'shmid', 'owner', 'perms', 'bytes', 'nattch',\n 'status']\n segments = {}\n\n for line in output:\n # this should capture all keys\n # note: it won't do queues vs mem vs sem etc.\n if line[0:2] == '0x':\n values = list(filter(None, line.split(\" \")))\n data = dict(zip(keys, values))\n if start:\n # print (data['shmid'])\n self.segments[data['shmid']] = data\n segments[data['shmid']] = data\n return segments", "def segment_sphere(seg, sph):\n ints = line_sphere(seg.line(), sph)\n if ints:\n return [ a for a,i in zip(ints, map(seg.affine, ints)) if i >= 0 and i <= 1 ]\n return []", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def filter_segs(self, segs, normalize=True):\n return list(filter(lambda seg: self.seg_known(seg, normalize), segs))", "def segment(self):\n start = self.alignment.matching_function_startpoint(self.idx)\n end = self.alignment.matching_function_endpoint(self.idx)\n return [start, end]", "def get_segments(file_name):\n count = 1\n total_num_lines = num_lines_in_file(file_name)\n with open(file_name, 'r') as file_in:\n pre_segment = file_in.readline().split()[0]\n segments = [pre_segment]\n num_lines = []\n for line in file_in:\n line = line.split()\n if line[0].startswith(';;'):\n count += 1\n else:\n if len(line) >= LINE_LEN:\n if line[0] == pre_segment:\n count += 1\n else:\n segments.append(line[0])\n pre_segment = line[0]\n num_lines.append(count)\n count = 1\n else:\n count += 1\n last_num_lines_entry = total_num_lines - sum(num_lines)\n num_lines.append(last_num_lines_entry)\n assert len(segments) == len(num_lines), \"%i != %i\" %(len(segments), len(num_lines))\n return segments, num_lines", "def get_synonyms(word):\n synsets = [];\n syns = wn.synsets(word)\n for ss in syns:\n lemmas = []\n for l in ss.lemmas():\n lemma = { \"name\": l.name(), \"related_forms\": [] }\n for x in l.derivationally_related_forms():\n lemma['related_forms'].append(x.name())\n lemmas.append(lemma)\n synsets.append({\n \"lemmas\": lemmas,\n \"d\": ss.definition(),\n \"pos\": ss.pos(),\n \"id\": ss.name()\n })\n return synsets", "def raw_segments(self) -> List[\"RawSegment\"]:\n return self.get_raw_segments()", "def loadStopWordList(swFile):\n f = open(swFile, 'r')\n lines = f.readlines()\n f.close()\n result = list()\n for line in lines:\n sWord = line.strip('\\n')\n result.append(sWord)\n return result", "def get_segments(cst):\n assert isinstance(cst, ChromStruct)\n\n # create a set of coordinates for the start and end of segments\n segs = np.load(cst.sg_files)['sg']\n end = np.cumsum(segs)\n start = np.concatenate(([0], end[:-1]))\n\n return np.column_stack((start, end)).astype(int)", "def create_word_list(self):\n return set(self.split(self.title)+self.split(self.conditions)+self.split(self.interventions))", "def search(self, word):\n return self._dfs(word, 0, self.trie)", "def getTokens(self):\n list = []\n for i in range(self.startIdx, self.endIdx + 1):\n token = self.sentence[i]\n list.append(token)\n return list", "def parse(self, word):\n word = 
self.son.segs(word)\n son_map = self._sonority_map(word)\n son_map = self._mark_offglides(son_map)\n son_map = self._adjust_anom_fric_cod(son_map)\n son_map = self._adjust_anom_fric_ons(son_map)\n ons_son = self._initial_onset(son_map)\n cod_son = self._final_coda(son_map)\n ons = self.from_map(ons_son, word)\n cod = self.from_reverse_map(cod_son, word)\n return (ons, cod)", "def sentences(self, text):\n if not self.__isValidInput(text):\n return [Sentence(text, Sentence.NONE)]\n\n uniText = unicode_str(text)\n result = []\n textLen = len(uniText)\n sentenceLen = c_size_t()\n position = 0\n while textLen > 0:\n sentenceType = self.__lib.voikkoNextSentenceStartUcs4(\n self.__handle,\n uniText[position:],\n textLen,\n byref(sentenceLen),\n )\n sentenceText = uniText[position:position + sentenceLen.value]\n result.append(Sentence(sentenceText, sentenceType))\n if sentenceType == Sentence.NONE:\n break\n position = position + sentenceLen.value\n textLen = textLen - sentenceLen.value\n return result", "def parts_of_speech_tags(self, tokenized_doc):\n return [(token.text, token.pos_) for token in self.parser(\n tokenized_doc)]", "def _segment(self, string: str) -> Generator:\n buff: List = []\n segment_start = 1\n type_: Optional[Types] = None\n for i, line in enumerate(string.split(\"\\n\"), start=1):\n line_type = self._parse_segment_type(line)\n if line_type is not None:\n if type_ is not None:\n yield type_, buff\n segment_start = i + 1\n buff = []\n type_ = line_type\n buff.append((line + \"\\n\", i))\n if buff:\n if type_ is None:\n raise ValueError(\n f\"Most likely missing Var name at \" f\"line {segment_start}\"\n )\n yield type_, buff", "def find(self, word):\n currnode = self.root\n\n for letter in word:\n if letter not in currnode.children:\n return Set()\n currnode = currnode.children[letter]\n\n return currnode.pages", "def segment_tuples(self):\n return ((self.vertices[i], self.vertices[i+1])\n for i in range(len(self.vertices)-1))", "def getSegments(points):\n return _identifyStrokes(points)[1]", "def get_occurrences(self, word):\n try:\n word_id = self.token2id[word]\n except KeyError:\n word_id = word\n return self._get_occurrences(self.id2contiguous[word_id])", "def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs", "def inters_segment(self, s):\r\n x1 = s.start[0] - self.center[0]\r\n y1 = s.start[1] - self.center[1]\r\n x2 = s.end[0] - self.center[0]\r\n y2 = s.end[1] - self.center[1]\r\n dx = x2 - x1\r\n dy = y2 - y1\r\n dr = math.sqrt(dx * dx + dy * dy)\r\n D = x1 * y2 - x2 * y1\r\n dr2 = dr * dr\r\n d = self.radius * self.radius * dr2 - D * D \r\n \r\n if d < 0:\r\n return []\r\n else: \r\n if dy < 0:\r\n sgndy = -1\r\n else:\r\n sgndy = 1 \r\n \r\n Ddy = D * dy\r\n mDdx = -D * dx\r\n sgndydxsqrtd = sgndy * dx * math.sqrt(d)\r\n absdysqrtd = abs(dy) * math.sqrt(d) \r\n \r\n xa = float(Ddy + sgndydxsqrtd) / dr2 + self.center[0]\r\n ya = float(mDdx + absdysqrtd) / dr2 + self.center[1]\r\n \r\n xb = (Ddy - sgndydxsqrtd) / dr2 + self.center[0]\r\n yb = (mDdx - absdysqrtd) / dr2 + self.center[1]\r\n \r\n if (d == 0) or not s.contains_point(xb, yb):\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya))]\r\n else:\r\n return []\r\n else:\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya)), (int(xb), int(yb))]\r\n else:\r\n return [(int(xb), int(yb))]", "def get_word_pos_list(self, raw_text):\n 
raw_text = raw_text.strip()\n word_list = []\n pos_list = []\n # pdb.set_trace()\n seg_list = jieba.posseg.cut(raw_text,HMM=False) # 默认是精确模式\n for word, flag in seg_list:\n # remove the punctuation, we will keep punctuation as prosodic boundary\n if word in ['「', '」', '.', '-' , '', ' ', '。' , '—' , '?', ':', '、', '…',';',',',',','!']:\n continue\n word_list.append(word)\n pos_list.append(flag)\n return word_list, pos_list", "def parse_sentence(self, text):\n l = []\n tokens = word_tokenize(text)\n print(tokens)\n skip = 0\n i = -1 # index of token in tokens list\n for token in tokens:\n i += 1\n if skip:\n skip -= 1\n # CORONA TERMS:\n elif token.lower() in corona_words:\n l.append('covid')\n elif is_flag_emoji(token):\n try:\n l.append(flag.ISO3166[flag.dflagize(token)[1:3]])\n except:\n continue\n # HASHTAGS:\n elif token == '#' and i+1 < len(tokens):\n parse_hashtage(tokens[i+1], l, tokens)\n skip += 1\n # TAGS:\n elif token == '@' and i+1 < len(tokens):\n parst_tag(tokens[i+1], l)\n skip = True\n # Size AS A WORD:\n elif token.lower() in sizes.keys():\n l.append(parse_number('1', token))\n elif check_if_term_is_fraction(token):\n if i < len(tokens)-1 and tokens[i+1].lower() in percent:\n l.append(token + '%')\n skip += 1\n else:\n l.append(token)\n # NUMBERS:\n elif isNumber(token):\n token = clean_number(token)\n if (i < len(tokens) - 2) and (tokens[i+1].lower() in sizes.keys()) and (tokens[i+2].lower() in percent):\n l.append(parse_number(token, tokens[i+1]) + '%')\n skip += 2\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in percent:\n l.append(parse_number(token) + '%')\n skip += 1\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in sizes.keys():\n l.append(parse_number(token, tokens[i+1]))\n skip += 1\n elif (i < len(tokens) - 1) and check_if_term_is_fraction(tokens[i+1]):\n l.append(token +' '+ tokens[i+1])\n skip += 1\n else:\n l.append(parse_number(token))\n elif isNumber(token[0:len(token) - 1]) and token[len(token)-1].lower() in sizes:\n tokens.append(token[0:len(token) - 1])\n tokens.append(token[len(token)-1])\n # OTHER TOKENS:\n else:\n cleaning(token, tokens, l)\n\n text_tokens_without_stopwords = [w for w in l if w.lower() not in stop_words]\n print(text_tokens_without_stopwords)\n return text_tokens_without_stopwords", "def tag_words (lx, wds):\n if (wds == []):\n return [[]]\n else:\n tag_first = tag_word (lx, wds[0])\n tag_rest = tag_words (lx, wds[1:])\n return [[fst] + rst for fst in tag_first for rst in tag_rest]", "def _extract_terms(self, obj):\r\n terms = set()\r\n if 'paths' in obj:\r\n for path in obj['paths']:\r\n segs = re.split('[/{}]', path)\r\n for seg in segs:\r\n terms.add(seg.lower())\r\n self.terms = terms", "def to_dict(self):\n speaker = self.get_speaker()\n segs = []\n for seg in self._segments:\n tmp = seg.get_line()[2:]\n tmp[-1] = speaker\n tmp[0] = int(seg.get_start())\n tmp[1] = int(seg.get_end())\n tmp.append(self.get_name())\n tmp[3] = self.speakers\n segs.append(tmp)\n return segs", "def segment(data):", "def get_raw_segments(self) -> List[\"RawSegment\"]:\n return [item for s in self.segments for item in s.raw_segments]", "def words(self):\n return self.text.split()", "def get_words(self):\n words = self.wiki.get_words(cleaner=self.cleaner)\n df = pd.DataFrame({\"word\": words})\n df = df.drop_duplicates(\"word\")\n df = df.head(100)\n mask = df[\"word\"].isin(self.common[\"word\"])\n mask |= df[\"word\"].str.lower().isin(self.common[\"word\"])\n\n words = [ Word(word) for word in df[~mask][\"word\"] ]\n for word in words:\n 
word.get_definition(definer=self.definer)", "def get_page_words(parsed_hocr_page, pageid):\n page_words = []\n page_height = parsed_hocr_page.box.height\n page_width = parsed_hocr_page.box.width\n page_dim_string = \"%sx%s\" %(page_width, page_height)\n \n for word in parsed_hocr_page.words:\n this_word = {\n 'x0':word.box.left, 'x1':word.box.right, \n 'y0':page_height-word.box.bottom, 'y1':page_height-word.box.top,\n 'text':word.text, 'width':word.box.width,\n 'height':word.box.height, 'pageid':pageid,\n 'page_dim':page_dim_string,\n 'object_type':'word',\n 'lang':word.lang,\n }\n page_words.append(this_word)\n \n return page_words", "def segment(args):\n logger = logging.getLogger('SegEDU')\n rst_data = RSTData()\n logger.info('Loading vocab...')\n with open(args.word_vocab_path, 'rb') as fin:\n word_vocab = pickle.load(fin)\n logger.info('Word vocab size: {}'.format(word_vocab.size()))\n rst_data.word_vocab = word_vocab\n logger.info('Loading the model...')\n model = AttnSegModel(args, word_vocab)\n model.restore('best', args.model_dir)\n if model.use_ema:\n model.sess.run(model.ema_backup_op)\n model.sess.run(model.ema_assign_op)\n\n spacy_nlp = spacy.load('en', disable=['parser', 'ner', 'textcat'])\n for file in args.input_files:\n logger.info('Segmenting {}...'.format(file))\n raw_sents = []\n with open(file, 'r') as fin:\n for line in fin:\n line = line.strip()\n if line:\n raw_sents.append(line)\n samples = []\n for sent in spacy_nlp.pipe(raw_sents, batch_size=1000, n_threads=5):\n samples.append({'words': [token.text for token in sent],\n 'edu_seg_indices': []})\n rst_data.test_samples = samples\n data_batches = rst_data.gen_mini_batches(args.batch_size, test=True, shuffle=False)\n\n edus = []\n for batch in data_batches:\n batch_pred_segs = model.segment(batch)\n for sample, pred_segs in zip(batch['raw_data'], batch_pred_segs):\n one_edu_words = []\n for word_idx, word in enumerate(sample['words']):\n if word_idx in pred_segs:\n edus.append(' '.join(one_edu_words))\n one_edu_words = []\n one_edu_words.append(word)\n if one_edu_words:\n edus.append(' '.join(one_edu_words))\n\n if not os.path.exists(args.result_dir):\n os.makedirs(args.result_dir)\n save_path = os.path.join(args.result_dir, os.path.basename(file))\n logger.info('Saving into {}'.format(save_path))\n with open(save_path, 'w') as fout:\n for edu in edus:\n fout.write(edu + '\\n')", "def load_segments(filename):\n coordinates_struct = struct.Struct('4d')\n segments = []\n adjuster = CoordinatesHash()\n\n with open(filename, \"rb\") as bo_file:\n packed_segment = bo_file.read(32)\n while packed_segment:\n coordinates = coordinates_struct.unpack(packed_segment)\n raw_points = [Point(coordinates[0:2]), Point(coordinates[2:])]\n adjusted_points = [adjuster.hash_point(p) for p in raw_points]\n segments.append(Segment(adjusted_points))\n packed_segment = bo_file.read(32)\n\n return adjuster, segments", "def get_segment_by_name(self, name):\n for seg in self.segments:\n if seg.segname == name:\n return seg\n\n return None", "def convert_segments(segments):\n polygons = []\n interiors = []\n linestrings = []\n for segment in segments:\n ls = LineString(segment)\n if segment[0][0] == segment[-1][0] and segment[0][1] == segment[-1][1]:\n lr = LinearRing(ls)\n if not lr.is_ccw:\n polygons.append(Polygon(segment))\n else:\n interiors.append(lr)\n continue\n linestrings.append(ls)\n\n return polygons, interiors, linestrings", "def children(word, word_dict):\n res = []\n for i in range(len(word)):\n child = word[:i]+word[i+1:]\n if 
child in word_dict:\n res.append(child)\n return res", "def get_speech(self, word):\n posses = ['verb', 'noun', 'adj', 'adv', 'as in', 'conjunction']\n speeches = []\n\n def get_all_synonyms(word1, speech1):\n for w in Word(word1).synonyms('all', partOfSpeech=speech1):\n if not w == []:\n return w\n return []\n\n def empty_tree(input_list):\n # print(input_list)\n if type(input_list) == type([]):\n for l in input_list:\n if not empty_tree(l):\n return False\n return True\n else:\n return False\n\n for poss in posses:\n if not empty_tree(get_all_synonyms(word, poss)):\n speeches.append(poss)\n return speeches", "def get_indexes_for_word (self,word):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,word,)\r\n db_cursor.execute(\"SELECT note_index\"\r\n +\" FROM word_to_indexes\"\r\n +\" WHERE notebook=? and word=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n\r\n return self.word_dict[word]" ]
[ "0.6500796", "0.64950335", "0.6456525", "0.6327646", "0.63106126", "0.626178", "0.6221559", "0.61808854", "0.6148151", "0.6108368", "0.60761374", "0.6001049", "0.5893235", "0.58411497", "0.5838696", "0.5827947", "0.58147967", "0.57902044", "0.57790804", "0.5765505", "0.57108724", "0.5699152", "0.5675839", "0.56539285", "0.56483376", "0.56302977", "0.5611321", "0.5599798", "0.5582741", "0.55615616", "0.55552644", "0.54585093", "0.54525137", "0.5450572", "0.5446269", "0.54260194", "0.53857636", "0.53624636", "0.53371185", "0.53131074", "0.5308159", "0.52816814", "0.5276337", "0.5275621", "0.52546924", "0.52480525", "0.5210298", "0.519909", "0.51960546", "0.5184821", "0.5184821", "0.5184821", "0.5184821", "0.5173125", "0.5170531", "0.5152788", "0.5151949", "0.5138956", "0.5122585", "0.5115565", "0.51110077", "0.510796", "0.510796", "0.5106568", "0.51056397", "0.5092486", "0.5084024", "0.50758564", "0.5058328", "0.50461257", "0.5040504", "0.5026112", "0.50241005", "0.50170904", "0.5012771", "0.50072354", "0.49994004", "0.49865326", "0.49862477", "0.4984329", "0.4979746", "0.4973171", "0.49692792", "0.49644792", "0.49571562", "0.49570104", "0.49520147", "0.49476528", "0.49420053", "0.49378788", "0.49373412", "0.49350774", "0.4934522", "0.49274382", "0.49178347", "0.49176294", "0.4917182", "0.4913938", "0.49084142", "0.49050888" ]
0.5958538
12
Return a nparray of features named in ft_name for the segments in word
def word_array(self, ft_names, word, normalize=True): return numpy.array([s.numeric(ft_names) for s in self.word_fts(word, normalize)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def features_from_labels(audio_file, segments):\n segments_features = []\n #for each segment\n for segment in segments:\n features = features_from_label(audio_file, segment)\n #and append it to the list\n segments_features.append(features)\n return segments_features", "def word_fts(self, word, normalize=True):\n return [self.fts(ipa, False) for ipa in self.ipa_segs(word, normalize)]", "def _get_seg_features(string):\n seg_feature = []\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def extract(self, document):\n f_num = len(self.feature_list)\n feature_vector = np.zeros((f_num,))\n words = document.split()\n for i in xrange(len(words)):\n for n in self.ns:\n ngram = self.try_get_ngram(words, n, i)\n if ngram and ngram in self.ngrams:\n self.add_ngram(feature_vector, ngram)\n return feature_vector", "def get_feature_names(self):\n\t\treturn np.array(['nouns', 'adjectives', 'verbs', 'adverbs'])", "def get_seg_features(string):\n seg_feature = []\n\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def make_tweet_nparr( txt ):\n # result storage\n fvec = numpy.empty( len(testFeatures) )\n\n # search for each feature\n txtLow = ' ' + txt.lower() + ' '\n for i in range( 0, len(testFeatures) ):\n\n key = testFeatures[i][0]\n\n fvec[i] = False\n for tstr in testFeatures[i][1]:\n fvec[i] = fvec[i] or (txtLow.find(tstr) != -1)\n\n return fvec", "def get_name_to_features(self):\n name_to_features = {\n 'input_ids': tf.io.FixedLenFeature([self.seq_len], tf.int64),\n 'label_ids': tf.io.FixedLenFeature([], tf.int64),\n }\n return name_to_features", "def get_name_to_features(self, is_training):\n name_to_features = {\n 'input_ids': tf.io.FixedLenFeature([self.seq_len], tf.int64),\n 'input_mask': tf.io.FixedLenFeature([self.seq_len], tf.int64),\n 'segment_ids': tf.io.FixedLenFeature([self.seq_len], tf.int64),\n }\n\n if is_training:\n name_to_features['start_positions'] = tf.io.FixedLenFeature([], tf.int64)\n name_to_features['end_positions'] = tf.io.FixedLenFeature([], tf.int64)\n else:\n name_to_features['unique_ids'] = tf.io.FixedLenFeature([], tf.int64)\n\n return name_to_features", "def word_features(table):\n\tfeatures = numpy.zeros((len(table), 620), dtype='float32')\n\tkeys = table.keys()\n\tfor i in range(len(table)):\n\t\tf = table[keys[i]]\n\t\tfeatures[i] = f / norm(f)\n\treturn features", "def get_graph_embedding_features(fn='taxi_all.txt'):\n ge = []\n with open(fn, 'r') as fin:\n fin.readline()\n for line in fin:\n ls = line.strip().split(\" \")\n ge.append([float(i) for i in ls])\n ge = np.array(ge)\n ge = ge[np.argsort(ge[:,0])]\n return ge[:,1:]", "def _create_feature_vec():\n\tnum_tags = NGRAM_TUPLE[0]\n\tfvec = []\n\tfor _, size in FEATURE_TUPLE:\n\t\tfvec.append(np.zeros((num_tags, size)))\n\n\t# Append tag ngram weights to end\n\tfvec.append(np.zeros((num_tags, num_tags)))\n\treturn fvec", "def extract_feats(word, nlp):\n feat_dict = {}\n feat_string = ''\n doc = nlp(word).to_dict()[0][0]\n if 'feats' in doc:\n for pair in doc['feats'].split('|'):\n feat, val = pair.split('=')\n feat_dict[feat] = val\n feat_string += feat + ': ' + val + ', '\n if feat_string:\n feat_string = ' (' + feat_string[:-2] + ')'\n return feat_dict, feat_string", "def get_feature_array(tweets):\n feats=[]\n for t in tweets:\n 
feats.append(sent_features(t))\n return np.array(feats)", "def features_from_label(audio_file, segment):\n duration = segment['end'] - segment['start']\n audio, sample_rate = librosa.core.load(\n audio_file,\n duration=duration,\n offset=segment['start']\n )\n features = fe.get_features(audio, sample_rate)\n return features", "def features_extract(document, wordset):\n words_doc = nltk.FreqDist(document)\n features = []\n for word in wordset:\n features.append(words_doc[word])\n return features", "def get_nft_list(self):\n tokens = self.api.find_all(\"nft\", \"nfts\", query={})\n return tokens", "def extract_features(self, doc):\n\n features = dict()\n\n bow = self.vectorize_doc_simple(doc)\n\n charcount = self.char_count(doc)\n wordcount = self.word_count(doc)\n sentencecount = self.sentence_count(doc)\n paragraphcount = self.paragraph_count(doc)\n\n # extract characters features\n features['characters per word'] = charcount / wordcount\n features['characters per sentence'] = charcount / sentencecount\n features['characters per paragraph'] = charcount / paragraphcount\n features['characters per document'] = charcount\n\n features['word characters length variance'] = numpy.std(\n self.word_char_length_variance(doc))\n features['sentence characters length variance'] = numpy.std(\n self.sentence_char_length_variance(doc))\n\n # extract words features\n features['words per sentence'] = wordcount / sentencecount\n features['words per paragraph'] = wordcount / paragraphcount\n features['words per document'] = wordcount\n\n features['sentence words length variance'] = numpy.std(\n self.sentence_words_length_variance(doc))\n\n # extract sentences features\n features['sentences per paragraph'] = sentencecount / paragraphcount\n features['sentences per document'] = sentencecount\n\n # extract paragraphs features\n features['paragraphs per document'] = paragraphcount\n\n # extract syllables features\n syllablecount = 0\n for word, count in bow.iteritems():\n syllablecount += self.num_of_syllables(word) * count\n features['syllables per word'] = syllablecount / wordcount\n features['syllables per sentence'] = syllablecount / sentencecount\n features['syllables per paragraph'] = syllablecount / paragraphcount\n\n # extract part of speech features\n tokens = self.pos_tag_doc(doc)\n\n pos_counts = self.vectorize_pos_tags(tokens)\n poswordcount = sum(pos_counts.values())\n for i in xrange(82, 101):\n features['%d per word' % i] = pos_counts[i] / poswordcount\n\n sorted_pos_counts = sorted(pos_counts, key=pos_counts.get, reverse=True)\n features['1st top tag'] = str(sorted_pos_counts[0])\n features['2nd top tag'] = str(sorted_pos_counts[1])\n features['3rd top tag'] = str(sorted_pos_counts[2])\n features['4th top tag'] = str(sorted_pos_counts[3])\n features['5th top tag'] = str(sorted_pos_counts[4])\n\n # extract vocab features\n vocabsize = len(self.vectorize_doc_simple(doc))\n features['vocab size'] = vocabsize\n features['words per vocab size'] = wordcount / vocabsize\n\n return features", "def get_feature_names(self):\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features))[self.feature_mask_]\n return self.features_", "def getFeatures(featureInput):\n featureList = []\n for defTerm,candidateSent in featureInput:\n tokens = nltk.word_tokenize(candidateSent)\n features = {}\n POScenter,POSleft,POSright = wordPOS(tokens,defTerm)\n 
features['Pos of first Article'] = posFirstArticle(tokens)\n## features['Num Punct Marks'] = numPunctuation(tokens)\n features['Subj words Predicate'] = subWordPerdicate(candidateSent,defTerm,tokens)\n features['Word before def term'] = wordBeforeDef(tokens,defTerm)\n features['POS centered word'] = POScenter\n features['POS left word'] = POSleft\n## features['POS right word'] = POSright \n featureList.append(features)\n return featureList", "def lexicon_features(tokens, feats):\n# feats['neg_words'] = 0;\n# feats['pos_words'] = 0;\n# tokens = list([token.lower() for token in tokens])\n# feats['neg_words'] , feats['pos_words'] = np.count_nonzero(np.in1d(tokens, list(neg_words))), np.count_nonzero(np.in1d(tokens, list(pos_words)))\n neg_count=0\n pos_count=0\n for i in tokens:\n if i.lower() in neg_words:\n neg_count+=1\n if i.lower() in pos_words:\n pos_count+=1\n feats[\"neg_words\"]=neg_count\n feats[\"pos_words\"]=pos_count", "def get_ngram_features(train_data, test_data):\n print(\"getting ngram features\")\n ngram_vectorizer = CountVectorizer(ngram_range = (1, 2))\n ngram_vectorizer = ngram_vectorizer.fit(train_data)\n return ngram_vectorizer.transform(train_data), ngram_vectorizer.transform(test_data)", "def doc2features(self,sent):\n return [self.word2features(sent['tokens'], i) for i in range(len(sent['tokens']))]", "def get_ntype_featnames(ntype_name, schema_map):\n node_data = schema_map[constants.STR_NODE_DATA]\n feats = node_data.get(ntype_name, {})\n return [feat for feat in feats]", "def compute_label_feature(text, token_to_idx):\n tokens = list(text.strip().lower())\n feats = [token_to_idx[token] for token in tokens]\n return feats", "def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features", "def get_tfidf_features(n_features) -> np.array:\r\n # Transform all titles from the original DataFrame into TF-IDF 
matrix\r\n vectorizer = TfidfVectorizer(decode_error='ignore',\r\n stop_words='english',\r\n max_features=n_features)\r\n\r\n vectors = vectorizer.fit_transform(data['title']).toarray().astype(np.float16, copy=False)\r\n print('TF-IDF features extracted. Shape:', vectors.shape)\r\n\r\n return vectors", "def word2features(self,sent, i):\n word = sent[i][0]\n #postag = sent[i][1]\n\n features = {\n 'bias': 1.0,\n 'word.lower()': word.lower(),\n 'word.isupper()': word.isupper(),\n 'word.istitle()': word.istitle(),\n 'word.isdigit()': word.isdigit(),\n 'word.shape()':self.shape(word),\n 'word.isalnum()':word.isalnum(),\n 'word.isalpha()':word.isalpha(),\n # 'postag': postag,\n # 'postag[:2]': postag[:2],\n }\n if i > 0:\n word1 = sent[i - 1][0]\n #postag1 = sent[i - 1][1]\n features.update({\n '-1:word.lower()': word1.lower(),\n '-1:word.istitle()': word1.istitle(),\n '-1:word.isupper()': word1.isupper(),\n '-1:word.isdigit()': word1.isdigit(),\n '-1:word.isalnum()':word1.isalnum(),\n '-1:word.isalpha()':word1.isalpha(),\n # '-1:postag': postag1,\n # '-1:postag[:2]': postag1[:2],\n })\n else:\n features['BOS'] = True\n\n if i > 1:\n word2 = sent[i - 2][0]\n #postag2 = sent[i - 2][1]\n features.update({\n '-2:word.lower()': word2.lower(),\n '-2:word.istitle()': word2.istitle(),\n '-2:word.isupper()': word2.isupper(),\n '-2:word.isdigit()': word2.isdigit(),\n '-2:word.isalnum()': word2.isalnum(),\n '-2:word.isalpha()': word2.isalpha(),\n # '-2:postag': postag2,\n # '-2:postag[:2]': postag2[:2],\n })\n else:\n features['BOS1'] = True\n if i > 2:\n word3 = sent[i - 3][0]\n #postag3 = sent[i - 3][1]\n features.update({\n '-3:word.lower()': word3.lower(),\n '-3:word.istitle()': word3.istitle(),\n '-3:word.isupper()': word3.isupper(),\n '-3:word.isdigit()': word3.isdigit(),\n '-3:word.isalnum()': word3.isalnum(),\n '-3:word.isalpha()': word3.isalpha(),\n # '-3:postag': postag3,\n # '-3:postag[:2]': postag3[:2],\n })\n else:\n features['BOS2'] = True\n\n if i > 3:\n word4 = sent[i - 4][0]\n #postag4 = sent[i - 4][1]\n features.update({\n '-4:word.lower()': word4.lower(),\n '-4:word.istitle()': word4.istitle(),\n '-4:word.isupper()': word4.isupper(),\n '-4:word.isdigit()': word4.isdigit(),\n '-4:word.isalnum()': word4.isalnum(),\n '-4:word.isalpha()': word4.isalpha(),\n # '-4:postag': postag4,\n # '-4:postag[:2]': postag4[:2],\n })\n else:\n features['BOS2'] = True\n\n if i < len(sent) - 1:\n word1 = sent[i + 1][0]\n features.update({\n '+1:word.lower()': word1.lower(),\n '+1:word.istitle()': word1.istitle(),\n '+1:word.isupper()': word1.isupper(),\n '+1:word.isdigit()': word1.isdigit(),\n '+1:word.isalnum()': word1.isalnum(),\n '+1:word.isalpha()': word1.isalpha(),\n # '+1:postag': postag1,\n # '+1:postag[:2]': postag1[:2],\n })\n else:\n features['EOS'] = True\n if i < len(sent) - 2:\n word12 = sent[i + 2][0]\n #postag12 = sent[i + 2][1]\n features.update({\n '+2:word.lower()': word12.lower(),\n '+2:word.istitle()': word12.istitle(),\n '+2:word.isupper()': word12.isupper(),\n '+2:word.isdigit()': word12.isdigit(),\n '+2:word.isalnum()': word12.isalnum(),\n '+2:word.isalpha()': word12.isalpha(),\n # '+2:postag': postag12,\n # '+2:postag[:2]': postag12[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 3:\n word13 = sent[i + 3][0]\n #postag13 = sent[i + 3][1]\n features.update({\n '+3:word.lower()': word13.lower(),\n '+3:word.istitle()': word13.istitle(),\n '+3:word.isupper()': word13.isupper(),\n '+3:word.isdigit()': word13.isdigit(),\n '+3:word.isalnum()': word13.isalnum(),\n 
'+3:word.isalpha()': word13.isalpha(),\n # '+3:postag': postag13,\n # '+3:postag[:2]': postag13[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 4:\n word14 = sent[i + 4][0]\n #postag14 = sent[i + 4][1]\n features.update({\n '+4:word.lower()': word14.lower(),\n '+4:word.istitle()': word14.istitle(),\n '+4:word.isupper()': word14.isupper(),\n '+4:word.isdigit()': word14.isdigit(),\n '+4:word.isalnum()': word14.isalnum(),\n '+4:word.isalpha()': word14.isalpha(),\n # '+4:postag': postag14,\n # '+4:postag[:2]': postag14[:2],\n })\n else:\n features['EOS2'] = True\n return features", "def features(self, sent, position):\n if type(sent[0]) is str:\n fts = []\n if self.training:\n curr_word = 'curr=' + sent[position].lower()\n fts.append(curr_word)\n elif sent[position].lower() in self.vocab:\n curr_word = 'curr=' + sent[position].lower()\n fts.append(curr_word)\n else:\n curr_word = 'curr=UNK'\n fts.append(curr_word)\n prefix = 'pref=' + sent[position][:2].lower()\n suffix = 'suff=' + sent[position][-2:].lower()\n if position == 0:\n prev_word1 = 'prev_word1=*START*'\n fts.append(prev_word1)\n if position == len(sent) - 1:\n next_word1 = 'next_word1=*END*'\n fts.append(next_word1)\n if position >= 1:\n if self.training:\n prev_word1 = 'prev_word1=' + sent[position - 1].lower()\n fts.append(prev_word1)\n elif 'prev_word1=' + sent[position - 1].lower() in self.vocab:\n prev_word1 = 'prev_word1=' + sent[position - 1].lower()\n fts.append(prev_word1)\n else:\n prev_word1 = 'prev_word1=UNK'\n fts.append(prev_word1)\n\n if position >= 2:\n if self.training:\n prev_word2 = 'prev_word2=' + sent[position - 2].lower()\n fts.append(prev_word2)\n elif 'prev_word2=' + sent[position - 2].lower() in self.vocab:\n prev_word2 = 'prev_word2=' + sent[position - 2].lower()\n fts.append(prev_word2)\n else:\n prev_word2 = 'prev_word2=UNK'\n fts.append(prev_word2)\n\n if position <= (len(sent) - 2):\n if self.training:\n next_word1 = 'next_word1=' + sent[position + 1].lower()\n fts.append(next_word1)\n elif 'next_word1=' + sent[position + 1].lower() in self.vocab:\n next_word1 = 'next_word1=' + sent[position + 1].lower()\n fts.append(next_word1)\n else:\n next_word1 = 'next_word1=UNK'\n fts.append(next_word1)\n if position <= (len(sent) - 3):\n if self.training:\n next_word2 = 'next_word2=' + sent[position + 2].lower()\n fts.append(next_word2)\n elif 'next_word2=' + sent[position + 2].lower() in self.vocab:\n next_word2 = 'next_word2=' + sent[position + 2].lower()\n fts.append(next_word2)\n else:\n next_word2 = 'next_word2=UNK'\n fts.append(next_word2)\n\n if self.training:\n fts.append(prefix)\n elif prefix in self.vocab:\n fts.append(prefix)\n if self.training:\n fts.append(suffix)\n elif suffix in self.vocab:\n fts.append(suffix)\n\n else:\n fts = []\n if self.training:\n curr_word = 'curr=' + sent[position][0].lower()\n fts.append(curr_word)\n elif sent[position][0].lower() in self.vocab:\n curr_word = 'curr=' + sent[position][0].lower()\n fts.append(curr_word)\n else:\n curr_word = 'curr=UNK'\n fts.append(curr_word)\n prefix = 'pref=' + sent[position][0][:2].lower()\n suffix = 'suff=' + sent[position][0][-2:].lower()\n if position == 0:\n prev_word1 = 'prev_word1=*START*'\n fts.append(prev_word1)\n if position == len(sent) - 1:\n next_word1 = 'next_word1=*END*'\n fts.append(next_word1)\n if position >= 1:\n if self.training:\n prev_word1 = 'prev_word1=' + sent[position-1][0].lower()\n fts.append(prev_word1)\n elif 'prev_word1=' + sent[position-1][0].lower() in self.vocab:\n prev_word1 = 
'prev_word1=' + sent[position-1][0].lower()\n fts.append(prev_word1)\n else:\n prev_word1 = 'prev_word1=UNK'\n fts.append(prev_word1)\n\n if position >= 2:\n if self.training:\n prev_word2 = 'prev_word2=' + sent[position-2][0].lower()\n fts.append(prev_word2)\n elif 'prev_word2=' + sent[position-2][0].lower() in self.vocab:\n prev_word2 = 'prev_word2=' + sent[position-2][0].lower()\n fts.append(prev_word2)\n else:\n prev_word2 = 'prev_word2=UNK'\n fts.append(prev_word2)\n\n if position <= (len(sent) - 2):\n if self.training:\n next_word1 = 'next_word1=' + sent[position+1][0].lower()\n fts.append(next_word1)\n elif 'next_word1=' + sent[position+1][0].lower() in self.vocab:\n next_word1 = 'next_word1=' + sent[position+1][0].lower()\n fts.append(next_word1)\n else:\n next_word1 = 'next_word1=UNK'\n fts.append(next_word1)\n if position <= (len(sent) - 3):\n if self.training:\n next_word2 = 'next_word2=' + sent[position+2][0].lower()\n fts.append(next_word2)\n elif 'next_word2=' + sent[position+2][0].lower() in self.vocab:\n next_word2 = 'next_word2=' + sent[position + 2][0].lower()\n fts.append(next_word2)\n else:\n next_word2 = 'next_word2=UNK'\n fts.append(next_word2)\n\n if self.training:\n fts.append(prefix)\n elif prefix in self.vocab:\n fts.append(prefix)\n if self.training:\n fts.append(suffix)\n elif suffix in self.vocab:\n fts.append(suffix)\n\n return fts", "def get_feature_names(self):\n ...", "def surface_labelled_data_preparation_pipeline(word_list: [str]):\n X = []\n\n for word in word_list:\n segments = word.split('-')\n segment_features = []\n for i in range(len(segments)):\n features = {}\n\n segment_length = len(segments[i])\n features['length'] = segment_length\n\n features['segment.lower()'] = segments[i].lower()\n features['pos_in_word'] = i\n\n if segment_length % 2 == 0:\n features['even'] = 1\n else:\n features['odd'] = 1\n\n features['begin'] = segments[i][0]\n features['end'] = segments[i][len(segments[i]) - 1]\n\n try:\n features['prev_segment'] = segments[i - 1]\n except IndexError:\n features['prev_segment'] = ''\n # continue\n\n try:\n features['next_segment'] = segments[i + 1]\n except IndexError:\n features['next_segment'] = ''\n\n if segments[0].isupper():\n features['start_upper'] = 1\n else:\n features['start_lower'] = 1\n\n if segments[0] in 'aeiou':\n features['first_vowel'] = 1\n else:\n features['first_const'] = 1\n\n segment_features.append(features)\n\n X.append(segment_features)\n\n return X", "def ipa_segs(self, word, normalize=True):\n if normalize:\n word = FeatureTable.normalize(word)\n return self._segs(word, include_invalid=False, normalize=normalize)", "def create_feature_space(sentences):\n splits = [s.split() for s in sentences]\n types = set(reduce(lambda x, y: x + y, splits))\n lookup = dict()\n for i, word in enumerate(types):\n lookup[word] = i\n return lookup", "def featurize(self, prev_tag, prev_word, token, next_word):\n features = np.array([])\n # one of our features will be the previous tag\n if None or prev_tag == 'O':\n features = np.append(features, [0])\n else:\n features = np.append(features, [1])\n # another set of features will be our word shape\n # condensed word shape form with the truncation thing\n\n # another feature will be our pos tags\n # use nltk pos tagging\n\n # another feature will be our word embeddings\n features = np.append(features, self.model[token])\n # and the final feature will be bias\n features = np.append(features, [1])\n return features", "def get_feature_names(self, input_features=...):\n ...", "def 
makeFeatureVec(words, model, num_features):\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n num_words = 0.\n index2word_set = set(model.wv.index2word)\n for word in words:\n if word in index2word_set:\n num_words += 1\n featureVec = np.add(featureVec,model[word]) \n featureVec = np.divide(featureVec,num_words)\n return featureVec", "def getFeatureNames(self):\n return [\"f100\", \"f103\", \"f104\"]", "def bag_of_features(self, word, normalize=True):\n word_features = self.word_fts(word, normalize)\n features = [v + f for f in self.names for v in ['+', '0', '-']]\n bag = collections.OrderedDict()\n for f in features:\n bag[f] = 0\n vdict = {-1: '-', 0: '0', 1: '+'}\n for w in word_features:\n for (f, v) in w.items():\n bag[vdict[v] + f] += 1\n return numpy.array(list(bag.values()))", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def compute_feature(cls, HL : Headline) -> np.ndarray:\n TokenizerContainer.init()\n tokens, input_ids = cls.get_ids(f'[CLS] {HL.GetSentWithoutEdit()} [SEP]')\n segments = cls.get_segments(tokens)\n masks = cls.get_masks(tokens)\n input_ids.extend(segments)\n 
input_ids.extend(masks)\n return np.array(input_ids)", "def get_features(self, words):\n word_indices = []\n word_char_indices = []\n for word in words:\n if word in self.w2i:\n word_indices.append(self.w2i[word])\n else:\n word_indices.append(self.w2i[\"_UNK\"])\n\n if self.c_in_dim > 0:\n chars_of_word = [self.c2i[\"<w>\"]]\n for char in word:\n if char in self.c2i:\n chars_of_word.append(self.c2i[char])\n else:\n chars_of_word.append(self.c2i[\"_UNK\"])\n chars_of_word.append(self.c2i[\"</w>\"])\n word_char_indices.append(chars_of_word)\n return word_indices, word_char_indices", "def get_features(sentences: tuple) -> np.ndarray:\n sen_embedding = [_single_sentence(st) for st in sentences]\n sen_embedding = np.array(sen_embedding)\n return sen_embedding", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def feature_label(features):\n f=[]\n l=[]\n for item in features:\n f.append(item[0])\n l.append(item[1])\n return f,l", "def compute_features(names):\n Alphabet = ['a', 'b', 'c', 'd', 'e','f', 'g', 'h', 'i', 'j','k', 'l', 'm', 'n', 'o',\n 'p', 'q', 'r', 's', 't','u', 'v', 'w', 'x', 'y' , 'z']\n \n N = len(names)\n Feature_matrix = np.zeros((N, 260))\n for row in range(0, N):\n firstLast = names[row].split()\n first = firstLast[0] #First Name\n last = firstLast[1] #Last Name\n if(len(first) < 5):\n firstRange = len(first)\n else:\n firstRange = 5\n if(len(last) < 5):\n lastRange = len(last)\n else:\n lastRange = 5\n for index in range(0,firstRange): #iterate though first 5 letters of First name\n offset = 26 * index\n featureIndex = offset + Alphabet.index(first[index])\n Feature_matrix[row,featureIndex] = 1\n index = 4 #advance index in case length was less than 5 \n for Lastindex in range(0,lastRange): #iterate though first 5 letters of Last name\n index += 1\n offset = 26 * index\n featureIndex = offset + Alphabet.index(last[Lastindex])\n Feature_matrix[row,featureIndex] = 1\n return Feature_matrix", 
"def nmfFeatures\\\n(\n # Data:\n docs,\n trainDocs=None,\n # Parameters:\n min_df=1,\n max_df=1.0,\n max_features=None,\n sublinear_tf=True,\n stop_words=None,\n useTrainDocs=False,\n n_components=100,\n init='nndsvd',\n l1_ratio=0,\n alpha=0.1,\n lowercase=True,\n # Others:\n max_iter=200,\n random_state=1,\n # Misc:\n logger=None, verbose=True,\n):\n if useTrainDocs:\n assert trainDocs is not None\n assert len(trainDocs) > 0\n # if isinstance(trainDocs[0], list):\n # trainDocs = flattenLists(trainDocs)\n assert len(docs) > 0\n # if isinstance(docs[0], list):\n # docs = flattenLists(docs)\n tfidf_vectorizer = TfidfVectorizer\\\n (\n \tlowercase=lowercase,\n min_df=min_df,\n max_df=max_df,\n max_features=max_features,\n stop_words=stop_words,\n sublinear_tf=sublinear_tf,\n tokenizer=None if isinstance(docs[0], str) else lambda x: x,\n preprocessor=None if isinstance(docs[0], str) else lambda x: x,\n )\n if useTrainDocs:\n docs = docs + trainDocs \n tfidf = tfidf_vectorizer.fit_transform(docs)\n tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n nmf = NMF\\\n (\n n_components=n_components,\n random_state=random_state,\n alpha=alpha,\n l1_ratio=l1_ratio,\n init=init,\n max_iter=max_iter,\n ).fit(tfidf)\n vectors = nmf.transform(tfidf)\n if useTrainDocs:\n vectors = list(vectors)\n vectors = vectors[:-len(trainDocs)]\n vectors = np.array(vectors)\n return np.array(vectors)", "def findFeatures(self):\n\t\tpass", "def buildFeatureList():\n with open('./feature_list.txt', 'w')as out:\n res = es.search(index=indexName, doc_type=document,\n body={\n 'query': {\n 'query_string': {\n \"default_field\": \"split\",\n \"query\": \"training\"\n }\n },\n \"size\": indexSize\n })\n ids = [d['_id'] for d in res['hits']['hits']]\n for id in ids:\n text = es.get(index=indexName, doc_type=document, id=id)['_source']['body']\n terms = text.split()\n for term in terms:\n features[term] = term\n count = 0\n for term in features:\n count += 1\n out.write(str(count)+ \" \" + term + '\\n')", "def w2f(sents,i,j,filename,freq):\n w = sents[i][j][0] #current word\n pos = sents[i][j][1] #POS of current word\n f = [ \n 'bias', #non-contextual feature \n 'w=' + w, #current word \n 'w.istitle=%s' % w.istitle(), #first letter - capitalized\n 'pos=' + pos, # POS tag\n 'w.intitle=%s' % contained_in_title(w, filename), # w matches title\n 'w.lowtitle=%s' % lower_in_title(w, filename), # w lower matches title\n 'w.freq=%s' % frequency(w, freq), # freq of w \n 'w.stopword=%s' % stop_word(w), # # stop word\n ]\n \n # previous word features\n if j>0:\n pw = sents[i][j-1][0] #previous word\n ppos = sents[i][j-1][1] #POS of previous word\n f.extend([ \n 'pw=' + pw, # previous word \n 'pw.istitle=%s' % pw.istitle(), #first letter - capitalized\n 'ppos=' + ppos, # POS tag\n 'pw.intitle=%s' % contained_in_title(pw, filename), # w matches title\n 'pw.lowtitle=%s' % lower_in_title(pw,filename), # w lower matches title\n 'pw.freq=%s' % frequency(pw, freq), # freq of w\n 'pw.stopword=%s' % stop_word(w), # # stop word\n ])\n else: \n f.append('BOS') #first word of a sentence\n\n # next word features\n if j<len(sents[i])-1:\n nw = sents[i][j+1][0] #next word\n npos = sents[i][j+1][1] #POS of next word\n f.extend([ \n 'nw=' + nw, # previous word\n 'nw.istitle=%s' % nw.istitle(), #first letter - capitalized\n 'npos=' + npos, #POS tag\n 'nw.intitle=%s' % contained_in_title(nw, filename), # w matches title\n 'nw.lowtitle=%s' % lower_in_title(nw,filename), # w lower matches title\n 'nw.freq=%s' % frequency(nw, freq), # freq of w\n 
'nw.stopword=%s' % stop_word(w), # # stop word\n ])\n else: \n f.append('EOS') # last word of a sentence\n\n #if j>1: ...\n #if j<len(sents[i])-2: ...\n #if j>0 and j<len(sents[i])-1: ...\n return f", "def segment_to_vector(self, seg, normalize=True):\n return self.fts(seg, normalize).strings()", "def _extract_features(self, a_rel, a_parses):\n feats = {}\n doc_id = a_rel[DOC_ID]\n toks_pos1 = self._get_toks_pos(a_parses[doc_id][SENTENCES],\n a_rel, ARG1)\n toks_pos2 = self._get_toks_pos(a_parses[doc_id][SENTENCES],\n a_rel, ARG2)\n self._get_product_rules(feats, doc_id, a_rel, a_parses)\n self._get_dep_rules(feats, doc_id, a_rel, a_parses)\n self._get_first_last_toks(feats, toks_pos1, toks_pos2)\n self._get_modality(feats, toks_pos1, toks_pos2)\n self._get_vb_class(feats, toks_pos1, toks_pos2)\n self._get_brown_clusters(feats, toks_pos1, toks_pos2)\n self._get_inquirer(feats, toks_pos1, toks_pos2)\n self._get_MPQA(feats, toks_pos1, toks_pos2)\n return feats", "def get_feature_distributions(cls,gen,folder,chosen_seg):\n\n filename = 'feature_distributions{}.txt'.format(str(gen))\n path = os.path.join(folder,filename)\n try:\n with open(path, mode='r', encoding='utf-8 sig') as f:\n lines = [line.strip() for line in f.readlines()]\n except FileNotFoundError:\n return 'error'\n\n features = collections.defaultdict(dict)\n foundit = False\n for line in lines:\n if line == '' or line == '\\n':\n continue\n line = line.split('(')\n if len(line)>1:\n\n if line[1].rstrip(')') == chosen_seg:\n foundit = True\n feature = line[0].strip()\n feature = feature.strip('\\ufeff')\n else:\n foundit = False\n else:\n line = line[0]\n if line[0].isdigit() and foundit:\n bin_,value = line.split(':')\n bin_ = float(bin_.split('-')[0])\n value = int(value)\n features[feature][bin_] = value\n return features", "def extract_features(EEG_segs, channel_names, Fs, return_feature_names=False, process_num=None):\n\n\n if type(EEG_segs)!=list:\n raise TypeError('EEG segments should be list of numpy.ndarray, with size=(sample_point, channel_num).')\n\n seg_num = len(EEG_segs)\n if seg_num <= 0:\n return []\n\n seg_size = EEG_segs[0].shape[0]\n channel_num = EEG_segs[0].shape[1]\n\n features= Parallel(n_jobs=16,verbose=2)(delayed(compute_features_each_seg)(EEG_segs[segi], seg_size, channel_num, combined_channel_num, band_num, NW, window_length, window_step, Fs, band_freq, total_freq_range, segi, seg_num) for segi in range(seg_num))\n # print(features.shape)\n \n\n\n\n if return_feature_names:\n feature_names = ['mean_gradient_%s'%chn for chn in channel_names]\n feature_names += ['kurtosis_%s'%chn for chn in channel_names]\n feature_names += ['sample_entropy_%s'%chn for chn in channel_names]\n for ffn in ['max','min','mean','std','kurtosis']:#,'skewness'\n for bn in band_names:\n if ffn=='kurtosis' or bn!='sigma': # no need for sigma band\n feature_names += ['%s_bandpower_%s_%s'%(bn,ffn,chn) for chn in combined_channel_names]\n\n power_ratios = ['delta/theta','delta/alpha','theta/alpha']\n for pr in power_ratios:\n feature_names += ['%s_max_%s'%(pr,chn) for chn in combined_channel_names]\n feature_names += ['%s_min_%s'%(pr,chn) for chn in combined_channel_names]\n feature_names += ['%s_mean_%s'%(pr,chn) for chn in combined_channel_names]\n feature_names += ['%s_std_%s'%(pr,chn) for chn in combined_channel_names]\n\n if return_feature_names:\n return np.array(features), feature_names#, pxx_mts, freqs\n else:\n return np.array(features)#, pxx_mts, freqs", "def concept_features_for_chunk(self, sentence, ind):\n\n features = 
{'dummy':1}\n\n # Word-level features for each word of the chunk\n for w in sentence[ind].split():\n word_features = self.concept_features_for_word(w)\n features.update(word_features)\n\n return features\n\n # Context windows\n for feature in self.enabled_concept_features:\n\n # Feature: Previous word\n if feature == \"previous_word_stem\":\n if ind != 0:\n prev_ind = ind - 1\n prev_chunk = sentence[prev_ind].split()\n prev_word = porter_st.stem( prev_chunk[-1] )\n features[ ('prev_word_stem',prev_word) ] = 1\n else:\n features[ ('prev_word_stem','<START>') ] = 1\n\n # Feature: Previous word\n if feature == \"next_word_stem\":\n if ind != len(sentence)-1:\n next_ind = ind + 1\n next_chunk = sentence[next_ind].split()\n next_word = porter_st.stem( next_chunk[0] )\n features[ ('next_word_stem',next_word) ] = 1\n else:\n features[ ('next_word_stem','<END>') ] = 1\n\n\n return features", "def get_list_features(feature):\n result = np.array([])\n result = np.append(result,feature.mfcc)\n result = np.append(result,feature.d_mfcc)\n result = np.append(result,feature.lpc)\n result = np.append(result,feature.d_lpc)\n result = np.append(result,feature.zc_rate)\n result = np.append(result,feature.d_zc_rate)\n result = np.append(result,feature.spec_centroid)\n result = np.append(result,feature.d_spec_centroid)\n return result", "def _get_nouns(self, review):\n review_features = []\n for sent in review:\n doc = self.nlp(sent)\n # noun_phrase = [np.text for np in doc.noun_chunks]\n nouns = [unicode(lemma(str(word).lower())) for word in doc if word.pos == NOUN]\n review_features.append(nouns)\n return review_features", "def word2features(sent, i):\n features = []\n\n # the [-1,+1] window of words around the token\n for o in [-1,0,1]:\n if i+o >= 0 and i+o < len(sent):\n word_tuple = sent[i+o]\n word_window = get_words_in_window(word_tuple, o)\n features.extend(word_window)\n\n # # part of speech\n # pos = ('pos', sent[i][1])\n # features.append(pos)\n\n # prop = ('prop', is_proper_case(sent[i][0]))\n # features.append(prop)\n\n return dict(features)", "def generate_feature_vector(self, test_document, n):\n m = len(self.bag_of_features)\n feature_vector = np.zeros(m)\n for feature, col in self.bag_of_features.items():\n if feature in test_document.tfs['all'].keys():\n tf = test_document.tfs['all'][feature]\n df = self.df_term[feature]\n tf_idf = calculate_tf_idf(tf=tf, df=df, doc_num=n)\n feature_vector[col] = tf_idf\n\n np.linalg.norm(feature_vector, axis=0)\n test_document.feature_vector = feature_vector\n return feature_vector", "def get_conv_features(self, word):\n cout = self.conv.forward(word)\n cout = cout.reshape(cout.shape[0], self.cout_numel)\n return cout", "def atoms_to_node_features(self, atoms):\n\n node_feature_matrix = np.zeros((len(atoms),2))\n for (i,atom) in enumerate(atoms):\n node_feature_matrix[i] = self.get_atom_features(atom)\n return node_feature_matrix", "def get_features_train(tweets):\n feats = get_feature_array(tweets)\n tfidf = vectorizer.fit_transform(tweets).toarray()\n M = np.concatenate([tfidf,feats],axis=1)\n return M", "def addr2features(address):\n return [Parser.get_current_and_neighbor_features(i, address) for i in range(len(address))]", "def get_text_features() -> np.array:\r\n # Universal sentence encoder model\r\n # Original model by Google could be loaded from: https://tfhub.dev/google/universal-sentence-encoder/4\r\n # In this notebook the model is loaded from a public dataset on Kaggle\r\n # at 
https://www.kaggle.com/dimitreoliveira/universalsentenceencodermodels\r\n text_model = tf.keras.Sequential(\r\n [KerasLayer(txt_model_path, input_shape=[], dtype=tf.string, # Pretrained model\r\n output_shape=[512], trainable=False),\r\n tf.keras.layers.Layer(512, dtype='float16')] # This layer reduces precision of float numbers\r\n )\r\n\r\n # Convert all texts to vectors\r\n features = text_model.predict(data['title'],\r\n batch_size=BATCH_SIZE,\r\n use_multiprocessing=True,\r\n workers=-1)\r\n print('Text features extracted. Shape:', features.shape)\r\n\r\n return features", "def features(self, words, tags, config):\n buffer = config['buffer']\n stack = config['stack']\n pred_tree = config['pred_tree']\n\n feat = []\n\n # Single word features\n b1_w = words[buffer[0]] if buffer else \"<empty>\"\n b1_t = tags[buffer[0]] if buffer else \"<empty>\"\n b1_wt = b1_w + \" \" + b1_t\n\n b2_w = words[buffer[1]] if len(buffer) > 1 else \"<empty>\"\n b2_t = tags[buffer[1]] if len(buffer) > 1 else \"<empty>\"\n b2_wt = b2_w + \" \" + b2_t\n\n b3_w = words[buffer[2]] if len(buffer) > 2 else \"<empty>\"\n b3_t = tags[buffer[2]] if len(buffer) > 2 else \"<empty>\"\n b3_wt = b3_w + \" \" + b3_t\n\n s1_w = words[stack[-1]] if stack else \"<empty>\"\n s1_t = tags[stack[-1]] if stack else \"<empty>\"\n s1_wt = s1_w + \" \" + s1_t\n\n s2_w = words[stack[-2]] if len(stack) > 1 else \"<empty>\"\n s2_t = tags[stack[-2]] if len(stack) > 1 else \"<empty>\"\n s2_wt = s2_w + \" \" + s2_t\n\n '''\n for i in pred_tree:\n if stack and pred_tree[stack[-1]] == i:\n feat.append(\"tag\" + str(i) + str(tags[i]))\n '''\n\n # Triple word features\n\n def is_parent(parent, child):\n if child == 0:\n return False\n if parent == child:\n return True\n return is_parent(parent, pred_tree[child])\n\n # Child that is the most on the left\n def lc1(parent):\n for i in range(0, len(words)):\n if is_parent(parent, i):\n return i\n return -1\n \n # Child that is the most on the right\n def rc1(parent):\n for i in range(0, len(words), -1):\n if is_parent(parent, i):\n return i\n return -1\n\n lc1_s1 = lc1(stack[-1]) if stack else -1\n rc1_s1 = rc1(stack[-1]) if stack else -1\n lc1_s2 = lc1(stack[-2]) if len(stack) > 1 else -1\n rc1_s2 = rc1(stack[-2]) if len(stack) > 1 else -1\n\n s2_t_s1_t_b1_t = s2_t + \" \" + s1_t + \" \" + b1_t\n if lc1_s1 >= 0:\n s2_t_s1_t_lc1_s1_t = s2_t + \" \" + s1_t + \" \" + tags[lc1_s1]\n else:\n s2_t_s1_t_lc1_s1_t = \"<empty>\"\n if rc1_s1 >= 0:\n s2_t_s1_t_rc1_s1_t = s2_t + \" \" + s1_t + \" \" + tags[rc1_s1]\n else:\n s2_t_s1_t_rc1_s1_t = \"<empty>\"\n if lc1_s2 >= 0:\n s2_t_s1_t_lc1_s2_t = s2_t + \" \" + s1_t + \" \" + tags[rc1_s2]\n else:\n s2_t_s1_t_lc1_s2_t = \"<empty>\"\n if rc1_s2 >= 0:\n s2_t_s1_t_rc1_s2_t = s2_t + \" \" + s1_t + \" \" + tags[rc1_s2]\n else:\n s2_t_s1_t_rc1_s2_t = \"<empty>\"\n if lc1_s2 >= 0:\n s2_t_s1_w_rc1_s2_t = s2_t + \" \" + s1_w + \" \" + tags[rc1_s2]\n else:\n s2_t_s1_w_rc1_s2_t = \"<empty>\"\n if lc1_s1 >= 0:\n s2_t_s1_w_lc1_s1_t = s2_t + \" \" + s1_w + \" \" + tags[lc1_s1]\n else:\n s2_t_s1_w_lc1_s1_t = \"<empty>\"\n\n feat.append(\"b1_w:\" + b1_w)\n feat.append(\"b1_t:\" + b1_t)\n feat.append(\"b1_wt:\" + b1_wt)\n\n feat.append(\"b2_w:\" + b2_w)\n feat.append(\"b2_t:\" + b2_t)\n feat.append(\"b2_wt:\" + b2_wt)\n\n feat.append(\"b3_w:\" + b3_w)\n feat.append(\"b3_t:\" + b3_t)\n feat.append(\"b3_wt:\" + b3_wt)\n\n feat.append(\"s1_w:\" + s1_w)\n feat.append(\"s1_t:\" + s1_t)\n feat.append(\"s1_wt:\" + s1_wt)\n\n feat.append(\"s2_w:\" + s2_w)\n feat.append(\"s2_t:\" + 
s2_t)\n feat.append(\"s2_wt:\" + s2_wt)\n\n feat.append(\"s1_wt_s2_wt:\" + s1_wt + \" \" + s2_wt)\n feat.append(\"s1_wt_s2_w:\" + s1_wt + \" \" + s2_w)\n feat.append(\"s1_wt_s2_t:\" + s1_wt + \" \" + s2_t)\n feat.append(\"s1_w_s2_wt:\" + s1_w + \" \" + s2_wt)\n feat.append(\"s1_t_s2_wt:\" + s1_t + \" \" + s2_wt)\n feat.append(\"s1_w_s2_w:\" + s1_w + \" \" + s2_w)\n feat.append(\"s1_t_s2_t:\" + s1_t + \" \" + s2_t)\n feat.append(\"s1_t_b1_t:\" + s1_t + \" \" + b1_t)\n\n feat.append(\"s2_t_s1_t_b1_t:\" + s2_t_s1_t_b1_t)\n feat.append(\"s2_t_s1_t_lc1_s1_t:\" + s2_t_s1_t_lc1_s1_t)\n feat.append(\"s2_t_s1_t_rc1_s1_t:\" + s2_t_s1_t_rc1_s1_t)\n feat.append(\"s2_t_s1_t_lc1_s2_t:\" + s2_t_s1_t_lc1_s2_t)\n feat.append(\"s2_t_s1_t_rc1_s2_t:\" + s2_t_s1_t_rc1_s2_t)\n feat.append(\"s2_t_s1_w_rc1_s2_t:\" + s2_t_s1_w_rc1_s2_t)\n feat.append(\"s2_t_s1_w_lc1_s1_t:\" + s2_t_s1_w_lc1_s1_t)\n\n\n return feat", "def getFeatureNames(self):\n feature_names = super().getFeatureNames()\n feature_names.extend([\"f101\", \"f102\", \"f105\", \"fNum\", \"fCapStart\", \"fCapNoStart\"])\n return feature_names", "def get_fields() :\n st=hp.synfast(cltt+nltt,nside,new=True,verbose=False,pol=True)\n ff0=nmt.NmtField(mask_lss,[st])\n return ff0", "def extract_features(document):\n document_words = set(document)\n features = {}\n global word_features\t\n for word in word_features:\n features['contains(%s)' % word] = (word in document_words)\n return features", "def _make_feature_vec(self, word_list):\n\n # Pre-initialize an empty numpy array (for speed)\n feature_vec = np.zeros((self.num_features,), dtype=\"float32\")\n\n # index2word is a list that contains the names of the words in\n # the model's vocabulary. Convert it to a set, for speed.\n index2word_set = set(self.w2v_model.index2word)\n\n # Loop over each word in the word_list and, if it is in the model's\n # vocabulary, add its feature vector to the total\n nwords = 0\n for word in word_list:\n # NOTE: Careful there, if all words are in caps in the article,\n # this function will return nan values and blow up the forest.\n word = word.lower()\n if word in index2word_set:\n nwords += 1\n feature_vec = np.add(feature_vec, self.w2v_model[word])\n\n # Divide the result by the number of words to get the average\n feature_vec = np.divide(feature_vec, nwords)\n return feature_vec", "def extract_features_for_file(input_file, output_file, posfile):\n if not unlabeled:\n sents = read_file(input_file)\n else:\n sents = read_file_unlabeled(input_file)\n postags = get_pos_tags(posfile)\n with open(output_file,'w') as output_fileobj:\n if not unlabeled:\n for tokens,goldtags in sents:\n feats = extract_features_for_sentence(tokens, postags)\n for t in range(len(tokens)):\n feats_tabsep = \"\\t\".join(feats[t])\n print>>output_fileobj, \"%s\\t%s\" % (goldtags[t], feats_tabsep)\n print>>output_fileobj, \"\"\n else:\n for tokens in sents:\n feats = extract_features_for_sentence(tokens, postags)\n for t in range(len(tokens)):\n feats_tabsep = \"\\t\".join(feats[t])\n print>>output_fileobj, \"%s\" % (feats_tabsep) #for nolabels dat\n print>>output_fileobj, \"\"", "def extract_feature_vectors(file, dict):\n f = open(file, 'r')\n num_lines = 0\n\n for line in f:\n if(line.strip()):\n num_lines = num_lines + 1\n\n f.close()\n\n feature_matrix = np.zeros([num_lines, len(dict)])\n\n f = open(file, 'r')\n pos = 0\n\n for line in f:\n if(line.strip()):\n flist = extract_words(line)\n for word in flist:\n if(word in dict):\n feature_matrix[pos, dict.index(word)] = 1\n pos = pos + 1\n\n 
f.close()\n\n return feature_matrix", "def getTfidfFeat(words, dictionary, tfidf_model):\r\n # bow feats\r\n bow = dictionary.doc2bow(words)\r\n # tf-idf feats\r\n tfidf = tfidf_model[bow]\r\n feat = \"\"\r\n if len(tfidf) > 0:\r\n for f in tfidf:\r\n feat += \"%s:%s \" % (f[0], f[1])\r\n feat = feat[:-1]\r\n return feat", "def fvsSyntax(data):\n\n def token_to_pos(text):\n tokens = nltk.word_tokenize(text)\n return [p[1] for p in nltk.pos_tag(tokens)]\n\n texts_pos = [token_to_pos(text) for id, text, author in data]\n pos_list = ['NN', 'NNP', 'DT', 'IN', 'JJ', 'NNS']\n fvs_syntax = np.array([[text.count(pos) for pos in pos_list]\n for text in texts_pos]).astype(np.float64)\n labels_syntax = [author for id, text, author in data]\n\n return fvs_syntax, labels_syntax", "def feature_values(words, word_features):\r\n freq = nltk.FreqDist(words)\r\n values = []\r\n for wf in word_features:\r\n if wf in freq:\r\n values.append(freq[wf])\r\n else:\r\n values.append(0)\r\n return values", "def featurize(self, tokens):\n features = []\n \n nrc_hashtag_emotion_features = self.nrc_hashtag_emotion(tokens)\n nrc_affect_intensity_features = self.nrc_affect_intensity(tokens)\n nrc_hashtag_sentiment_lexicon_unigrams_features = self.nrc_hashtag_sentiment_lexicon_unigrams(tokens)\n nrc_hashtag_sentiment_lexicon_bigrams_features = self.nrc_hashtag_sentiment_lexicon_bigrams(tokens)\n sentiment140_unigrams_features = self.sentiment140_unigrams(tokens)\n sentiment140_bigrams_features = self.sentiment140_bigrams(tokens)\n senti_wordnet_features = self.senti_wordnet(tokens)\n bing_lui_sentiment_lexicons_features = self.bing_lui_sentiment_lexicons(tokens)\n nrc_expanded_lexicon_features = self.nrc_10_expanded(tokens)\n negating_word_list_features = self.negating_words_list(tokens)\n total_number_of_words_features = self.get_total_number_of_words(tokens)\n mpqa_subjectivity_lexicon_features = self.mpqa_subjectivity_lexicon(tokens)\n afinn_sentiment_features = self.afinn_sentiment_scores(tokens)\n # senti_strength_features = self.get_sentistrength(\" \".join(tokens))\n\n features.extend(nrc_hashtag_emotion_features.values()) # 10 features\n features.extend(nrc_affect_intensity_features.values()) # 10 features\n features.extend(nrc_hashtag_sentiment_lexicon_unigrams_features.values()) # 4 features\n features.extend(nrc_hashtag_sentiment_lexicon_bigrams_features.values()) # 4 features\n features.extend(sentiment140_unigrams_features.values()) # 4 features \n features.extend(sentiment140_bigrams_features.values()) # 4 features\n features.extend(senti_wordnet_features.values()) # 4 features\n features.extend(bing_lui_sentiment_lexicons_features.values()) # 2 features\n features.extend(nrc_expanded_lexicon_features.values()) # 10 features\n features.extend(negating_word_list_features.values()) # 1 feature\n features.extend(total_number_of_words_features.values()) # 1 feature\n features.extend(mpqa_subjectivity_lexicon_features.values()) # 2 features\n features.extend(afinn_sentiment_features.values()) # 2 features\n # features.extend(senti_strength_features.values()) # 2 features\n\n return features", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def get_features_test(tweets):\n feats = get_feature_array(tweets)\n tfidf = vectorizer.transform(tweets).toarray()\n M = np.concatenate([tfidf,feats],axis=1)\n return M", "def create_feature_vector(ix, term_dict, bow):\n\n\ttfv = list()\n\t# get corpus length (n. 
of docs)\n\tnum_docs = ix.num_docs\n\tfor idx, tf in bow:\n\t\t# get term from dict index\n\t\tterm = ix[idx]\n\t\t# filter out terms not contained in self.term_dict\n\t\tif term not in term_dict:\n\t\t\tcontinue\n\t\t# filter out terms w/ length gt 20\n\t\tif len(term) > 20:\n\t\t\tcontinue\n\t\t# filter out non-alphabetical terms\n\t\tif not term.isalpha():\n\t\t\tcontinue\n\t\t# get document frequency \n\t\tdf = ix.dfs[idx]\n\t\t# compute ratio between df and num_docs\n\t\tratio = df / num_docs\n\t\tif ratio > 0.1: # skip term - requires tuning: check if it's okay to keep it as is\n\t\t\tcontinue\n\t\t# append term w/ tf to tfv\n\t\ttfv.append((term, tf))\n\treturn tfv", "def sent_features(tweet):\n twitter_objs = count_twitter_objs(tweet)\n tweet=clean_tweet(tweet) \n sentiment = sentiment_analyzer.polarity_scores(tweet)\n #Get text only\n words = preprocess(tweet) \n syllables = textstat.syllable_count(words)\n num_chars = sum(len(w) for w in words)\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n \n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n \n \\\n retweet = 0\n if \"rt\" in words:\n retweet = 1\n features = [FKRA, FRE,syllables, avg_syl, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['neg'], sentiment['pos'], sentiment['neu'], sentiment['compound'],\n twitter_objs[2], twitter_objs[1],\n twitter_objs[0], retweet]\n return features", "def token_features(tokens, feats):\n ###TODO\n \n for token in tokens :\n t = 'token=' + token\n \n if t not in feats.keys() : \n feats.setdefault(t,1)\n else :\n feats[t] += 1", "def getFeatures(self,layer): \n numFeatures = layer.GetFeatureCount()\n features = []\n for i in range(numFeatures):\n feature = layer.GetNextFeature()\n if feature is not None:\n geomRef = feature.GetGeometryRef()\n if((geomRef is not None and geomRef.GetPointCount() != 0)):\n features.append(self.getFeatureInfo(feature))\n return features", "def countize(word, ind, count_words, features):\n word = clean(word)\n word = word.split()\n if len(word)>1:\n for i in range(1,len(word)):\n bigram = (word[i-1],word[i])\n count_words[ind].append(bigram)\n features.append(bigram)\n if len(word)>2:\n for i in range(2,len(word)):\n trigram = (word[i-2],word[i-1], word[i])\n count_words[ind].append(trigram)\n features.append(trigram)\n for i in range(len(word)):\n unigram = word[i]\n count_words[ind].append((unigram))\n features.append((unigram))\n return count_words, features", "def extract_features(docs_train, docs_test, perform_dimensionality_reduction):\n word_ngram_range = (1, 4)\n char_ngram_range = (2, 5)\n\n '''\n Build an n grams vectorizer with word_n_gram_range and char_n_gram_range\n '''\n\n ngrams_vectorizer = create_n_grams_vectorizer(\n word_ngram_range, char_ngram_range)\n\n # use the n_gram vectorizer to form the train and test dataset\n # it will take a lot of time... 
i think\n X_train = ngrams_vectorizer.fit_transform(docs_train)\n X_test = ngrams_vectorizer.transform(docs_test)\n print(\"Performed fitting of data\")\n\n ############ dimensionality reduction ################\n\n if(perform_dimensionality_reduction == True):\n X_train, X_test = perform_dimensionality_reduction(X_train, X_test)\n\n # print(docs_train[0])\n return X_train, X_test", "def extractFeatures(self, data, tf=False):\n tfidf_training_matrix, tfidf_terms = self.useTfidfVectorizer(data)\n \n if tf:\n tf_vectorizer = CountVectorizer(max_df=0.5, min_df=2, max_features=10000,\n stop_words='english')\n \n tf_training_matrix = tf_vectorizer.fit_transform(data)\n tf_terms = tf_vectorizer.get_feature_names()\n \n return tfidf_training_matrix, tfidf_terms, tf_training_matrix, tf_terms\n \n else:\n return tfidf_training_matrix, tfidf_terms", "def emailFeatures(word_indices, n=1899):\n x = np.zeros(n)\n x[word_indices] = 1\n return x.reshape(1, -1)", "def extract_features_scope(sentence_dicts, mode='training'):\n instances = []\n sentence_splits = []\n for sent in sentence_dicts:\n if not sent['neg']:\n continue\n print(sent)\n graph = make_dir_graph_for_sentence(sent)\n bidir_graph = make_bidir_graph_for_sentence(sent)\n for cue_i, (cue, cue_position, cue_type) in enumerate(sent['cues']):\n seq_length = -1\n for key, value in sent.items():\n features = {}\n if isinstance(key, int):\n features['token'] = value[3]\n features['lemma'] = value[4]\n features['pos'] = value[5]\n features['dir-dep-dist'] = get_shortest_path(graph, sent, cue_position, key)\n features['dep-graph-path'] = get_dep_graph_path(bidir_graph, sent, cue_position, key)\n\n dist = key - cue_position\n nor_index = find_nor_index(sent)\n if cue == \"neither\" and nor_index > -1 and abs(key-nor_index) < abs(dist):\n dist = key - nor_index\n #token is to the left of cue\n if dist < 0:\n if abs(dist) <= 9:\n features['left-cue-dist'] = 'A'\n else:\n features['left-cue-dist'] = 'B'\n features['right-cue-dist'] = 'null'\n #token is to the right of cue\n elif dist > 0:\n if dist <= 15:\n features['right-cue-dist'] = 'A'\n else:\n features['right-cue-dist'] = 'B'\n features['left-cue-dist'] = 'null'\n else:\n features['left-cue-dist'] = '0'\n features['right-cue-dist'] = '0'\n features['cue-type'] = cue_type\n features['cue-pos'] = sent[cue_position][5]\n\n if key == 0:\n features['bw-bigram1'] = 'null'\n features['bw-bigram2'] = 'null'\n else:\n features['bw-bigram1'] = \"%s_*\" %sent[key-1][4]\n features['bw-bigram2'] = \"%s_*\" %sent[key-1][5]\n if not (key+1) in sent:\n features['fw-bigram1'] = 'null'\n features['fw-bigram2'] = 'null'\n else:\n features['fw-bigram1'] = \"*_%s\" %sent[key+1][4]\n features['fw-bigram2'] = \"*_%s\" %sent[key+1][5]\n instances.append(features)\n if key > seq_length:\n seq_length = key\n sentence_splits.append(seq_length)\n if mode == 'training':\n labels = extract_labels_scope(sentence_dicts, mode)\n return sentence_dicts, instances, labels, sentence_splits\n return sentence_dicts, instances, sentence_splits", "def _update_feature_vec(fvec, word, tag_ngram):", "def get_features(words, vectors):\n result = [vectors.loc[word].values for word in words if word in df_keys.values.reshape(-1)]\n if result:\n return np.stack(result)\n return None", "def get_features(docs, max_length):\n docs = list(docs)\n Xs = numpy.zeros((len(docs), max_length), dtype='int32')\n for i, doc in enumerate(docs):\n j = 0\n for token in doc:\n vector_id = token.vocab.vectors.find(key=token.orth)\n if vector_id >= 0:\n Xs[i, j] = 
vector_id\n else:\n Xs[i, j] = 0\n j += 1\n if j >= max_length:\n break\n return Xs", "def get_word_stats(segments, feats_dict):\r\n word_count_list = []\r\n word_lengths = []\r\n long_count = 0\r\n for segment in segments:\r\n word_count_list.append(len(segment))\r\n for word in segment:\r\n word_lengths.append(len(word))\r\n if len(word) > 6:\r\n long_count += 1\r\n # Compute segment level statistics\r\n feats_dict['wc_mean'] = np.mean(word_count_list) if word_count_list else float('nan')\r\n feats_dict['wc_median'] = np.median(word_count_list) if word_count_list else float('nan')\r\n feats_dict['wc_stdev'] = np.std(word_count_list) if word_count_list else float('nan')\r\n feats_dict['wc_min'] = min(word_count_list) if word_count_list else float('nan')\r\n feats_dict['wc_max'] = max(word_count_list) if word_count_list else float('nan')\r\n feats_dict['total_count'] = sum(word_count_list) if word_count_list else float('nan')\r\n\r\n # Compute fraction of words across whole call that are long (i.e. 6+ words)\r\n feats_dict['lw_count'] = (long_count / feats_dict['total_count']) if feats_dict['total_count'] else float('nan')\r\n # Compute mean length of any word used\r\n feats_dict['word_len'] = np.mean(word_lengths) if word_lengths else float('nan')", "def feat_dict(pos_feat,text):\n dict = {}\n bigrams = ngrams(word_tokenize(text),2)\n trigrams = ngrams(word_tokenize(text),3)\n \n for feat in pos_feat:\n dict[feat]=features(feat,text,bigrams,[],[])\n return dict", "def token2features(sent, i, add_neighs=True):\n \n def add_lexicon_feats(tpl, lookupLexiconDict, usedTags):\n if tpl in lookupLexiconDict:\n for cls in lookupLexiconDict[tpl]:\n if cls not in usedTags:\n ftrs.append(cls) #<--------------------\n usedTags[cls]=1\n else:\n usedTags[cls]+=1\n \n \n ftrs = []\n # bias\n ftrs.append(\"BIAS\")\n # position features\n if i == 0:\n ftrs.append(\"SENT_BEGIN\")\n if i == len(sent)-1:\n ftrs.append(\"SENT_END\")\n\n # the word itself\n word = unicode(sent[i])\n ftrs.append(\"WORD=\" + word)\n word_lcase = word.lower()\n ftrs.append(\"LCASE=\" + word_lcase)\n # some features of the word\n if word.isalnum():\n ftrs.append(\"IS_ALNUM\")\n if word.isnumeric():\n ftrs.append(\"IS_NUMERIC\")\n if word.isdigit():\n ftrs.append(\"IS_DIGIT\")\n if word.isupper():\n ftrs.append(\"IS_UPPER\")\n if word.islower():\n ftrs.append(\"IS_LOWER\")\n\n # USE LEXICONS################################################## !\n maxTries=5\n usedTags = {}\n \n #look front up to 5 places \n if type(sent[0])== str: lSent = map(str.lower, sent)\n else: lSent = map(unicode.lower, sent)\n while(maxTries!=0):\n\n if len(lSent)-i>=maxTries:\n tpl = tuple(lSent[i:maxTries+i])\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n maxTries-=1\n \n #also look backwards: lexicons\n if i>=1:\n tpl = tuple(lSent[i-1:i+1]) # size 2\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n if i<len(lSent) : \n tpl = tuple(lSent[i-1:i+2]) # size 3\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n \n #analyze and add bias towards max used classification \n if usedTags:\n usedTags = list(usedTags.iteritems())\n maxused = max(usedTags, key=operator.itemgetter(1))\n minused = min(usedTags, key=operator.itemgetter(1)) \n if minused[1]!=maxused[1]:\n ftrs.append('BIAS='+maxused[0])\n \n\n #R ************************************************\n if len(word) > 15:\n ftrs.append(\"IS_LENGTHY\")\n if word[0].upper():\n ftrs.append(\"IS_FIRST_UPPER\")\n if word.__contains__(\"http\"):\n ftrs.append(\"IS_HYPERLINK\")\n if any(x.isupper() for 
x in word):\n ftrs.append(\"IS_MIXEDCASE\")\n if word.isupper():\n ftrs.append(\"ALL_UPPERCASE\")\n if word.__contains__(\"@\"):\n ftrs.append(\"IS_TAG\")\n if word.__contains__(\"#\"):\n ftrs.append(\"IS_HASHTAG\")\n if word in stop_words:\n ftrs.append(\"IS_STOPWORD\")\n if word in ['ing','ly','ed','ious','ies','ive','es','s','ment']:\n ftrs.append(\"CONTAINS_SUFFIX\")\n ftrs.append( nltk.pos_tag([word])[0][1] )\n\n # previous/next word feats\n if add_neighs:\n if i > 0:\n for pf in token2features(sent, i-1, add_neighs = False):\n ftrs.append(\"PREV_\" + pf)\n if i < len(sent)-1:\n for pf in token2features(sent, i+1, add_neighs = False):\n ftrs.append(\"NEXT_\" + pf)\n \n \n \n # return it!\n return ftrs", "def _collect_features(self, save=None):\n makedir(self.modeldir)\n if save is None:\n save = '{:s}/all.fts'.format(self.modeldir)\n \n feats = []\n fls = glob('{:s}/*.fts'.format(self.modeldir))\n for i,fl in enumerate(fls):\n if fl.split(os.sep)[-1].split('.')[0] in ['all','ranked']: continue\n with open(fl) as fp:\n lns = fp.readlines()\n feats += [' '.join(ln.rstrip().split()[1:]) for ln in lns] \n\n labels = list(set(feats))\n freqs = [feats.count(label) for label in labels]\n labels = [label for _,label in sorted(zip(freqs,labels))][::-1]\n freqs = sorted(freqs)[::-1]\n # write out feature frequencies\n with open(save, 'w') as fp:\n _ = [fp.write('{:d},{:s}\\n'.format(freq,ft)) for freq,ft in zip(freqs,labels)]\n return labels, freqs", "def getFeaturesByBBox(self,bboxtuple, srsname):\n raise NotImplementedError", "def compute_nterm_feature_matrix(sequences, split, dinuc=False):\n if 0 < split < 23:\n X = binned_bag_of_words(sequences.str[:split],\n int(split), n=int(split),\n dinuc=dinuc, cterm=False)\n X2 = binned_bag_of_words(sequences.str[split:],\n 1, n=23-int(split),\n dinuc=False, cterm=False)\n X = np.hstack([X2.toarray(), X])\n elif split == 0:\n X = binned_bag_of_words(sequences,\n int(split), n=int(split),\n dinuc=False, cterm=False)\n elif split == 23:\n X = binned_bag_of_words(sequences,\n int(split), n=int(split),\n dinuc=dinuc, cterm=False)\n return X", "def get_training(feature_path): \n features = np.loadtxt(feature_path)\n feature_size = features.shape[1] -1 \n features_in = features[:,0:feature_size]\n features_out = features[:,-1]\n #features_out = np.array(map(lambda x: x if x else 0, features_out_unnorm))\n return features_in, features_out", "def makeFeatureVec(words, model, num_features):\n\t# Initialize an empty numpy array (for speed) \n\tfeatureVec = np.zeros((num_features,), dtype=\"float32\")\n\t# Initialize a counter (number of words)\n\tnwords = 0.\n\t \n\t# Index2word is a list that contains the names of the words in the model's vocabulary. 
\n\tindex2word_set = set(model.index2word)\n\t# \n\t# Loop over each word in the review and, if it is in the model's vocaublary, add \n\t# its feature vector to the total \n\tfor word in words:\n\t\tif word in index2word_set:\n\t\t\tnwords = nwords + 1.\n\t\t\tfeatureVec = np.add(featureVec,model[word])\n\t# \n\t# Divide the result by the number of words to get the average \n\tfeatureVec = np.divide(featureVec,nwords)\n\treturn featureVec", "def generate_feats_and_labels(features_without_activity, feature_with_activity):\n num_time_steps = 64\n step = 6\n segments = []\n labels = []\n for i in range(0, len(features_without_activity) - num_time_steps, step):\n xlist = [features_without_activity[cols].values[i: i + num_time_steps] for cols in features_without_activity.columns]\n label = stats.mode(feature_with_activity['activity'][i: i + num_time_steps])[0][0]\n segments.append(xlist)\n labels.append(label)\n shape_of_segment = np.array(segments).shape\n labels = np.asarray(pd.get_dummies(labels), dtype=np.float32)\n num_features = shape_of_segment[1]\n reshaped_segments = np.asarray(segments, dtype=np.float32).reshape(-1, num_time_steps, num_features)\n return num_features, reshaped_segments, labels", "def extract_features(input_feature_map, points=conv43Points):\n arr = []\n for y,x in points:\n arr.append(input_feature_map[:,y,x,:])\n return tf.stack(arr, axis=1, name=\"extracted_features\"), len(points)", "def extractFeatures(self, datum):\n abstract", "def features(self, sentence, tags, index):\n return{\n 'word': sentence[ index ],\n 'prevWord': '' if index == 0 else sentence[ index - 1 ],\n 'nextWord': '' if index == len( sentence ) -1 else sentence[ index + 1 ],\n 'isFirst': index == 0,\n 'isLast': index == len( sentence ) - 1,\n 'isCapitalized': sentence[index][0].upper() == sentence[ index ][ 0],\n 'isAllCaps': sentence[ index ].upper() == sentence[ index ],\n 'isAllLowers': sentence[ index ].lower() == sentence[ index ],\n 'prefix-1': sentence[ index ][ 0 ],\n 'prefix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][:2],\n 'prefix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][:3],\n 'prefix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][:4],\n 'suffix-1': sentence[ index ][ -1 ],\n 'suffix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][-2:],\n 'suffix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][-3:],\n 'suffix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][-4:],\n 'tag-1': '' if index == 0 else tags[ index - 1 ],\n 'tag-2': '' if index < 2 else tags[ index - 2 ]\n }" ]
[ "0.62442225", "0.62187326", "0.60698456", "0.6069478", "0.5983086", "0.5907946", "0.58122337", "0.5752974", "0.56823516", "0.5657387", "0.5654542", "0.5639809", "0.5626534", "0.5601375", "0.55624735", "0.55408305", "0.55369127", "0.55353904", "0.5529067", "0.55137616", "0.5497478", "0.54859024", "0.5479119", "0.54790163", "0.54158705", "0.54146236", "0.54145515", "0.54024404", "0.5394978", "0.5369311", "0.536816", "0.5365094", "0.5361107", "0.5343357", "0.53412837", "0.53259057", "0.5325319", "0.5318788", "0.5308954", "0.5279765", "0.5279312", "0.5265343", "0.5255722", "0.52555025", "0.5253072", "0.5246884", "0.52362996", "0.5229722", "0.5223779", "0.52181697", "0.52026945", "0.51761603", "0.5175382", "0.5172118", "0.5171618", "0.51659995", "0.51359916", "0.512828", "0.5126372", "0.5124352", "0.5123621", "0.5113676", "0.51099586", "0.51091933", "0.5102144", "0.5085819", "0.50791377", "0.50721925", "0.50613225", "0.5050951", "0.5047046", "0.5038375", "0.5031579", "0.5028474", "0.5026401", "0.5024691", "0.50139636", "0.50090694", "0.500723", "0.500478", "0.49937636", "0.4992001", "0.49865007", "0.49787885", "0.49675244", "0.49665457", "0.496646", "0.49647406", "0.49638173", "0.49636546", "0.49600402", "0.49598604", "0.4957826", "0.49573585", "0.49491566", "0.49444103", "0.49433818", "0.4941854", "0.49381924", "0.49364963" ]
0.5945811
5
Return a vector in which each dimension is the number of times a feature-value pair occurs in the word
def bag_of_features(self, word, normalize=True): word_features = self.word_fts(word, normalize) features = [v + f for f in self.names for v in ['+', '0', '-']] bag = collections.OrderedDict() for f in features: bag[f] = 0 vdict = {-1: '-', 0: '0', 1: '+'} for w in word_features: for (f, v) in w.items(): bag[vdict[v] + f] += 1 return numpy.array(list(bag.values()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def countize(word, ind, count_words, features):\n word = clean(word)\n word = word.split()\n if len(word)>1:\n for i in range(1,len(word)):\n bigram = (word[i-1],word[i])\n count_words[ind].append(bigram)\n features.append(bigram)\n if len(word)>2:\n for i in range(2,len(word)):\n trigram = (word[i-2],word[i-1], word[i])\n count_words[ind].append(trigram)\n features.append(trigram)\n for i in range(len(word)):\n unigram = word[i]\n count_words[ind].append((unigram))\n features.append((unigram))\n return count_words, features", "def get_frequencies(tokens):\n cnt = {}\n\n for word in tokens:\n if word not in cnt:\n cnt[word] = 0\n\n cnt[word] += 1\n\n return cnt", "def count(words):\n\n values = []\n \n # dictionary whose keys are words and values number of occurrences\n D = {}\n\n for word in words:\n # if word is already in dict add 1 to the count\n try : D[word] +=1\n # otherwise add entrye to dict\n except : D[word] = 1\n\n values += [D[word]]\n\n return values", "def feature_values(words, word_features):\r\n freq = nltk.FreqDist(words)\r\n values = []\r\n for wf in word_features:\r\n if wf in freq:\r\n values.append(freq[wf])\r\n else:\r\n values.append(0)\r\n return values", "def count_words(words, dataset):\n result = []\n\n dist = np.sum(dataset, axis=0)\n\n for tag, count in zip(words, dist):\n result.append((count, tag))\n\n return result", "def extractWordFeatures(x):\n # BEGIN_YOUR_CODE (around 5 lines of code expected)\n a = Counter(x.split())\n return dict(a)\n # END_YOUR_CODE", "def count(self, word):\n pass", "def count_each_tag(mat):\n cnts = {}\n for vec in mat:\n if vec[-1] not in cnts:\n cnts[vec[-1]] = 0.0\n cnts[vec[-1]] += 1.0\n return cnts", "def get_occurrences(self, word):\n try:\n self.token2id[word] # is this a token or an id?\n except KeyError:\n word = self.dictionary.id2token[word]\n return self.model.vocab[word].count", "def tf(word,document):\n words = document.split()\n\n return sum(1 for w in words if w == word)", "def fruit_nb(x):\r\n return len([y for y in metamer(x) if Feature(y, 'fruit')])", "def count_words(data):\n return np.array([len(text.split()) for text in data]).reshape(-1, 1)", "def find_freq(word):\n word_list = [1 for line in lines if word in line] # create a list of 1s that reduce() can use\n return reduce(lambda x,y: x+1 if y==1 else x, word_list) # x takes on the type of the elements in the array", "def countFreq(self,document):\n self.document = document\n vocab=['python','js','android','php','django','javascript','oracle','ruby','rails','java']\n cnt_vector = CountVectorizer(vocabulary=vocab)\n self.freq_term_matrix = cnt_vector.fit_transform(self.document)\n return self.freq_term_matrix.toarray()", "def token_features(tokens, feats):\n ###TODO\n \n for token in tokens :\n t = 'token=' + token\n \n if t not in feats.keys() : \n feats.setdefault(t,1)\n else :\n feats[t] += 1", "def find_freq_words(words):\n word_list = [1 for line in f if words in line]\n return reduce(lambda x,y: x+1 if y==1 else x, word_list) # x takes on the type of the elements in the array", "def count_tokens(self, words: Iterable[str]) -> Dict[str, int]:\r\n token_counts = Counter(words)\r\n return {\" \".join(token): count for token, count in token_counts.items()}", "def freq(word, document):\n return document.split(None).count(word)", "def lexicon_features(tokens, feats):\n# feats['neg_words'] = 0;\n# feats['pos_words'] = 0;\n# tokens = list([token.lower() for token in tokens])\n# feats['neg_words'] , feats['pos_words'] = np.count_nonzero(np.in1d(tokens, 
list(neg_words))), np.count_nonzero(np.in1d(tokens, list(pos_words)))\n neg_count=0\n pos_count=0\n for i in tokens:\n if i.lower() in neg_words:\n neg_count+=1\n if i.lower() in pos_words:\n pos_count+=1\n feats[\"neg_words\"]=neg_count\n feats[\"pos_words\"]=pos_count", "def total_occurrences(word1, word2, flag):\n result = 0\n word1_length = len(word1)\n for i in range(word1_length):\n if word1[i] == flag:\n result += 1\n\n word2_length = len(word2)\n for i in range(word2_length):\n if word2[i] == flag:\n result += 1\n\n return result", "def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq", "def instance_dist(novel, word):\n output = []\n count = 0\n start = False\n text = novel.get_tokenized_text()\n\n for e in text:\n if not start:\n if e == word:\n start = True\n else:\n count += 1\n if e == word:\n output.append(count)\n count = 0\n return output", "def get_count_vector(self, result_vector, \n feature_type, target_label ):\n key = []\n for versus_labels in result_vector:\n tokened_versus_labels = re.findall(\"'(\\w+)'\", versus_labels)\n key.append(tuple(tokened_versus_labels))\n \n for t in self.total_histogram[feature_type]:\n if sorted(key) == sorted(eval(t)):\n key = t\n \n target_label = unicode(target_label)\n \n \n bin_indices = []\n for versus_labels in result_vector:\n bin_indices.append(self.get_bin(result_vector[versus_labels]))\n bin_indices = unicode(tuple(bin_indices))\n \n if target_label in self.total_histogram[feature_type][key]:\n if bin_indices in self.total_histogram[feature_type][key][target_label]:\n return self.total_histogram[feature_type][key][target_label][bin_indices] \n return 0", "def featurize(vector,features):\n dictionary = collections.defaultdict(lambda:0)\n for feature in iter(set(features)):\n dictionary[feature] = [vector[key][feature] if feature in vector[key] else 0 for key in vector] #populates vectors with zeroes where there's no value in an industry for an n-gram.\n return dictionary", "def get_word_frequencies(documents):\n cv_model = CountVectorizer(binary=True)\n tf_matrix = cv_model.fit_transform(documents)\n tf_matrix_transpose = tf_matrix.transpose()\n\n vocabulary = get_vocabulary(documents)\n n_words = len(vocabulary)\n\n word_frequency = {}\n word_frequency_in_documents = {}\n\n for word_idx in range(n_words):\n word = vocabulary[word_idx]\n tf_word = tf_matrix_transpose[word_idx]\n\n # getnnz -> Get the count of explicitly-stored values (nonzeros)\n word_frequency[word] = float(tf_word.getnnz(1))\n # nonzero -> Return the indices of the elements that are non-zero\n word_frequency_in_documents[word] = set(tf_word.nonzero()[1])\n\n return word_frequency, word_frequency_in_documents", "def frequency(self, word):\n if word in self:\n return self[word].tokens\n return 0", "def totalOccurrences(word, words):\n totalCount = 0\n if word in words:\n for item in words[word]:\n totalCount += item.count\n return totalCount", "def how_many_vocals(word):\n\n word= word.lower()\n result1 = word.count('a')\n result2 = word.count('e')\n result3 = word.count('i')\n result4 = word.count('o')\n result5 = word.count('u')\n\n total_result = result1 +result2+result3+result4+result5\n return total_result", "def count_words(word, words):\n same_words_in_message = 0\n for element in words:\n if element == word:\n same_words_in_message += 1\n return same_words_in_message", "def word_count(phrase):\n return collections.Counter(phrase.split())", "def 
get_corpus_counts(x,y,label):\n corpus_counts = defaultdict(float)\n for pos, curr_label in enumerate(y):\n if curr_label == label:\n for word in x[pos]:\n corpus_counts[word] += x[pos][word]\n return corpus_counts", "def word_stats(word_counts):\n num_unique = len(word_counts)\n counts = word_counts.values()\n return (num_unique,counts)", "def ngram_frequency(word):\r\n\tword = word.lower()\r\n\tword = re.sub(r'[^A-Za-z. ]','',word)\r\n\tngram_statistics = {}\r\n\tngram_categorization_model_keys = []\r\n\tngram_categorization_model_occurances = []\r\n\tres = [0 for _ in range(0,300)]\r\n\tfor ituple in ngram_categorization_model:\r\n\t\tngram_categorization_model_keys.append(ituple[0])\r\n\t\tngram_categorization_model_occurances.append(int(ituple[1]))\r\n\tfor grams in range(2,6):\r\n\t\tfor i in range(len(word)-grams+1):\r\n\t\t\tseq = word[i:i+grams]\r\n\t\t\tif seq not in ngram_statistics.keys():\r\n\t\t\t\tngram_statistics.update({seq:1})\r\n\t\t\telse:\r\n\t\t\t\tngram_occurances = ngram_statistics[seq]\r\n\t\t\t\tngram_statistics.update({seq:ngram_occurances+1})\r\n\tngram_frequency_keys = ngram_statistics.keys()\r\n\tngram_frequency_occurances = list(ngram_statistics.values())\r\n\tfor index, val in enumerate(ngram_categorization_model_keys):\r\n\t\tfor index1, val1 in enumerate(ngram_frequency_keys):\r\n\t\t\tif val == val1:\r\n\t\t\t\tres[index] = ngram_categorization_model_occurances[index]*ngram_frequency_occurances[index1]\r\n\treturn res", "def word_count(text):\n\n # Tokenize text on whitespace / newline\n words = text.strip().split()\n\n # Create a dictionary from the set of tokens, initializing each count to 0\n counts = dict.fromkeys(words, 0)\n\n # Iterate over the text to count occurences of each token\n for word in words:\n counts[word] += 1\n\n # Return the counts\n return counts", "def count(words):\n word_count = {}\n num_words = 0\n unique_words = 0\n for word in words:\n num_words += 1\n if word_count.has_key(word):\n word_count[word] += 1\n else:\n word_count[word] = 1\n unique_words += 1\n word_count[\"total\"] = num_words\n word_count[\"unique\"] = unique_words\n return word_count", "def feature_index(self, feature: Text) -> int:\n count = 0\n for feature_name in self.vectorizer.get_feature_names():\n if(feature == feature_name):\n return count\n count += 1", "def word_frequency(words):\r\n frequency = {}\r\n for w in words:\r\n frequency[w] = frequency.get(w, 0) + 1\r\n return frequency", "def countOccurrences(self, wordsToCheck):\n count = 0\n for token in self.importantTokenList():\n w = token.text\n for wtc in wordsToCheck:\n if wtc == w:\n count = count + 1\n return count", "def count(self, word):\n return self.tokens.count(word)", "def count_terms(self, tokens):\n\n terms = [self.term_match(t) for t in tokens ]\n \n terms = [t for t in terms if t != None]\n\n #print terms\n lf = dict(Counter(terms))\n for k in lf:\n lf[k] /= float(len(tokens))\n #lf[k] = 1 # binarize?\n pass\n return lf", "def word_count(self):\n from collections import Counter\n counts = Counter(self._replace_non_alnum().split())\n return counts", "def counts(e, x):\n arr = np.asarray(arr)\n return len(np.where(arr == x)[0])", "def get_feature_set_SC(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 
0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = 0.0\n obj_score = 0.0\n nrof_subwords = 0\n nrof_objwords = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n sub_score = sub_score + sentimentvalues[word][0]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][1]>0:\n sub_score = sub_score + sentimentvalues[word][1]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][2]>0:\n obj_score = obj_score + sentimentvalues[word][2]\n nrof_objwords = nrof_objwords + 1\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n if nrof_subwords>0:\n additional_freq[\"subjective_words\"] = nrof_subwords*1.0\n if nrof_objwords>0:\n additional_freq[\"objective_words\"] = nrof_objwords*1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END", "def word_count(input_str):\n counts = dict()\n words = input_str.split()\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts", "def convert_word_to_count(counter={}, doc=[]):\n for sentence in doc:\n for word in sentence.split():\n if word not in counter:\n counter[word] = 1\n else:\n counter[word] += 1\n return counter", "def word_count(text):\n # Use a dictionary to store the words\n words = {}\n\n # Simple way to strip extra whitespace\n text = ' '.join(text.split())\n\n # Now iterate through, splitting on space\n for word in text.split(\" \"):\n if word in words:\n words[word] += 1\n else:\n words[word] = 1\n\n return words", "def count_words(item):\n word, occurences = item\n return word, sum(occurences)", "def get_tag_counts(label_matches):\r\n\ttag_counts = {}\r\n\tfor word_and_tag in label_matches.keys():\r\n\t\tcurrent_count = tag_counts.get(word_and_tag[_TAG], 0)\r\n\t\ttag_counts[word_and_tag[_TAG]] = current_count+1\r\n\treturn tag_counts", "def calc_tf(doc):\r\n tf = {}\r\n for term in doc:\r\n if term not in tf:\r\n tf[term] = doc.count(term)\r\n 
return tf", "def n_containing(word, bloblist):\n return sum(1 for blob in bloblist if word in blob.words)", "def number_of_variables(dataset, name_of_variable):\r\n first_row = dataset[0].keys()\r\n num = 0\r\n for variable in first_row:\r\n if name_of_variable in variable:\r\n num += 1 \r\n return num", "def partition(list_of_tokens):\n freq_count = defaultdict(int)\n for word in list_of_tokens:\n freq_count[word] += 1\n\n return freq_count", "def vectorize_content(self, content):\r\n file_dict = {}\r\n for word in content:\r\n if word in file_dict:\r\n file_dict[word] += 1\r\n else:\r\n file_dict[word] = 1\r\n return file_dict", "def word_count(poem):\n lines = [line for line in poem.split(\"\\n\") if line]\n word_map = {}\n for line in lines:\n for word in line.split(\" \"):\n if word:\n if word in word_map:\n word_map[word] += 1\n else:\n word_map[word] = 1\n return word_map", "def word_frequency(seq):\n\n # Initializes an emtpy hash map from HashMap class\n hash_map = HashMap()\n\n # For each word (not unique) in sequence\n for word in seq:\n\n # if that word is already in hash map\n if word in hash_map:\n\n # Increment value for that word\n hash_map[word] += 1\n\n # if word not yet in hash map\n else:\n\n # set count value for word equal to one\n hash_map[word] = 1\n\n # return filled hash map from sequence, words and words counts\n return hash_map", "def get_text_frequencies(connection, feature, text_id):\n tindex2mtindex = {}\n findex2mfindex = {}\n word_counts = Counter()\n word_feature_pairs = set()\n text_token_count = 0\n unit_proj = {\n '_id': False,\n 'tokens.features.form': True\n }\n if feature != 'form':\n unit_proj['tokens.features.'+feature] = True\n db_cursor = connection.connection[Unit.collection].find(\n {'text': text_id, 'unit_type': 'line'},\n unit_proj\n )\n for unit in db_cursor:\n text_token_count += len(unit['tokens'])\n for token in unit['tokens']:\n cur_features = token['features']\n # use the form index as an identifier for this token's word\n # type\n cur_tindex = cur_features['form'][0]\n if cur_tindex not in tindex2mtindex:\n tindex2mtindex[cur_tindex] = len(tindex2mtindex)\n mtindex = tindex2mtindex[cur_tindex]\n # we want to count word types by matrix indices for faster\n # lookup when we get to the stage of counting up word type\n # occurrences\n word_counts[mtindex] += 1\n for cur_findex in cur_features[feature]:\n if cur_findex not in findex2mfindex:\n findex2mfindex[cur_findex] = len(findex2mfindex)\n mfindex = findex2mfindex[cur_findex]\n # record when a word type is associated with a feature type\n word_feature_pairs.add((mtindex, mfindex))\n csr_rows = []\n csr_cols = []\n for mtindex, mfindex in word_feature_pairs:\n csr_rows.append(mtindex)\n csr_cols.append(mfindex)\n word_feature_matrix = csr_matrix(\n (\n np.ones(len(csr_rows), dtype=np.bool),\n (np.array(csr_rows), np.array(csr_cols))\n ),\n shape=(len(tindex2mtindex), len(findex2mfindex))\n )\n # if matching_words_matrix[i, j] == True, then the word represented by\n # position i shared at least one feature type with the word represented\n # by position j\n matching_words_matrix = word_feature_matrix.dot(\n word_feature_matrix.transpose())\n\n mtindex2tindex = {\n mtindex: tindex for tindex, mtindex in tindex2mtindex.items()}\n freqs = {}\n coo = matching_words_matrix.tocoo()\n for i, j in zip(coo.row, coo.col):\n # since only matching tokens remain, the column indices indicate\n # which tokens match the token represented by row i; we need to\n # count up how many times each word appeared\n 
cur_token = mtindex2tindex[i]\n if cur_token not in freqs:\n freqs[cur_token] = word_counts[j]\n else:\n freqs[cur_token] += word_counts[j]\n for tok_ind in freqs:\n freqs[tok_ind] = freqs[tok_ind] / text_token_count\n return freqs", "def count_word_instances_in_file(file_name, target_word):\n\n\tcount = 0\n\twords = get_words_in_file(file_name)\n\tfor word in words:\n\t\tif target_word == word:\n\t\t\tcount += 1\n\treturn count", "def word_occurrences(corpus):\n occur_array = []\n\n for e in corpus:\n occur = Counter(e)\n occur_array.append(occur)\n\n return occur_array", "def wordcount(word, word_list):\n\n count = 0\n\n for item in word_list:\n\n if item == word:\n count += 1\n\n return count", "def computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)", "def mapWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n return token_map", "def histogram(word):\n d = {}\n for letters in word:\n d[letters] = d.get(letters, 0) + 1\n return d", "def frequencies(self):\n dic = {}\n for word in self.words():\n dic[word] = dic.get(word, 0) + 1\n return dic", "def count_word(doc):\n count = count = 0\n for w in document.split(\" \"):\n count = count + 1\n return count", "def count_label_feature_frequency(self):\n label_feature_frequency = {}\n for label in self.labels:\n label_feature_frequency[label] = self.count_document_frequency(label)\n return label_feature_frequency", "def words(word):\n new_list = []\n \n for i in word.split():\n new_list.append(i)\n return Counter(new_list)", "def get_freqs(self):\n dictionary = {}\n for word in self.word_list:\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n letter_sorted = sorted(dictionary.items(), key=lambda entry: entry[0]) #sorts dictionary into alphabetized tuples\n count_sorted = sorted(letter_sorted, key=lambda seq: seq[1], reverse=True) #sorts alphabetical tuples into count order\n return count_sorted", "def _get_counts(self, X: np.ndarray) -> Dict[int, np.ndarray]:\n return {f: np.bincount(X[:, f].astype(int), minlength=n_cat) for f, n_cat in\n self.categories_per_feature.items()}", "def compute_token_count(token_dict):\n return sum([token_dict[key] for key in token_dict.keys()])", "def _count_word_frequency(self, data):\n _dict = {}\n for _docs in data:\n for _word in _docs:\n if _word in _dict:\n _dict[_word] += 1\n else:\n _dict[_word] = 1\n return _dict", "def __getTermDocumentOccurences(col, matrix):\n term_document_occurances = 0\n rows, cols = matrix.shape\n for n in xrange(0, rows):\n if matrix[n][col] > 0: # Term appears in document\n term_document_occurances += 1\n return term_document_occurances", "def generate_feature_counts(traj, mdp):\n #count each time a state was visited \n counts = Counter({feature:0 for feature in mdp.features})\n for state,action in traj:\n counts[mdp.observe_features(state)] += 1\n \n return [counts[feature] for feature in mdp.features]", "def word_given_tag(word, tag, train_bag): # train_bag=train_tagged_words\n tag_list = [pair for pair in train_bag if pair[1] == tag]\n count_tag = len(tag_list) # total number of times the passed tag occurred in train_bag\n w_given_tag_list = [pair[0] for pair in tag_list if pair[0] == word]\n # now calculate the total number of times the passed word occurred as the passed tag.\n count_w_given_tag = 
len(w_given_tag_list)\n return count_w_given_tag, count_tag", "def word_count(text, word):\n \n #answer\n word_list = text.split(\" \")\n return (word_list.count(word))\n \n #return (text.count(word)) - deoesn't work", "def count(self, tokens):\n return self._count[tuple(tokens)]", "def count(self, tokens):\n return self.counts[tokens]", "def word_freq(self, word_list):\n hist = {}\n for word in word_list:\n hist[word] = hist.get(word, 0) + 1\n return hist", "def num_of_words(line, context):\n return [('num_of_word', len(line.txt.split()))]", "def word_stats(word_counts):\n num_unique = len(word_counts)\n counts = word_counts.values()\n return (num_unique, counts)", "def compute_vocab_count(sents):\n counter = collections.Counter()\n for sentence in sents:\n counter.update(untag(sentence))\n return counter", "def cat_count(data, column_str, criteria):\r\n ct1 = []\r\n ct2 = []\r\n for i in range(len(find_cats_freq(data, column_str))):\r\n ct1.append(find_cats_freq(data[criteria], column_str)[i])\r\n ct2.append(find_cats_freq(data, column_str)[i])\r\n return np.array(ct1)/np.array(ct2)", "def feature_count(self, trajs: List[Dict[str, list]],\n gamma: float) -> np.ndarray:\n # This was moved to utils:\n return irl_utils.feature_count(self.env, trajs, gamma)", "def count_words_and_dublicates(novel):", "def get_count_words(novel, words):\n dic_word_counts = {}\n for word in words:\n dic_word_counts[word] = novel.get_count_of_word(word)\n return dic_word_counts", "def computeWordMatrix( Docs, Keywords ) :\n\n w2vec_count = CountVectorizer( ngram_range=(1, 4), vocabulary=Keywords )\n X_Count = w2vec_count.fit_transform( Docs )\n\n return X_Count", "def count_words(tokenized_sentences):\r\n \r\n word_counts = {}\r\n \r\n # Loop through each sentence\r\n for sentence in tokenized_sentences: # complete this line\r\n \r\n for token in sentence: # complete this line\r\n\r\n # If the token is not in the dictionary yet, set the count to 1\r\n if token not in word_counts.keys(): # complete this line\r\n word_counts[token] = 1\r\n \r\n # If the token is already in the dictionary, increment the count by 1\r\n else:\r\n word_counts[token] += 1\r\n \r\n return word_counts", "def countWords(text):\r\n\r\n\tlistOfWord = []\r\n\tlistOfFrequency = []\r\n\r\n\tfor word in text:\t\t\t\t\t \t# menghitung frekuensi kata\r\n if word == '':\r\n pass\r\n elif word not in listOfWord:\t\t\t\t\t# menyimpan kata ke dalam list\r\n listOfWord.append(word)\r\n listOfFrequency.append(1)\r\n else:\r\n index = listOfWord.index(word)\r\n listOfFrequency[index] = listOfFrequency[index] + 1 # menambah frekuensi kata yang sudah ada\r\n\r\n\r\n\tlst = [listOfWord, listOfFrequency]\r\n\r\n\treturn lst", "def _getValueCounts(mapping):\n return Counter({k: len(v) for k, v in viewitems(mapping)})", "def make_bag_words(document_tokenized):\n bag_words = dict()\n for token in document_tokenized:\n if token in bag_words.keys():\n bag_words[token] += 1\n else:\n bag_words[token] = 1\n return bag_words", "def _feature_vec(xs, y):\n\tf = _create_feature_vec()\n\n\t# Iterate over rows in x, values of y, and update f.\n\tcount = y.shape[0]\n\tfor idx in range(count):\n\t\tword = xs[idx, :]\n\t\ttag = y[idx]\n\n\t\t# Defense!\n\t\tassert len(word) + 1 == len(f)\n\n\t\t# Iterate over feature values in word, increment the vector\n\t\tfor fidx, fvalue in enumerate(word):\n\t\t\tf[fidx][tag, fvalue] += 1\n\n\t\t# Update ngram matrix at the end of fvec. 
Must update edge potential\n\t\t# for previous AND next tag.\n\t\tif idx != 0:\n\t\t\tprev_tag = y[idx-1]\n\t\t\tf[-1][prev_tag, tag] += 1\n\t\tif idx != count - 1:\n\t\t\tnext_tag = y[idx+1]\n\t\t\tf[-1][tag, next_tag] += 1\n\n\treturn f", "def _count_vocab(self,raw_documents, fixed_vocab=False):\n if fixed_vocab:\n vocabulary = self.vocabulary_\n else:\n # Add a new value when a new vocabulary item is seen\n vocabulary = defaultdict()\n vocabulary.default_factory = vocabulary.__len__\n\n analyze = super().build_analyzer()\n \n j_indices = []\n indptr = []\n\n values = array.array(str('f'))\n indptr.append(0)\n for doc in raw_documents:\n #doc = tupla[0]\n feature_counter = {}\n #texttlist = doc.split(sep=\" \")\n for feature in analyze(doc):#texttlist:\n try:\n \n # Ignore out-of-vocabulary items for fixed_vocab=True\n feature_idx = vocabulary[feature]\n #print(feature_idx)\n #fti_feature = calc_fti(feature,raw_documents)\n \n if feature_idx not in feature_counter:\n feature_counter[feature_idx] = 1\n else:\n feature_counter[feature_idx] += 1\n #print(feature_counter[feature_idx])\n except KeyError:\n # Ignore out-of-vocabulary items for fixed_vocab=True\n continue\n\n\n j_indices.extend(feature_counter.keys())\n values.extend(feature_counter.values())\n indptr.append(len(j_indices))\n\n if not fixed_vocab:\n # disable defaultdict behaviour\n vocabulary = dict(vocabulary)\n if not vocabulary:\n raise ValueError(\"empty vocabulary; perhaps the documents only\"\n \" contain stop words\")\n\n if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1\n if _IS_32BIT:\n raise ValueError(('sparse CSR array has {} non-zero '\n 'elements and requires 64 bit indexing, '\n 'which is unsupported with 32 bit Python.')\n .format(indptr[-1]))\n indices_dtype = np.int64\n\n else:\n indices_dtype = np.int32\n \n j_indices = np.asarray(j_indices, dtype=indices_dtype)\n indptr = np.asarray(indptr, dtype=indices_dtype)\n \n #print (vocabulary)\n X = sp.csr_matrix((values, j_indices, indptr),\n shape=(len(indptr) - 1, len(vocabulary)),\n dtype=np.float32)\n X.sort_indices() \n \n self.vocabulary_calculated = vocabulary\n\n return vocabulary, X", "def bow(tokens):\n return dict(collections.Counter(re.findall(r'\\w+', \" \".join(tokens))))", "def number_tokens(dgraph):\n cnt = 0\n for node in dgraph.subgraphs(exclude_root=True):\n node[WORD] = (cnt, node[WORD])\n cnt += 1\n return dgraph", "def frequency(lst):\n\n count = dict()\n for word in lst:\n if word in count:\n count[word] += 1\n else:\n count[word] = 1\n return count", "def create_feature_space(sentences):\n splits = [s.split() for s in sentences]\n types = set(reduce(lambda x, y: x + y, splits))\n lookup = dict()\n for i, word in enumerate(types):\n lookup[word] = i\n return lookup", "def make_word_num_map(words):\n\tword_num_map = dict()\n\tfor word in words:\n\t\tword_num_map[word] = word_num_map.get(word, 0) + 1\n\treturn word_num_map", "def calculate_word_counts(text : Text)->Counter:\n return Counter(tokenized_text(text))", "def count_vector(df:pd.DataFrame, column_name:str, y:list=None):\n vectorizer = CountVectorizer()\n # print(vectorizer.get_feature_names())\n ans = vectorizer.fit_transform(raw_documents=df[column_name], y=y)\n return ans", "def CountWords(input1):\n tokens = []\n\n d={}\n\n tokens = tokens+input1.lower().split()\n\n for token in tokens:\n # Remove Punctuation\n word=token.replace(\"!\", \"\")\n word=word.replace(\".\", \"\")\n d[word] = d.get(word,0)+1\n\n word_frq=[]\n for i,v in d.items():\n word_frq.append((v,i))\n 
word_frq.sort(reverse=True)\n\n for word in word_frq:\n print (word[1],word[0])" ]
[ "0.7089016", "0.66866636", "0.6684821", "0.6607445", "0.6593285", "0.65438855", "0.6478012", "0.6474119", "0.64096224", "0.6368129", "0.6366926", "0.633694", "0.6299358", "0.62755966", "0.62697554", "0.6268828", "0.62654084", "0.6255192", "0.6243507", "0.62417585", "0.624167", "0.6203283", "0.6187209", "0.6181345", "0.6181004", "0.6167314", "0.6165074", "0.6132464", "0.6116753", "0.6103522", "0.6102888", "0.609964", "0.60947776", "0.60932106", "0.60930717", "0.6085253", "0.6079162", "0.6067249", "0.60629857", "0.60507464", "0.6039812", "0.60386693", "0.6032929", "0.6031875", "0.6027091", "0.60199183", "0.60175633", "0.60136205", "0.60105485", "0.59999555", "0.5995825", "0.59857094", "0.59827954", "0.59800357", "0.5966739", "0.5965046", "0.59641266", "0.5958329", "0.59521514", "0.5949205", "0.59463793", "0.5945411", "0.59389293", "0.59373385", "0.5932548", "0.59321374", "0.5923568", "0.5917827", "0.5917643", "0.5917302", "0.590849", "0.5905442", "0.5902904", "0.58977497", "0.58939326", "0.58921766", "0.58815134", "0.58811444", "0.5878625", "0.5878121", "0.58706945", "0.5866637", "0.5865707", "0.58641887", "0.5863559", "0.585448", "0.584816", "0.5846561", "0.5842595", "0.5840284", "0.5829976", "0.58275723", "0.58218086", "0.58214664", "0.5821265", "0.581819", "0.58164155", "0.58077824", "0.58056825", "0.5802422" ]
0.6490124
6
Return True if `segment` is in segment features database
def seg_known(self, segment, normalize=True):
        if normalize:
            segment = FeatureTable.normalize(segment)
        return segment in self.seg_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, feature):\n return feature in self.features", "def isSegmentFile(self, segment):\n return os.path.isfile(\"{wd}/{jn}-run/{seg}.rst7\".format( wd=self.workdir, jn=self.jobname, seg=segment.getNameString()))", "def check_segment_for_agent(self, segment, agent):\n mappings = agent['configurations'].get('interface_mappings', {})\n tunnel_types = agent['configurations'].get('tunnel_types', [])\n LOG.debug(\"Centec mech driver - Checking segment: %(segment)s \"\n \"for mappings: %(mappings)s \"\n \"with tunnel_types: %(tunnel_types)s\",\n {'segment': segment, 'mappings': mappings,\n 'tunnel_types': tunnel_types})\n network_type = segment[api.NETWORK_TYPE]\n if network_type == 'gre':\n return True\n if network_type == 'local':\n return True\n elif network_type in tunnel_types:\n return True\n elif network_type in 'flat':\n return True\n elif network_type in ['vlan']:\n return segment[api.PHYSICAL_NETWORK] in mappings\n else:\n return False", "def has_feature(self, feature):\n features = self.features\n if features is None:\n return False\n \n return feature in features", "def check_segment(self, segment):\n network_type = segment[api.NETWORK_TYPE]\n return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,\n constants.TYPE_VXLAN, constants.TYPE_VLAN]", "def check_segment(self, segment):\n network_type = segment[api.NETWORK_TYPE]\n return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,\n constants.TYPE_VXLAN, constants.TYPE_VLAN]", "def __contains__(self, feature):\n return feature == 'cvarsort' or feature in self.features", "def feature_one(ds, tup):\n # try:\n # if (nx.shortest_path_length(G, frm, to) == 1):\n # o1.write(\"trusted\\n\")\n # else:\n # o1.write(\"unverified\\n\")\n # except:\n # o1.write(\"unverified\\n\")\n\n return tup[0] in ds[tup[1]]", "def counterSeg(self, x, y, X, Y):\n if self.segs == []:\n return False\n st = self.segs[-1].getStartPoint()\n end = self.segs[-1].getEndPoint()\n return st == (X, Y) and end == (x, y)", "def has_keypoints_label(self, label):\n return label in self.schema", "def can_fix_intersection(self, segment):\n\n points = segment.points\n points = [points[1], points[2], points[3], points[2], points[1], points[0]]\n path = create_path(points)\n layer = GSLayer()\n layer.paths.append(path)\n\n if layer.paths[0].insertNodeWithPathTime_(2.5) is None:\n return False\n for segment in layer.paths[0].segments[:-1]:\n # We need to check only curve segments which consist of four points.\n if len(segment.points) == 4:\n s_t = self.triangle_error_of(segment.points, do_round=True)\n if s_t is not None:\n points = points2vectors(segment.points)\n ok = False\n for s, t in self.calculate_s_t_candidates(points, s_t):\n if self.try_update_points(points, s, t) is not None:\n ok = True\n break\n if not ok:\n return False\n return True", "def has_sector(self,s):\n for run in self.byteruns():\n if run.has_sector(s): return True\n return False", "def contains(self, point):\n return point in self.console", "def is_intersection_segment_plane(segment, plane, epsilon=1e-6):\n pt1 = segment[0]\n pt2 = segment[1]\n p_cent = plane[0]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n v2 = subtract_vectors(pt1, p_cent)\n fac = - dot_vectors(p_norm, v2) / dot\n if fac > 0. 
and fac < 1.:\n return True\n return False\n else:\n return False", "def test_feature_in_collection(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n feature = fc1.features[0]\n assert fc1.feature_in_collection(feature)\n\n feature = fc2.features[0]\n assert not fc1.feature_in_collection(feature)", "def check_for_existing_market_segment(segment):\r\n for var in list(globals().keys()):\r\n if isinstance(eval(\"{var}\".format(var=var)), MarketSegment):\r\n if eval(\"{var}.name\".format(var=var)) == segment.name:\r\n return\r\n\r\n # no matching segment found in globals, create it!\r\n var_name = \"{}_ms\".format(segment.name.replace(\" \", \"_\"))\r\n regex = re.compile('[^a-zA-Z0-9_]')\r\n var_name = regex.sub(\"\", var_name)\r\n globals()[var_name] = segment", "def is_point_on_segment(point, segment, tol=0.0):\n a, b = segment\n if not is_point_on_line(point, segment, tol=tol):\n return False\n d_ab = distance_point_point(a, b)\n if d_ab == 0:\n return False\n d_pa = distance_point_point(a, point)\n d_pb = distance_point_point(b, point)\n if d_pa + d_pb <= d_ab + tol:\n return True\n return False", "def pointInSegment(point, segmentPoint1, segmentPoint2):\n\t\tx = point[0]\n\t\ty = point[1]\n\n\t\tif x < segmentPoint1[0] and x < segmentPoint2[0]:\n\t\t\treturn False\n\t\t\n\t\tif x > segmentPoint1[0] and x > segmentPoint2[0]:\n\t\t\treturn False\n\t\t\n\t\tif y < segmentPoint1[1] and y < segmentPoint2[1]:\n\t\t\treturn False\n\t\t\n\t\tif y > segmentPoint1[1] and y > segmentPoint2[1]:\n\t\t\treturn False\n\t\t\n\t\treturn True", "def __contains__(self, fragment):\n return fragment in self._items", "def __contains__(self, ngram):\n return ngram in self.root", "def __contains__(self, record):\n with self.session as session:\n query = session.query(IndexRecord)\n query = query.filter(IndexRecord.did == record)\n\n return query.exists()", "def is_spec_segm(*args):\n return _ida_segment.is_spec_segm(*args)", "def __contains__(self, x):\n indexes = self.get_indexes(x)\n return self.sketch[indexes] > 0", "def has_feature_access(self, feature):\n\n return self.has_feature_access_by_id(feature.value[0])", "def return_flag_on_feature(self, feature, pos_tag_list):\n for i in pos_tag_list:\n if i[1] == feature:\n return True\n return False", "def url_is_in_db(url):\n return bool(find_url(url).first())", "def is_in(self, entry):\n return entry in self.__entries", "def __contains__(self, gid: uuid.UUID) -> bool:\n return gid in self._nodes", "def __contains__(self, cvs_path):\n\n return cvs_path in self._entries", "def __contains__(self, essid):\n return essid in self.essids", "def near_segment(point:tuple, edge:tuple)->bool:\n return between(point[0], edge[0][0], edge[1][0]) and between(point[1], edge[0][1], edge[1][1])", "def test_contains_point() -> None:\n point_1 = Point(1, 2)\n point_2 = Point(-2, -4)\n point_3 = Point(3, 3)\n point_4 = Point(0, 0)\n\n line_segment = LineSegment(first=point_1, second=point_2)\n\n assert line_segment.does_contain(point_1)\n assert line_segment.does_contain(point_2)\n assert not line_segment.does_contain(point_3)\n assert line_segment.does_contain(point_4)", "def __contains__(self, doc_label):\n return doc_label in self.docs", "def intersects_segment(\n self, a: Tuple[float, float], b: Tuple[float, float]\n ) -> bool:\n assert len(a) == 2\n assert len(b) == 2\n return bool(lib.cpBBIntersectsSegment(self, a, b))", "def is_visible_segm(*args):\n return _ida_segment.is_visible_segm(*args)", "def __contains__(self, token: Hashable) -> 
bool:\n return token in self._token_to_idx", "def can_reserve(train_id,segment_id):\n cursor.execute(\"\"\"select freeseat from seats_free where train_id= %s and segment_id= %s\"\"\", [train_id,segment_id]) # query\n available_seats = cursor.fetchone() # fetch all reservations related to that passenger\n print(available_seats)\n if available_seats[0] == 448:\n return False;\n return True;", "def contains(self, symbol):\r\n return symbol in self.s_table", "def __contains__(self, edge):\n return edge in self._edges", "def __contains__(self, essid):\n with SessionContext(self.SessionClass) as session:\n q = session.query(ESSID_DBObject)\n return q.filter(ESSID_DBObject.essid == essid).count() == 1", "def __contains__(self, essid):\n return self.cli.essids.contains(essid)", "def _has_endpoint(self, endpoint):\n return self.endpoints.filter(pk=endpoint.pk).exists()", "def __contains__(self, context):\n return context in self._contexts", "def has_table(self, table):\n return table in self.get_table_list(\".\" in table)", "def has_data(self, fragment, number):\n if fragment in self.mdv:\n if number in self.mdv[fragment]:\n return True\n return False", "def __contains__(self, seq):\n return bool(libhts.faidx_has_seq(self._fai, seq))", "def __contains__(self, key):\n\t\treturn key in self.__dStore", "def is_in(batch, data):\n _id = batch[-1]\n for d in data:\n if d[-1] == _id:\n return True\n return False", "def contains(self, edge):\n return edge in self.edges", "def __contains__(self, f) :\n if self.__disc is infinity :\n return True\n \n (s, l) = f\n\n (a, _, c) = apply_GL_to_form(self.__p1list[l], s)\n if not c % self.__level == 0 :\n return False\n\n return a + c < self.index()", "def segment_segment(s1, s2):\n l1=s1.line()\n l2=s2.line()\n i = line_line(l1, l2)\n if isinstance(i, bool): return False\n k = s1.affine(i)\n return k >= 0 and k <= 1 and i", "def is_segment(pattern):\n return (type(pattern) is list\n and pattern\n and len(pattern[0]) > 2\n and pattern[0][0] == '?'\n and pattern[0][1] == '*'\n and pattern[0][2] in string.ascii_letters\n and ' ' not in pattern[0])", "def contains(self, symbol):\n return symbol in self.table", "def __contains__(self, point):\n raise NotImplementedError(f\"The `in` operator is not supported for {self.__class__.__name__}\")", "def __contains__(self, sentence):\n return sentence in self._sentences", "def intersection(self, segment):\n intersection = self.hyperplane.intersection(segment)\n if intersection is not None and np.linalg.norm(intersection - self.closest_point_to(intersection)) < epsilon:\n return intersection\n\n return None", "def __contains__(self, point: Point[Scalar]) -> bool:\n return point in self._points_set", "def contains(self,other):\n retVal = False\n\n bounds = self.points\n if( isinstance(other,Feature) ):# A feature\n retVal = True\n for p in other.points: # this isn't completely correct - only tests if points lie in poly, not edges.\n p2 = (int(p[0]),int(p[1]))\n retVal = self._pointInsidePolygon(p2,bounds)\n if( not retVal ):\n break\n # a single point\n elif( (isinstance(other,tuple) and len(other)==2) or ( isinstance(other,np.ndarray) and other.shape[0]==2) ):\n retVal = self._pointInsidePolygon(other,bounds)\n\n elif( isinstance(other,tuple) and len(other)==3 ): # A circle\n #assume we are in x,y, r format\n retVal = True\n rr = other[2]*other[2]\n x = other[0]\n y = other[1]\n for p in bounds:\n test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))\n if( test < rr ):\n retVal = False\n break\n\n elif( isinstance(other,tuple) and 
len(other)==4 and ( isinstance(other[0],float) or isinstance(other[0],int))):\n retVal = ( self.maxX() <= other[0]+other[2] and\n self.minX() >= other[0] and\n self.maxY() <= other[1]+other[3] and\n self.minY() >= other[1] )\n elif(isinstance(other,list) and len(other) >= 4): # an arbitrary polygon\n #everything else ....\n retVal = True\n for p in other:\n test = self._pointInsidePolygon(p,bounds)\n if(not test):\n retVal = False\n break\n else:\n logger.warning(\"SimpleCV did not recognize the input type to features.contains. This method only takes another blob, an (x,y) tuple, or a ndarray type.\")\n return False\n\n return retVal", "def __contains__(self, arg):\r\n\r\n return arg in self.grfx[0]", "def is_isin(value):\n return True", "def dz_is_in(dz_string, substring):\n if substring not in dz_string:\n return 0\n else:\n return 1", "def __contains__(self, point):\n for component, dim in zip(point, self.dimensions):\n if component not in dim:\n return False\n return True", "def contains(self, point):\n raise Exception(\"contains not implemented.\")", "def feature_two(ds, tup):\n # try:\n # if (nx.shortest_path_length(G, frm, to) == 2):\n # o2.write(\"trusted\\n\")\n # else:\n # o2.write(\"unverified\\n\")\n # except:\n # o2.write(\"unverified\\n\")\n\n A_child = ds[tup[0]]\n C_child = ds[tup[1]]\n return ((len(A_child.intersection(C_child)) > 0) | (tup[0] in ds[tup[1]]))", "def _isFIdx(self, featureName):\n return 1 if (featureName in self.featureNames) else 0", "def __contains__(self, contact):\n if contact.getId() in self._node_dict.keys():\n return True\n else:\n return False", "def _has_sparse_features(\n feature_container: Iterable[schema_pb2.Feature]\n ) -> bool:\n for f in feature_container:\n if isinstance(f, schema_pb2.SparseFeature):\n return True\n if f.type == schema_pb2.STRUCT:\n if f.struct_domain.sparse_feature:\n return True\n return _has_sparse_features(f.struct_domain.feature)\n return False", "def check_segment(self, segment, host):\n\n # TODO(ijw): naive - doesn't check host, or configured\n # physnets on the host. Should work out if the binding\n # can't be achieved before accepting it\n\n network_type = segment[api.NETWORK_TYPE]\n if network_type not in self.allowed_network_types:\n LOG.debug(\n 'Network %(network_id)s is %(network_type)s, '\n 'but this driver only supports types '\n '%(allowed_network_types)s. 
'\n 'The type must be supported if binding is to succeed.',\n {'network_id': segment['id'],\n 'network_type': network_type,\n 'allowed_network_types':\n ', '.join(self.allowed_network_types)}\n )\n return False\n\n if network_type in [plugin_constants.TYPE_FLAT,\n plugin_constants.TYPE_VLAN]:\n physnet = segment[api.PHYSICAL_NETWORK]\n if not self.physnet_known(host, physnet):\n LOG.debug(\n 'Network %(network_id)s is on physical '\n 'network %(physnet)s, but the physical network '\n 'is not one the host %(host)s has attached.',\n {'network_id': segment['id'],\n 'physnet': physnet,\n 'host': host}\n )\n return False\n\n return True", "def __eq__(self, other: Segment) -> bool:\n return any(\n (\n self.start == other.start and self.end == other.end,\n self.start == other.end and self.end == other.start,\n )\n )", "def __contains__(self, idx):\n return idx in self._data", "def __contains__(self, obj):\n if isinstance(obj, self):\n query = self.where(**obj.data).select()\n result = query.execute()\n if result.count:\n return True\n return False", "def is_geo_sample(sample):\n biosample_node = ET.fromstring(sample)\n ids = biosample_node.find('Ids')\n if ids is not None:\n for id in ids:\n\n db = id.get('db')\n if db == 'GEO':\n return True\n return False", "def exists(cls, ko):\n if isinstance(ko, BagDocument):\n return ko._key in cls._dbag\n else:\n return ko in cls._dbag", "def has_descriptor(self, uuid):\n # Try to select the descriptor\n # TODO: Probably a better way of doing this that's more efficient.\n return bool(\n self.solr.select(\"%s:%s AND %s:%s\"\n % (self.index_uuid_field, self.index_uuid,\n self.d_uid_field, uuid)).numFound\n )", "def has_entry(self, gate):\n key = Key(name=gate.name, num_qubits=gate.num_qubits)\n\n return key in self._key_to_node_index", "def contains(self, point):\n try:\n p = vector(point)\n except TypeError: # point not iterable or no common ring for elements\n if len(point)>0:\n return False\n else:\n p = vector(self.field(), [])\n\n if len(p)!=self.ambient_dim():\n return False\n \n for H in self.Hrep_generator():\n if not H.contains(p):\n return False\n return True", "def checkFeatureInSet(self, featureSet, currFeature, idxValue):\n\n found = False\n currFeatureID = currFeature[idxValue]\n\n for feature in sorted(featureSet, key=lambda f: f[idxValue]):\n attr = feature.attributes()\n currValue = attr[idxValue]\n\n if currFeatureID == currValue:\n found = True\n return found\n\n return found", "def contains(self, vertex):\n return vertex in self._graph", "def __contains__(self, column):\n for query in self.__queries:\n if column in query:\n return True\n return False", "def has_contig(variant: str) -> bool:\n fields: List[str] = variant.split(\"|\")\n for field in fields:\n field_name_value: List[str] = field.split(\":\")\n field_name: str = field_name_value[0]\n if field_name == \"CTG\":\n field_value: str = field_name_value[1]\n if field_value != \".\":\n return True\n return False", "def liesOnSegment(start, end, point, tolerance=fromMm(0.01)):\n segment = LineString([start, end])\n point = Point(point)\n candidatePoint, _ = nearest_points(segment, point)\n return candidatePoint.distance(point) < tolerance", "def isselected(values, feature, parent):\r\n layername=values[0]\r\n fid = feature.id()\r\n layers = QgsMapLayerRegistry.instance().mapLayers()\r\n try:\r\n layer = layers[layername]\r\n except KeyError:\r\n try:\r\n layer = [l for l in layers.iteritems() if l[1].name() == layername][0][1]\r\n except IndexError:\r\n parent.setEvalErrorString( u'No 
layer with id or name {} found'.format( layername ) )\r\n return False\r\n\r\n return fid in layer.selectedFeaturesIds()", "def classify(self, tokenized_record):\n\n return bool(set(tokenized_record).intersection(self.bo_markers))", "def _is_in_doc(t: int, d: List[List[str]]) -> bool:\n t = str(t)\n for s in d:\n if t in s:\n return True\n return False", "def contain(self, structure, sentence) -> Bool:\n raise NotImplementedError()", "def contain(self, structure, sentence) -> Bool:\n raise NotImplementedError()", "def has_label(self, label):\n return label in self.get_labels()", "def __contains__(self, point):\n if not isinstance(point, np.ndarray):\n point = np.array(point)\n return any(point in u for u in self.list_poly)", "def has_token_in(status, token):\n if not hasattr(res, status):\n return False\n return token in getattr(res, status)", "def _is_in_datasource(self, data_source, obj):\n q = self.sql_query(\n \"\"\" select @rid from (select expand(in(Owns)) from {obj_rid}) \\\n where @class = 'DataSource' and @rid = {rid}\"\"\".format(\n obj_rid = obj._id, ds_rid = data_source._id))\n return len(q) > 0", "def __contains__(self, point: Point2D) -> bool:\n raise NotImplementedError", "def is_in_hotspot(self):\r\n in_hotspot = False\r\n hotspots = parser.parse_hotspot_bed()\r\n \r\n if hotspots.get(self.chrom): \r\n chrom_hotspots = hotspots[self.chrom]\r\n \r\n for interval in chrom_hotspots: \r\n if interval[0] <= self.pos <= interval[1]:\r\n in_hotspot = True\r\n break\r\n \r\n return in_hotspot", "def has_feature_access_by_id(self, feature_id):\n\n return self.organizinghubfeatureaccess_set.filter(\n feature=feature_id\n ).first() is not None", "def match(self, segment):\n\n match_len = self._win_length\n\n subsegments = [[segment[j+i] for i in range(match_len)] for j in range(len(segment)-match_len+1)]\n\n for subsegment in subsegments:\n if set(subsegment).pop() == self.current_player and len(list(set(subsegment))) == 1:\n return True\n\n return False", "def has(self, id_):\n with self._db_connection() as connection:\n return connection.contains_dataset(id_)", "def exists(cls, token):\n return cls.objects.filter(token=token).exists()", "def _include_feature(self, name):\n return (self._feature_names is None or name in self._feature_names or\n name.startswith(self._neighbor_config.prefix))", "def __contains__(self, pos):\n if pos in self._coordinates:\n return True\n return False", "def exists(self):\n return len(list(self.measures)) > 0", "def __contains__(self, fragment_idx):\n if isinstance(fragment_idx, Fragment):\n return self.fragment_list.__contains__(fragment_idx)\n elif isinstance(fragment_idx, str):\n return self.fragment_dict.__contains__(fragment_idx)\n raise TypeError, fragment_idx" ]
[ "0.6365108", "0.59316486", "0.57967645", "0.5662152", "0.55408937", "0.55408937", "0.5423819", "0.54050773", "0.53360146", "0.5335236", "0.53037214", "0.529815", "0.529553", "0.5255767", "0.52465856", "0.524252", "0.5228098", "0.522067", "0.5215083", "0.5206038", "0.52032894", "0.51871634", "0.51625603", "0.5140333", "0.5125622", "0.5117634", "0.5117141", "0.5112907", "0.5098487", "0.5098251", "0.5096528", "0.5092566", "0.5085396", "0.5068935", "0.5056241", "0.50477225", "0.5044877", "0.5039897", "0.5030727", "0.50269604", "0.5006369", "0.500016", "0.49991727", "0.4995444", "0.49930164", "0.4976609", "0.49763677", "0.49703828", "0.496959", "0.49660012", "0.4961074", "0.49587294", "0.49549785", "0.49451023", "0.4938466", "0.49364427", "0.49346232", "0.4932887", "0.4925568", "0.49134433", "0.4906242", "0.4896301", "0.48954144", "0.48875085", "0.4884035", "0.48802015", "0.48784888", "0.4877994", "0.48762804", "0.4875941", "0.48746637", "0.48729324", "0.4871115", "0.4870978", "0.48705024", "0.48628083", "0.48603165", "0.48400742", "0.4825685", "0.4818423", "0.4817221", "0.48162884", "0.4815551", "0.48141104", "0.48121664", "0.48121664", "0.48115817", "0.48089093", "0.4801791", "0.47943816", "0.47941533", "0.47915268", "0.4787805", "0.47839057", "0.478363", "0.47828382", "0.47793853", "0.47733143", "0.4768479", "0.476767" ]
0.75535554
0
Return a list of segments (as strings) from a word

Characters that are not valid segments are included in the list as individual characters.
def segs_safe(self, word, normalize=True):
        if normalize:
            word = FeatureTable.normalize(word)
        return self._segs(word, include_invalid=True, normalize=normalize)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def segment(text: str) -> List[str]:\n\n if not text or not isinstance(text, str):\n return []\n\n return _cut_subword(_cut_etcc.word_tokenize(text))", "def slice(self, word):\n # Short words aren't hyphenated.\n if len(word) <= 4:\n return [word]\n # If the word is an exception, get the stored points.\n if word.lower() in self.exceptions:\n points = self.exceptions[word.lower()]\n else:\n work = '.' + word.lower() + '.'\n points = [0] * (len(work) + 1)\n for i in range(len(work)):\n t = self.tree\n for c in work[i:]:\n if c in t:\n t = t[c]\n if None in t:\n p = t[None]\n for j in range(len(p)):\n points[i + j] = max(points[i + j], p[j])\n else:\n break\n # No hyphens in the first two chars or the last two.\n points[1] = points[2] = points[-2] = points[-3] = 0\n\n # Examine the points to build the pieces list.\n pieces = ['']\n for c, p in zip(word, points[2:]):\n pieces[-1] += c\n if p % 2:\n pieces.append('')\n return pieces", "def _process(self, word: str) -> List[str]:\n # if a blank arrives from splitting, just return an empty list\n if len(word.strip()) == 0:\n return []\n word = self.convert_consonantal_i(word)\n my_word = \" \" + word + \" \"\n letters = list(my_word)\n positions = []\n for dipth in self.diphthongs:\n if dipth in my_word:\n dipth_matcher = re.compile(\"{}\".format(dipth))\n matches = dipth_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n matches = self.kw_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n letters = string_utils.merge_next(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions.clear()\n if not self._contains_vowels(\"\".join(letters)):\n return [\n \"\".join(letters).strip()\n ] # occurs when only 'qu' appears by ellision\n positions = self._starting_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_right(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._starting_consonants_only(letters)\n positions = self._ending_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_left(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._ending_consonants_only(letters)\n positions = self._find_solo_consonant(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_solo_consonant(letters)\n positions = self._find_consonant_cluster(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_consonant_cluster(letters)\n return letters", "def segment_spanish(input_text):\n processed_document = nlp(input_text)\n\n tokens = drop_punctuation_and_numbers([word for word in processed_document])\n\n unique_tokens = set(tokens)\n return list(unique_tokens)", "def subword_tokenize(self, word: str) -> List[str]:\r\n end_idx = min([len(word), self.ngram_max])\r\n sw_tokens = [self.SOW]\r\n start_idx = 0\r\n\r\n while start_idx < len(word):\r\n subword = word[start_idx:end_idx]\r\n if subword in self.bpe_vocab:\r\n sw_tokens.append(subword)\r\n start_idx = end_idx\r\n end_idx = min([len(word), start_idx + self.ngram_max])\r\n elif len(subword) == 1:\r\n sw_tokens.append(self.UNK)\r\n start_idx = end_idx\r\n end_idx = min([len(word), start_idx + self.ngram_max])\r\n else:\r\n 
end_idx -= 1\r\n\r\n sw_tokens.append(self.EOW)\r\n return sw_tokens", "def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" \")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents", "def make_word_list(start, lines, excluded):\r\n words = []\r\n for line in lines:\r\n word = line.rstrip()\r\n if len(word) == len(start):\r\n if (word == start) or (word not in excluded):\r\n words.append(word)\r\n return words", "def _possible_words(self):\n new_words = []\n for word in self._words:\n if word not in (self._used_words + tuple(self._tried_words)):\n for i in range(len(self._start)):\n if word[:i] + word[i+1:] == self._start[:i] + self._start[i+1:]:\n new_words.append(word)\n new_words.sort()\n return new_words", "def get_words(f: str, letters: List[str]) -> List[str]:\r\n forbidden_letters = [i for i in string.ascii_lowercase]\r\n for i in letters:\r\n try:\r\n forbidden_letters.remove(i)\r\n except:\r\n pass\r\n words_file = open(f)\r\n word_list = []\r\n letstr = \"\"\r\n for i in letters:\r\n letstr += i\r\n for word in words_file:\r\n word = word[:-1].lower()\r\n if len(word) >= 4:\r\n count = 0\r\n for let in word:\r\n if let in forbidden_letters:\r\n count += 1\r\n if word.count(let) > letstr.count(let):\r\n count += 1\r\n if letters[4] not in word:\r\n count += 1\r\n if count == 0:\r\n word_list.append(word)\r\n return word_list", "def segment(text, WORDS) -> List[Word]:\n Pword = Bag(WORDS)\n if not text: \n return []\n else:\n candidates = ([first] + segment(rest, WORDS)\n for (first, rest) in splits(text, 1))\n return max(candidates, key=lambda x: Pwords(x, Pword))", "def to_char_arrays(text, w):\n words = text.split()\n array_list = []\n if words:\n char_arr = words[0] # assign first word\n else:\n char_arr = ''\n for word in words[1:]: # for remaining words\n temp = ' ' + word\n if len(char_arr + temp) <= w: # if second word fits\n char_arr += temp\n else: # add to new array\n array_list.append(char_arr)\n char_arr = word\n\n array_list.append(char_arr)\n return array_list", "def try_split(text, chars=(u'—', '-')):\n for c in chars:\n segments = text.split(c)\n if len(segments) > 1:\n return [s.strip() for s in segments]", "def extract_characters(word):\n char_bbs = []\n column = 0\n char_start = -1\n while column < word.shape[1]:\n while not word[:, column].any():\n if char_start != -1:\n char_bbs.append(np.s_[:, char_start:column])\n char_start = -1\n column += 1\n if char_start == -1:\n char_start = column\n column += 1\n if char_start != -1:\n char_bbs.append(np.s_[:, char_start:column])\n return char_bbs", "def segment(text: str, model: str = \"attacut-sc\") -> List[str]:\n if not text or not isinstance(text, str):\n return []\n\n _tokenizer = AttacutTokenizer(model)\n\n return _tokenizer.tokenize(text)", "def get_word_pos_list(self, raw_text):\n raw_text = raw_text.strip()\n word_list = []\n pos_list = []\n # 
pdb.set_trace()\n seg_list = jieba.posseg.cut(raw_text,HMM=False) # 默认是精确模式\n for word, flag in seg_list:\n # remove the punctuation, we will keep punctuation as prosodic boundary\n if word in ['「', '」', '.', '-' , '', ' ', '。' , '—' , '?', ':', '、', '…',';',',',',','!']:\n continue\n word_list.append(word)\n pos_list.append(flag)\n return word_list, pos_list", "def filter_string(self, word, normalize=True):\n return ''.join(self.ipa_segs(word, normalize))", "def create_easy_list(self):\n wrong_characters = set(\"-\")\n self.word_list = []\n try:\n f = open(self.index, 'r')\n for line in f:\n if line[0] == 'E' and line[1] == \" \" and line[2] != \" \":\n readout = line[2:].upper()\n has_digit = re.search('\\d', readout)\n # this can be added to if there are more characters that cannot be\n #used in the game\n has_wrong = re.search(\"[-,.' '/!?]\", readout)\n if has_digit is None:\n if has_wrong is None:\n self.word_list.append(readout.strip('\\n'))\n return self.word_list\n except IOError:\n print(\"Cannot open file\")\n raise (IOError)", "def checkWords(line):\n\n words = []\n parts = re.sub('[^a-zA-Z0-9@ ]', '', line)\n parts = parts.lower()\n parts = parts.split(' ')\n for w in parts:\n if w is not '' and len(w) > 4 and len(w) < 15 and w not in commonWords:\n # if w is not '':\n words.append(w)\n\n return words", "def _setup(self, word) -> List[str]:\n if len(word) == 1:\n return [word]\n for prefix in self.constants.PREFIXES:\n if word.startswith(prefix):\n (first, rest) = string_utils.split_on(word, prefix)\n if self._contains_vowels(rest):\n return string_utils.remove_blank_spaces(\n self._process(first) + self._process(rest)\n )\n # a word like pror can happen from ellision\n return string_utils.remove_blank_spaces(self._process(word))\n if word in self.constants.UI_EXCEPTIONS.keys():\n return self.constants.UI_EXCEPTIONS[word]\n return string_utils.remove_blank_spaces(self._process(word))", "def splitWordList(self, text):\n result = list()\n if text is None:\n return result\n\n t = text + \"⁋\"\n t = t.replace('\\n', '⁋')\n t = re.sub(WordListProcessor.REFERENCE_PATTERN, \"\", t)\n t = re.sub(WordListProcessor.SUPERSCRIPT_PATTERN, \"\", t) # TODO: Extract sense!\n t = re.sub(WordListProcessor.HTML_REMOVER, \"\", t)\n t = t.replace(\"&quot\", \"\\\"\")\n t = t.replace(\",\", \"⁋,\")\n t = t.replace(\";\", \"⁋\")\n # print(t)\n # t = re.sub(WordListProcessor.BRACKETED_DELIMITER, \"$1$2$3$4$5$6\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER1, \"$1$2\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER2, \"$1$2\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER3, \"$1$2\", t)\n t = self.escapeDelimiters(t)\n # print(t)\n t = t.replace(\"⁋;\", \"⁋\")\n t = t.replace(\"⁋,\", \"⁋\")\n t = t.replace(\"]] or [[\", \"]]⁋[[\")\n t = t.replace(\"]] and [[\", \"]]⁋[[\")\n t = t.replace(\" - \", \"⁋\")\n # t = t.replace(\" / \", \"⁋\")\n j = t.find(\" / \") # Use ' / ' only as a delimiter if there are at least two of them!\n if j >= 0:\n j = t.find(\" / \", j)\n if j >= 0:\n t = t.replace(\" / \", \"⁋\")\n # print(t)\n\n # print(t)\n while True:\n delim = t.find('⁋')\n if delim >= 0:\n word = t[0:delim]\n if word:\n # Normalize the word.\n word = word.strip()\n if word.lower().startswith(\"see also\"):\n word = word[8:].strip()\n if word.lower().startswith(\"see\"):\n word = word[3:].strip()\n if word.startswith(\":\"):\n word = word[1:].strip()\n word = self.deWikify(word).strip()\n word = self.removeBrackets(word).strip()\n word = self.removeTemplates(word).strip()\n word = 
self.removeComments(word).strip()\n if word.lower().startswith(\"see also\"):\n word = word[8:].strip()\n if word.lower().startswith(\"see\"):\n word = word[3:].strip()\n if word.startswith(\":\"):\n word = word[1:].strip()\n if word.endswith(\".\"):\n word = word[:-1].strip()\n if word.endswith(\",\"):\n word = word[:-1].strip()\n\n # Check for slashes.\n word = word.replace(\" / \", \"/\")\n word = word.replace(\"/ \", \"/\")\n i = word.find('/')\n if word:\n if i >= 0 and word.find(' ') < 0:\n while True:\n result.append(word[0:i])\n word = word[i + 1:]\n i = word.find('/')\n if i < 0:\n break\n result.append(word)\n else:\n result.append(word)\n\n t = t[delim + 1:]\n\n else:\n break\n\n return result", "def split_by_words(term):\n if not term:\n return []\n # make all chars in lower case\n term = term.lower()\n # main rules\n splitted_by_size = re.findall(re_words, term) or [term]\n # separators\n splitted_by_seps = [re.split(r'[_ @,.\\-()/№\\\"]', word) for word in splitted_by_size]\n # convert to simple array\n flat_list = [word for wordlist in splitted_by_seps for word in wordlist]\n # transliteration\n translitted = []\n for word in flat_list:\n try:\n translitted += custom_transliterate(word)\n translitted.append(word)\n translitted.append(translit(word, reversed=True))\n except Exception as e:\n logging.debug(\"Translit error: %s - %s\", str(e), word)\n # unique\n unique_list = list(set(translitted))\n return unique_list", "def gen_all_strings(word):\n if not word:\n return [\"\"]\n \n all_strings = []\n for string in gen_all_strings(word[1:]):\n for letter_idx in range(len(string) + 1):\n all_strings.append(string[letter_idx:] + word[0] + string[:letter_idx])\n \n return gen_all_strings(word[1:]) + all_strings", "def segment_words(self, string):\n words = []\n\n word_begin = 0\n while word_begin < len(string):\n word_options = self.find_prefixes(string[word_begin:])\n if len(word_options) > 0:\n best_word = self.unigram_provider.get_most_frequent_word(word_options)\n else:\n best_word = string[word_begin:word_begin+1]\n words.append(best_word)\n word_begin += len(best_word)\n\n return words", "def create_hard_list(self):\n word_list = []\n try:\n f = open(self.index, 'r')\n for line in f:\n if line[0] == 'H' and line[1] == \" \" and line[2] != \" \":\n readout = line[2:].upper()\n has_digit = re.search('\\d', readout)\n # this can be added to if there are more characters that cannot be\n # used in the game\n has_wrong = re.search(\"[-,.' 
'/!?]\", readout)\n if has_digit is None:\n if has_wrong is None:\n word_list.append(readout.strip('\\n'))\n return word_list\n except IOError:\n print(\"Cannot open file\")\n raise (IOError)", "def phoc_levels(word: str, levels=DEFAULT_PHOC_LEVELS):\n if levels <= 1:\n return [word]\n # cut_len = float(len(word)) / float(levels)\n # # length of cuts\n # regions = []\n # c_start = 0\n # list of strings\n substrings = []\n regions = [occ(i, levels) for i in range(levels)]\n # checking for overlapping characters for each individual region\n for reg in regions:\n sub_str = ''\n for idx, char in enumerate(word):\n char_occ = occ(idx, len(word))\n if is_occ(char_occ, reg):\n sub_str += char\n substrings.append(deepcopy(sub_str))\n return phoc_levels(word, levels-1) + substrings", "def getWordList(text):\n\ttmpwordlist = string.split(text)\n\twordlist = []\n\tfor i in range(len(tmpwordlist)):\n\t\tword = puncTrim(tmpwordlist[i])\n\t\tif len(word) > 0:\n\t\t\twordlist.append(word)\n\treturn wordlist", "def gen_all_strings(word):\r\n if len(word) == 0:\r\n return ['']\r\n else:\r\n first = word[0]\r\n rest = gen_all_strings(word[1:])\r\n new = []\r\n for item in rest:\r\n if len(item) > 0:\r\n for pos in range(len(item)):\r\n new.append(item[:pos] + first + item[pos:])\r\n new.append(item + first)\r\n new.append(first)\r\n new.extend(rest)\r\n return new", "def get_word_list(file_name):\n file_ = open(file_name, 'r')\n lines = file_.readlines()\n\n start_line = 0\n while lines[start_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n start_line += 1\n\n lines = lines[start_line+1:]\n\n end_line = 0\n while lines[end_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1:\n end_line += 1\n\n lines = lines[:end_line-3]\n\n list_ = ' '.join(lines)\n list_ = str.lower(list_)\n list_ = list_.translate(None, string.punctuation)\n list_ = list_.split()\n\n return list_", "def make_bag(txt, stopw):\n bow = re.split('\\s',txt.lower())\n new_bow=[]\n for word in bow:\n if word not in stopw and len(word)>0 and not re.search('\\d',word):\n new_bow.append(word)\n return(new_bow)", "def subwords(txt, sub):\n txt = txt.lower()\n txt = txt.replace('’', '\\'')\n sub = sub.lower().replace(' ', '')\n it = 0\n indices = []\n for c in sub:\n try:\n while txt[it] != c:\n it += 1\n indices.append(it)\n except (IndexError):\n print('Cannot find secret in text.')\n return []\n return indices", "def sentence_segment(self, doc, candidate_pos, lower):\n sentences = []\n for sent in doc.sents:\n selected_words = []\n for token in sent:\n # Store words only with cadidate POS tag\n if token.pos_ in candidate_pos and token.is_stop is False:\n if lower is True:\n selected_words.append(token.text.lower())\n else:\n selected_words.append(token.text)\n sentences.append(selected_words)\n return sentences", "def word_to_chars(self, word):\n chars = list()\n if word == self.eos or word == self.sos:\n chars.append(self.char_to_id[word])\n else:\n word = \"^\" + word + \"$\"\n for ch in word:\n flag = 1\n if ch in self.unk_char_list:\n flag = random.randint(0, 1)\n if ch in self.char_to_id and flag == 1:\n chars.append(self.char_to_id[ch])\n else:\n chars.append(self.char_to_id['<unk>'])\n return chars", "def filterPossibleWords(self): \r\n filledInSpaces = []\r\n for i in range(len(self.currentBoard)):\r\n if self.currentBoard[i] != '_':\r\n filledInSpaces.append( (i, self.currentBoard[i]) )\r\n \r\n self.wordList = list(filter(lambda word: self.viableWord(word, filledInSpaces), self.wordList))", "def split_word(word):\n 
return [(word[:i], word[i:]) for i in range(len(word) + 1)]", "def words_with_consecutive_a_or_e(words):\n return [word for word in words if re.match(r'\\w*(aa|ee)\\w*', word)]", "def words(self):\n punctuation = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n lst = []\n for lines in self.lines:\n words = lines.split(' ')\n for word in words:\n no_punc = ''\n for c in word:\n if c not in punctuation:\n no_punc += c.lower()\n if no_punc != '' and no_punc != '\\n':\n lst.append(no_punc.strip('\\n'))\n return lst\n #no_punc += word.lower()\n #for word in no_punc.split(' ')[:-1]:\n #for word in no_punc:\n # lst.append(word)\n #line = lines.strip(os.linesep) # strips away spaces, \\t (tabs), and \\n (new-lines/enter)\n #print(no_punc)\n #print(lst)", "def get_seg_features(string):\n seg_feature = []\n\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def check_word(self, line, sp_ch='*'):\n valid = []\n le = cross.get_length()\n for n in range(le):\n t1 = \"\".join(line)\n t2 = cross.get_word(n)\n if len(t1) == len(t2):\n z = zip(t1, t2)\n for x in z:\n if (x[0] != x[1]) and (x[0] != sp_ch):\n break\n else:\n word_as_list = list(t2)\n valid.append(word_as_list)\n return valid", "def sliptText(text):\n\treturn [char for char in text]", "def allPossibleWords(Rack):\n def checkWord(word):\n return stringInRack(word,Rack)\n return filter(checkWord, Dictionary)", "def getTWordList(text):\n\ttmpwordlist = string.split(text)\n\twordlist = [ [] ]\n\tpos = 0\n\tfor i in range(len(tmpwordlist)):\n\t\tword = getBrownWords(tmpwordlist[i])\n\t\tword[0] = puncTrim(word[0])\n\t\tif len(word[0]) > 0:\n\t\t\twordlist[pos].append(word)\n\t\telse:\n\t\t\tpos += 1\n\t\t\twordlist.append([])\n\treturn wordlist", "def gen_all_strings(word):\n if len(word) == 0:\n return [\"\"]\n elif len(word) == 1:\n return [\"\",word]\n else:\n result_strings = []\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n new_strings = []\n for rest_string in rest_strings:\n for dummy_index in range(len(rest_string)):\n #在首位插入\n if dummy_index == 0:\n new_string = first + rest_string\n new_strings.append(new_string)\n #在中间插入 \n else:\n new_string = rest_string[0:dummy_index] + first + rest_string[dummy_index:]\n new_strings.append(new_string)\n #在末尾插入\n new_strings.append(rest_string + first)\n \n result_strings.extend(rest_strings)\n result_strings.extend(new_strings)\n \n return result_strings", "def _get_seg_features(string):\n seg_feature = []\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def _get_word_list(text):\n return re.findall('\\w+', text)", "def gen_all_strings(word):\n if word == '':\n return ['']\n else:\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n all_words = []\n for string in rest_strings:\n for leter in range(len(string)+1):\n all_words.append(string[0:leter]+first+string[leter:])\n\n return rest_strings + all_words", "def doubletwochars(word: str) -> Iterator[str]:\n\n if len(word) < 5:\n return\n\n # TODO: 1) for vacacation yields \"vacation\" twice, hunspell's algo kinda wiser\n # 2) maybe just use regexp?..\n for i in range(2, len(word)):\n if word[i-2] == word[i] and word[i-3] == word[i-1]:\n yield word[:i-1] + word[i+1:]", "def segment(text):\n if not text: return []\n candidates = 
([first]+segment(rest) for first,rest in splits(text))\n return max(candidates, key=Pwords)", "def getBrownWords(word):\n\tglobal compiled\n\tbrownsplit = compiled.match(word)\n\tif brownsplit == None:\n\t\treturn [ \"\", \"\"]\n\treturn [ brownsplit.group(1), brownsplit.group(2) ]", "def sub_words(word):\n sub_words_lst = []\n for i in range(len(word)):\n sub_word = word[:i]+word[i+1:]\n sub_words_lst.append(sub_word)\n return sub_words_lst", "def get_word_list(file_name):\n\tbook = get_file_text(file_name)\n\tbook = strip_header(book)\n\tbook = strip_punctuation(book)\n\tbook = book.lower()\n\twords = re.split(r'\\s+', book)\n\treturn words", "def split_word_in_all_comps(self, term: str) -> List[str]:\n all_stems = []\n\n words = term.split()\n for word in words:\n stems = self.decompound(word)\n all_stems.extend(stems)\n\n for stem in stems:\n more_stems = self.split_word_in_all_comps(stem)\n all_stems.extend(more_stems)\n\n return all_stems", "def get_word_list(sentence):\n sentence = space1.sub(r'\\1 \\2', sentence)\n sentence = space2.sub(r\"\\1 \\2\", sentence)\n sentence = space3.split(sentence)\n sentence = \" \".join(sentence)\n wordlist = [i for i in sentence.split()]\n return \" \".join(wordlist)", "def extract_words(self):\n str = self.text.lower()\n words = re.sub(r'[?|—|:|\"|,|\\.\\n|\\.|\\s|\\n|\\t|\\v|\\f|\\r]+', \"*\", str)\n self.word_list = words.split(\"*\")", "def getWords(self, text):\n\t\ttextWithoutPunctuation = self.removePunctuation(text)\n\t\treturn [word for word in textWithoutPunctuation.split() if len(word) >= 1]", "def list_of_words(self):\n\t\treturn str.split(re.sub(r'\\W+', ' ', self.body.encode('ascii', 'replace')))", "def get_words(self, chars = None):\n if not self.branches.keys():\n return ['']\n\n if self.pre:\n def apre(word,letter):\n return letter + word\n else:\n def apre(word,letter):\n return word + letter\n\n if chars:\n sub = self.check(chars)\n if sub:\n return [apre(x,chars) for x in sub.get_words()]\n else:\n return []\n\n # If this node marks an existing word, pass back empty string to parent\n # nodes to rebuild this word separately from any derived compound words\n if self.exists:\n selfwordmarker = ['']\n else:\n selfwordmarker = []\n\n return [word for sublist in \\\n [[apre(word,key) for word in self.branches[key].get_words()]\\\n for key in self.branches.keys()]\\\n for word in sublist] + selfwordmarker", "def tokenize_into_words(myblob):\n set_constraint = re.compile(r'[^a-zA-Z0-9]')\n tokenize_to_text = set_constraint.split(myblob) # The blob is spilt into words and the given constraints are applied\n words = [word for word in tokenize_to_text if word]\n return words", "def words_with_pt_es(words):\n return [word for word in words if re.match(r'\\w*(pt|es)\\w*', word)]", "def GetWords(phrase):\n # Remove special characters regex\n # It works faster than the standard \\w+ pattern\n regex = re.compile(r'([^\\d\\`\\~\\!\\@\\#\\$\\%\\^\\&\\*\\(\\)\\+\\=\\[\\{\\]\\}\\|\\\\\\'\\<\\,\\.\\>\\?\\/\\\"\"\\;\\:\\s]+)+',\n re.UNICODE)\n return re.findall(regex,phrase.lower())", "def create_word_list(self):\n return set(self.split(self.title)+self.split(self.conditions)+self.split(self.interventions))", "def getWords(speech):\r\n return speech.split()", "def filter_chants_without_word_boundary(chants, logger=None):\n constains_word_boundary = chants.volpiano.str.contains('---')\n return chants[constains_word_boundary]", "def tokenize(document):\n terms = document.lower().split()\n space = ' '\n return [term.strip(characters) for term in terms 
if term not in space]", "def interestingWords(self):\n words = set([])\n for token in self.importantTokenList():\n if token.isStopWord() == False:\n words.add(token.text.lower())\n return words", "def get_word_list(text_string):\n\ttext_no_punc = ''\n\ttext = text_string[600:] #kill the header\n\tfor char in text: #killing punctuation\n\t\tif not is_punct_char(char):\n\t\t\ttext_no_punc = text_no_punc+char #so extend the string everytime we run into a letter\n\ttext_no_punc_lower = string.lower(text_no_punc)\n\tlist_of_words = []\n\tlist_of_words = text_no_punc_lower.split( ) #splitting the string into the list\n\treturn list_of_words", "def non_std_words(work):\n dictionary = enchant.Dict(\"en_US\")\n non_std_word = []\n\n for elem in work:\n lyrics = [item for sublist in elem[1] for item in sublist]\n lyrics = [i for i in lyrics if i[0] not in [',', '.', \"'\", '?', '!', '’', '&', '#', ':']]\n word_count = 1\n not_word_count = 1\n for tuples in lyrics:\n if dictionary.check(tuples[0]):\n word_count += 1\n else:\n not_word_count += 1\n\n non_std_word.append((not_word_count/(not_word_count+word_count), elem[0]))\n\n return non_std_word", "def _get_replacement_words(self, word):\n\n if len(word) <= 1:\n return []\n\n candidate_words = []\n\n start_idx = 1 if self.skip_first_char else 0\n end_idx = (len(word) - 2) if self.skip_last_char else (len(word) - 1)\n\n if start_idx >= end_idx:\n return []\n\n if self.random_one:\n i = np.random.randint(start_idx, end_idx)\n candidate_word = word[:i] + word[i + 1] + word[i] + word[i + 2 :]\n candidate_words.append(candidate_word)\n else:\n for i in range(start_idx, end_idx):\n candidate_word = word[:i] + word[i + 1] + word[i] + word[i + 2 :]\n candidate_words.append(candidate_word)\n\n return candidate_words", "def _split(self):\n \n self._words = []\n \n # (1) Expand contractions\n text = self._text.replace(\"'m \", \" am \")\n text = text.replace(\"'d \", \" would \")\n text = text.replace(\"'ll \", \" will \")\n text = text.replace(\"'ve \", \" have \")\n text = text.replace(\"'re \", \" are \")\n text = text.replace(\"can't \", \"can not \")\n text = text.replace(\"won't \", \"will not \")\n text = text.replace(\"n't \", \" not \")\n # Assume possesives are contractions of is\n text = text.replace(\"'s \", \" is \")\n text = text.replace(\"s' \", \"s \")\n \n # (2) Replace newlines, carriage returns, tabs, form feed with space.\n text = re.sub('[\\r\\n\\t\\f]', ' ', text)\n \n # (3) remove duplicate spaces\n text = re.sub(' +', ' ', text.strip())\n \n # Empty text\n if len(text) == 0:\n return \n \n # (4) Split text by whitespace (tokenize).\n words = text.split(' ')\n \n # (5) Separate out punctuation\n for word in words:\n length = len(word)\n \n begin = 0\n for i in range(0,length):\n if not word[i].isdigit() and not word[i].isalpha():\n # decimal, thousandths, fraction symbol\n if word[i] in ['.', ',', '/'] and i < length-1 and word[i+1].isdigit():\n continue\n # degree\n if word[i] in ['°'] and i < length-1 and word[i+1] in [ 'f', 'F', 'c', 'C']:\n continue\n # sign symbol\n if word[i] in ['-', '+'] and i < length-1 and (word[i+1].isdigit() or word[i+1] in ['.', ',']):\n # first char or exponent\n if begin == i or word[i-1] in ['e', 'E']:\n continue\n \n if begin != i:\n self._words.append( { 'word': word[begin:i], 'tag': Vocabulary.UNTAG } )\n if word[i] in [ '.', '?', '!', ',', ':', ';', '(', ')', '[', ']', '\"', '\\'', '¿', '¡']:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.PUNCT } )\n # non-printable ascii\n elif 
(ord(word[i]) >= 0 and ord(word[i]) <= 7) or (ord(word[i]) >= 14 and ord(word[i]) <= 31):\n pass\n else:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.SYMBOL } )\n begin = i + 1\n if begin < length:\n self._words.append( { 'word': word[begin:], 'tag': Vocabulary.UNTAG } )", "def get_words(doc):\n splitter = re.compile('\\\\W*')\n # Split the words by non-alpha characters\n words = [s.lower() for s in splitter.split(doc) \n if len(s)>2 and len(s)<20]\n # Return the unique set of words only\n return dict([(w,1) for w in words])", "def generatorToList(generator):\n # segs, postags, nertags\n\n\n '''\n words = []\n i = 0\n while i < len(segs):\n\n seg, postag, nertag = segs[i], postags[i], nertags[i]\n if postag == 'ws':\n currWord = seg\n while (i+1) < len(segs) and postags[i+1] == 'ws':\n currWord += segs[i+1]\n i += 1\n words.append((currWord, 'eng'))\n\n elif nertag == 'O':\n words.append((seg, postag))\n i += 1\n else:\n words.append((seg, nertag))\n i += 1\n return words\n '''\n words = []\n for word, flag in generator:\n words.append([word, flag])\n return words", "def full_words(word, string, sensitive=True):\n temp_word = ''\n o = []\n start = 0\n if not sensitive:\n word = word.lower()\n string = string.lower()\n for i, char in enumerate(string):\n if char != ' ':\n temp_word += char\n if i == 0:\n start = 0\n else:\n if string[i - 1] == ' ':\n start = i\n if i == len(string) - 1:\n if temp_word == word:\n o.append([start, start + len(word)])\n else:\n if temp_word == word:\n o.append([start, start + len(word)])\n temp_word = ''\n return o", "def non_match_word(self, matched_loc: list):\n sort_com = [value for index, value in sorted(enumerate(matched_loc), key=lambda matched_loc: matched_loc[1])]\n subs = []\n end = 0\n for i in sort_com:\n start = i[0]\n false_word = self.input_str[end:start]\n end = i[1]\n subs.append(false_word)\n subs.append(self.input_str[end:])\n while '' in subs:\n subs.remove('')\n return subs", "def tokenizer(seq: str, strip_char: str, stop_word: List[str]) -> List[str]:\n seq = seq.split(\" \")\n seq_split = list()\n\n for s in seq:\n if len(s.splitlines()) > 1:\n for i in s.splitlines():\n if i not in stop_word:\n seq_split.append(i)\n else:\n if s not in stop_word:\n seq_split.append(s)\n seq = [i.strip(strip_char).lower() for i in seq_split if i.strip(strip_char) != '']\n return seq", "def crossword_words(crossword: list) -> list:\n pass", "def tag_words (lx, wds):\n if (wds == []):\n return [[]]\n else:\n tag_first = tag_word (lx, wds[0])\n tag_rest = tag_words (lx, wds[1:])\n return [[fst] + rst for fst in tag_first for rst in tag_rest]", "def txt_to_word_list(text):\r\n return [w for w in text.split()]", "def ladder(word: str) -> List[str]:\n found_words = set()\n for i in range(len(word)):\n pattern = list(word)\n pattern[i] = '.'\n search_results = search(\"^\" + \"\".join(pattern) + \"$\")\n for result in search_results:\n if result != word:\n found_words.add(result)\n return found_words", "def twowords(word: str) -> Iterator[List[str]]:\n\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]", "def get_deletes_list(self, w):\n\n deletes = []\n queue = [w]\n for d in range(self.max_edit_distance):\n temp_queue = []\n for word in queue:\n if len(word) > 1:\n for c in range(len(word)): # character index\n word_minus_c = word[:c] + word[c + 1:]\n if word_minus_c not in deletes:\n deletes.append(word_minus_c)\n if word_minus_c not in temp_queue:\n temp_queue.append(word_minus_c)\n queue = temp_queue\n\n return deletes", "def 
get_deletes_list(self, w):\n\n deletes = []\n queue = [w]\n for d in range(self.max_edit_distance):\n temp_queue = []\n for word in queue:\n if len(word) > 1:\n for c in range(len(word)): # character index\n word_minus_c = word[:c] + word[c + 1:]\n if word_minus_c not in deletes:\n deletes.append(word_minus_c)\n if word_minus_c not in temp_queue:\n temp_queue.append(word_minus_c)\n queue = temp_queue\n\n return deletes", "def create_medium_list(self):\n word_list = []\n try:\n f = open(self.index, 'r')\n for line in f:\n if line[0] == 'M' and line[1] == \" \" and line[2] != \" \":\n readout = line[2:].upper()\n has_digit = re.search('\\d', readout)\n # this can be added to if there are more characters that cannot be\n # used in the game\n has_wrong = re.search(\"[-,.' '/!?]\", readout)\n if has_digit is None:\n if has_wrong is None:\n word_list.append(readout.strip('\\n'))\n return word_list\n except IOError:\n print(\"Cannot open file\")\n raise (IOError)", "def search_trimmers(seq: str) -> str:\n return [seq[i:i+3] for i in range(len(seq)-2)]", "def _get_characters(sentence: str, whitespace: bool) ->List[str]:\n if whitespace:\n return list(sentence)\n return list(sentence.strip().replace(' ', ''))", "def distillToWholeWords(value):\n\n if value == None or value.isspace():\n return None\n\n if len(value) <= 1:\n return [value]\n\n value = toPascelCase(value)\n value = transformCaseToSeparator(value, defaultCharSeparator)\n #need to perserve order\n outList = list()\n for p in value.split(defaultCharSeparator):\n if not p in outList:\n outList.append(p)\n \n return outList", "def get_words(line):\n try:\n alphabet = \"abcdefghijklmnopqrstuvwqyz \"\n lineL = list(line.lower())\n for word in lineL:\n if word not in alphabet:\n lineL.remove(word)\n lineS = \"\".join(lineL)\n lineL = lineS.split(\" \")\n for word in lineL:\n if word == \"\":\n lineL.remove(word)\n return lineL\n except:\n print(\"Error get_words()\")", "def get_from_word_edges(self, word: str) -> Set[str]:\n all_edges = set()\n\n for def_dict in self.word_dictionary[word]:\n processed_def = self.get_filtered_set_tokens(\n definition=def_dict[\"definition\"]\n )\n\n if self.drop_self_cycles:\n if word not in processed_def:\n all_edges = all_edges.union(processed_def)\n else:\n all_edges = all_edges.union(processed_def)\n\n return all_edges", "def split_words(value: str) -> List[str]:\n words: List[str] = []\n buffer: List[str] = []\n previous = None\n\n def flush():\n if buffer:\n words.append(\"\".join(buffer))\n buffer.clear()\n\n for char in value:\n tp = classify(char)\n if tp == StringType.OTHER:\n flush()\n elif not previous or tp == previous:\n buffer.append(char)\n elif tp == StringType.UPPER and previous != StringType.UPPER:\n flush()\n buffer.append(char)\n else:\n buffer.append(char)\n\n previous = tp\n\n flush()\n return words", "def get_words():\n # words\n words_list = list()\n for i in range(1, 114+1):\n sura = quran.get_sura(i)\n for aya in sura:\n wordsList = aya.split(' ')\n for word in wordsList:\n words_list.append(word)\n\n return words_list", "def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs", "def filterWords(text):\n words = text.split()\n out = []\n for w in words:\n w = normalizeOrRemoveWord(w)\n if w != None:\n out.append(w)\n return out", "def remove(sectence):\n status = 0\n lst = list()\n word = \"\"\n\n for i in sectence:\n if i == 
\"<\":\n status = 1\n if word != \"\":\n lst += word.strip().split()\n word = \"\"\n elif i == \">\":\n status = 0\n elif status == 0:\n word += i\n if word != \"\":\n lst += word.strip().split()\n print(lst)", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def easy_words(a_list):\n\n easy_list = [word for word in a_list if len(word) in range(4,7)]\n return easy_list", "def break_words(stuff):\r\n #parte la cadena cada vez que encuentra un espacio\r\n words = stuff.split(' ') \r\n return words", "def lookup_stress_patterns_for_word(word: Text) -> Sequence[Sequence[Stress]]:\n return [\n word.pf.stress_pattern for word in EnglishUtils.all_possible_forms_for(word)\n ]", "def get_word_list(file_name):\n # Read the file specified\n f = open(file_name,'r')\n lines = f.readlines()\n \n # Remove header text from lines\n curr_line = 0\n while lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line += 1\n lines = lines[curr_line + 1:]\n\n # Remove footer text from lines\n curr_line = -1\n while lines[curr_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line -= 1\n lines = lines[: curr_line]\n\n # Strip lines into words\n words = []\n for i in range(len(lines)):\n # Remove punctuation\n next_line = lines[i].translate(string.maketrans(\"\",\"\"), string.punctuation)\n next_line = next_line.lower()\n words += next_line.split()\n \n return words", "def getWords(docstr):\n # get rid of digits and non-alphanumeric chars\n # and split on spaces\n wds = re.sub('\\d', ' ', docstr)\n wds = re.sub('[\\W_]', ' ', wds)\n wds = wds.split()\n\n # convert to lowercase and get rid of stop words\n wordlist = [w.lower() for w in wds]\n wordlist = [w for w in wordlist if w not in stopWords]\n wordlist = [w for w in wordlist if len(w) >= 3]\n\n return wordlist", "def word_to_ngrams(self, word):\n encoding = list()\n n = self.n\n if word == self.eos or word == self.sos:\n encoding.append(self.ngram_to_id[word])\n else:\n _word = '^' + word + '$'\n for i in range(len(_word) - n + 1):\n ngram = _word[i:i + n]\n if ngram in self.ngram_to_id:\n encoding.append(self.ngram_to_id[ngram])\n else:\n for ch in ngram:\n flag = 1\n if ch in self.unk_char_list:\n flag = random.randint(0, 1)\n if ch in self.ngram_to_id and flag == 1:\n encoding.append(self.ngram_to_id[ch])\n else:\n encoding.append(self.ngram_to_id['<unk>'])\n return encoding", "def getSequence(word):\n\n # We'll construct our structure in sequence, and store\n # info about last character, consonant runs, and vowel runs.\n sequence = str()\n c_run = bool()\n v_run = bool()\n word = word.lower()\n\n vowels = set(['a', 'e', 'i', 'o', 'u'])\n\n for i, char in enumerate(word):\n # Handle vowels with y corner-cases.\n # If 'y' and preceded by a consonant, or a pure vowel\n if (c_run and char == 'y') or (char in vowels):\n # Account for the consonant run\n if c_run:\n sequence += 'C'\n c_run = False\n v_run = True\n # Else char is a consonant\n else:\n # Account for the vowel run\n if v_run:\n sequence += 'V'\n v_run = False\n c_run = True\n # Now account for the last character in the loop\n sequence += 'C' if c_run else 'V'\n\n return sequence" ]
[ "0.68789977", "0.6741519", "0.65025", "0.6010337", "0.6006612", "0.5996679", "0.5950866", "0.5939301", "0.5936155", "0.5838023", "0.58029926", "0.5797689", "0.5774421", "0.572342", "0.5713739", "0.57017034", "0.5683238", "0.56335175", "0.5613616", "0.5602126", "0.55915654", "0.5563632", "0.55571246", "0.55445915", "0.5541603", "0.5535799", "0.55327773", "0.5522362", "0.55152565", "0.5511968", "0.55066454", "0.5505802", "0.5505579", "0.550409", "0.54900795", "0.5485956", "0.5472635", "0.546121", "0.5460567", "0.54487914", "0.5444262", "0.54404736", "0.54385096", "0.5434293", "0.5430368", "0.5430302", "0.5425996", "0.54225147", "0.54185957", "0.54150933", "0.54103726", "0.54053307", "0.5401717", "0.5391253", "0.5382208", "0.53675884", "0.53657657", "0.5365406", "0.5364293", "0.536116", "0.5360641", "0.535229", "0.5351693", "0.53482074", "0.5343144", "0.53410316", "0.5337772", "0.53368986", "0.53353006", "0.5334968", "0.53276426", "0.5323709", "0.5321104", "0.5317709", "0.5317255", "0.5317215", "0.530789", "0.53053844", "0.52990454", "0.52990454", "0.5297911", "0.52934146", "0.52823913", "0.5281828", "0.5281826", "0.52816653", "0.52787864", "0.5277331", "0.52757555", "0.527521", "0.52703005", "0.52645886", "0.52645886", "0.5262904", "0.525842", "0.52566755", "0.5256599", "0.5244352", "0.5242529", "0.5241139" ]
0.59464043
7
Given list of strings, return only those which are valid segments
def filter_segs(self, segs, normalize=True): return list(filter(lambda seg: self.seg_known(seg, normalize), segs))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def illegal_indirect_horizontal_intervals(a_list):\n allowed_intervals = ['1', 'b2', '2', 'b3', '3', '4', '5', 'b6', '6']\n intervals = indirect_horizontal_intervals(a_list)\n return [x for x in intervals if x[0][0] not in allowed_intervals]", "def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" \")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents", "def is_segment(pattern):\n return (type(pattern) is list\n and pattern\n and len(pattern[0]) > 2\n and pattern[0][0] == '?'\n and pattern[0][1] == '*'\n and pattern[0][2] in string.ascii_letters\n and ' ' not in pattern[0])", "def condition_segment(segment):\n # 1. If the start and end points are the same, done and one\n if segment[0][0] == segment[-1][0] and segment[0][1] == segment[-1][1]:\n if len(segment) == 2:\n LOG.warning(\" REJECTING two point segment, both equal\")\n return None\n return [segment]\n # 2. If point start and end points are inside the conus and they are closer\n # to each other than the CONUS bounds, then close off polygon\n if all(not point_outside_conus(Point(segment[i])) for i in [0, -1]):\n pt0 = Point(segment[0])\n pt1 = Point(segment[-1])\n cpt0 = get_conus_point(pt0)\n cpt1 = get_conus_point(pt1)\n cdist0 = cpt0.distance(pt0)\n cdist1 = cpt1.distance(pt1)\n if pt0.distance(pt1) < 0.5 * min([cdist0, cdist1]):\n LOG.warning(\" non-closed polygon assumed unclosed in error.\")\n segment.append(segment[0])\n return [segment]\n # 3. 
If the line intersects the CONUS 3+ times, split the line\n ls = ensure_outside_conus(LineString(segment))\n # Examine how our linestring intersects the CONUS polygon\n res = ls.intersection(CONUS[\"poly\"])\n if isinstance(res, LineString):\n return [ls.coords]\n # We got multiple linestrings\n # pylint: disable=no-member\n res = [r for r in res.geoms if r.length > 0.2]\n if len(res) == 1:\n LOG.warning(\" was able to filter out very short lines\")\n return [ensure_outside_conus(res[0]).coords]\n LOG.warning(\" returning a MultiLineString len=%s\", len(res))\n return [ensure_outside_conus(x).coords for x in res]", "def _segments_match(segments_to_match, arguments):\n\n segments_to_match = set(segments_to_match)\n for arg in arguments:\n for segment in list(segments_to_match):\n if segment in arg:\n segments_to_match.remove(segment)\n if not segments_to_match:\n return True\n return not segments_to_match", "def filt(seq, lst):\n regex = \"(\" + \")|(\".join(seq) + \")\"\n regex = re.compile(regex)\n slst = list(filter(regex.search, lst))\n return slst\n\n\n # still need a checkUsername function ", "def merge_segments(lst):\n ii = 0\n while True:\n jj = ii + 1\n if len(lst) <= jj:\n return lst\n seg1 = lst[ii]\n seg2 = lst[jj]\n if seg1.merge(seg2):\n if seg2.empty():\n del lst[jj]\n else:\n ii += 1\n else:\n ii += 1\n return lst", "def check_series(text_list, set_list):\n in_list = []\n for word in text_list:\n all_words = re.sub('\\(.*?\\)', ',', word).split(',')\n all_words = list(filter(None, all_words))\n component_in_list = [component.strip(' ') in set_list for component in all_words]\n this_word_in_list = all(component_in_list)\n in_list.append(this_word_in_list)\n return in_list", "def _select_simple_chainparts(chain_parts):\n\n for cp in chain_parts:\n if reject_substr_res.search(cp['chainPartName']):\n return False\n\n return True", "def consequence_filter(line, index, consequence_list):\n consequence = re.split(r'\\t+', line.rstrip('\\t'))[index]\n if not any(variant_type in consequence for variant_type in\n consequence_list):\n return True\n else:\n return False", "def words_with_consecutive_a_or_e(words):\n return [word for word in words if re.match(r'\\w*(aa|ee)\\w*', word)]", "def check_valid(indexes):\n # Check if any substrings has any versions that are the opposite of it anywhere in that line.\n valid = False\n for k, v in indexes.items():\n swapped = k[::-1]\n other = indexes.get(swapped)\n # Check to see if the swapped version exists in the dictionary.\n if other:\n # 'aaa' case, these are invalid matches, don't bother checking further.\n if k == swapped:\n continue\n # single occurence case\n if len(v) == 1 and len(other) == 1:\n # Case where both occur inside or outsid brackets.\n if (int(v[0]) % 2) == (int(other[0]) % 2):\n continue\n else:\n valid = True\n else:\n # Use sets to eliminate duplicates in the same chunk.\n v_s = set(v)\n other_s = set(other)\n possible_combinations = [(x % 2, y % 2) for x in v_s for y in other_s]\n # For a pairing to be valid, one part needs to be in an even chunk and the other in an odd ([]) chunk.\n if (1, 0) in possible_combinations or (0, 1) in possible_combinations:\n valid = True\n return valid", "def solve_conflicts(phrase_list, useSuperSetToken=True):\n to_add = []\n to_remove = []\n length = len(phrase_list)\n for i, entry in enumerate(phrase_list):\n if (entry in to_remove\n or entry in to_add):\n continue\n to_add.append(entry)\n for j in range(i + 1, length):\n if overlap(to_add[-1], phrase_list[j]):\n if 
(a_contain_b(phrase_list[j], to_add[-1])\n and useSuperSetToken):\n to_remove.append(to_add.pop())\n to_add.append(phrase_list[j])\n else:\n to_remove.append(phrase_list[j])\n to_add = sorted(to_add, key=lambda x: x['start'])\n return to_add", "def test_parse_restricted_tags():\n invalid_tags = {'*', '**', '***', 'a*', '*a', 'a*a*', '*a*a', '*aa*', 'a**a', '}'}\n combined_tags = valid_tags | invalid_tags\n\n # Function under test\n resultant_tags = searchtag.parse_restricted_tags(\" \".join(combined_tags))\n\n # Verify that we have the tags in the valid list\n assert resultant_tags == valid_tags", "def search_trimmers(seq: str) -> str:\n return [seq[i:i+3] for i in range(len(seq)-2)]", "def _check_list(self, input_list, switch_list):\n\n return_list = []\n for vid in input_list:\n if str(vid) in switch_list:\n return_list.append(vid)\n return return_list", "def find_overlapping_cds_simple(v_start, v_stop, cds_begins, strand):\n # cds_start = cds_begin[0]\n if strand == '+':\n return list(filter(lambda x: x[0] >= v_start and x[0] < v_stop, cds_begins))\n else:\n return list(filter(lambda x: x[0] > v_start and x[0] <= v_stop, cds_begins))", "def convert_segments(segments):\n polygons = []\n interiors = []\n linestrings = []\n for segment in segments:\n ls = LineString(segment)\n if segment[0][0] == segment[-1][0] and segment[0][1] == segment[-1][1]:\n lr = LinearRing(ls)\n if not lr.is_ccw:\n polygons.append(Polygon(segment))\n else:\n interiors.append(lr)\n continue\n linestrings.append(ls)\n\n return polygons, interiors, linestrings", "def parse_list(value: str) -> list[str]:\n segments = _QUOTED_SEGMENT_RE.findall(value)\n for segment in segments:\n left, match, right = value.partition(segment)\n value = ''.join([left, match.replace(',', '\\000'), right])\n return [_dequote(x.strip()).replace('\\000', ',') for x in value.split(',')]", "def filter_some_usages(EN):\n bad_markers = [\n # 'ecclesiastical', actually not a good idea:\n # reachtaire\n # - rector (ecclesiastical)\n # - master of ceremonies\n ]\n ret = '\\n'.join([line for line in EN.split('\\n') if\n (not line.endswith(')')\n or\n line.rsplit('(', 1)[1].rstrip(')')\n not in bad_markers)])\n if ret:\n return ret\n return EN", "def illegal_vertical_intervals(a_list, b_list):\n allowed_intervals = ['1', 'b3', '3', '4', '5', 'b6', '6']\n pairs = vertical_intervals(a_list, b_list)\n return [(i, t) for i, t in pairs if i[0] not in allowed_intervals]", "def illegal_parallel_intervals(a_list, b_list):\n allowed_parallel_intervals = ['3', '6']\n consecutives = parallel_motion(a_list, b_list)\n\n return [\n c for c in consecutives\n if c[0][0][0] not in allowed_parallel_intervals\n ]", "def substring_in_list(s, varlist):\n if varlist is None:\n return False\n is_sub = False\n for v in varlist:\n if v in s:\n is_sub = True\n break\n return is_sub", "def collect_str_fragments(self, fragment_list):\n raise NotImplementedError()", "def string_is_index_list(inp: str):\n inp = inp.strip()\n return len((inp)) > 0 and all([x in [\" \", \":\", \"-\"] or x.isdigit() for x in inp])", "def load_strands(raw_strands):\n allowed = \"{}{}{}{}\".format(Bases.Adenine, Bases.Cytosine, Bases.Thymine, Bases.Guanine)\n converted_strands = []\n\n for raw_strand in raw_strands:\n lower_strand = raw_strand.lower().strip(\"\\n\\t \")\n if not set(lower_strand) <= set(allowed):\n raise ValueError(\"Strand contains invalid base name. 
Remember: DNA not RNA, so no Uracil.\")\n # If we reach this point, the strand is valid\n converted_strands.append(lower_strand)\n return converted_strands", "def _should_be_pair(s_list):\n assert(isa(s_list, List))\n return str(s_list).find(' . ') > 0", "def list_check(listz):\n isValid = False\n x = 0\n position = ''\n\n #checking if characters contains 023 and extracting them \n\n while (x < len(listz)):\n if(listz.__contains__(0)):\n position = position + str(listz[listz.index(0)])\n if(listz.__contains__(2)):\n position = position + str(listz[listz.index(2)])\n if(listz.__contains__(3)):\n position = position + str(listz[listz.index(3)])\n x = len(listz) + 1\n\n#making sure its the requered sequence\n\n if(position == '023'):\n isValid = True\n x = x + 1\n return isValid", "def check_word_in_list_in_string(list, string):\n stuff = [string for word in list if(word in string)]\n return stuff", "def _all_valid_strings(td: fst.Fst) -> List[Tuple[List[int], float]]:\n if td.start() == -1:\n return []\n stack = [(td.start(), [])]\n complete_emissions = []\n while stack:\n state, output = stack.pop()\n final_weight = float(td.final(state))\n if np.isfinite(final_weight):\n complete_emissions.append((output, final_weight))\n stack += [(a.nextstate, output + [a.olabel]) for a in td.arcs(state)]\n return complete_emissions", "def validate_X(X: List[str]):\n _check_string_list(X)", "def parse_range(seq: str) -> list[int]:\n seq = seq.split(\",\")\n acc = []\n for i in seq:\n m = re.match(r\" *(?P<start>\\d+) *(- *(?P<end>\\d+))? *\", i)\n\n if not m:\n continue\n\n a = [m.group(\"start\"), m.group(\"end\")]\n a = [int(x) for x in a if x]\n\n if len(a) > 1:\n a = range(int(a[0]), int(a[1] + 1))\n\n acc.append(a)\n\n return list(\n set([x for x in list(itertools.chain.from_iterable(acc)) if x])\n )", "def transcript_filter(line, transcript_list):\n if any(transcript in line for transcript in transcript_list):\n return True\n else:\n return False", "def filter_one_v_all(description):\n brain_parts = [\"forebrain\", \"midbrain\", \"hindbrain\"]\n for part in brain_parts:\n if part in description:\n return True\n return False", "def _ExtractPathParamsFromRouteList(route_comps: Collection[str]) -> Set[str]:\n return set(filter(_IsPathParameter, route_comps))", "def Excluded(entry, list_to_exclude):\n excluded = False\n for rule in list_to_exclude:\n # Entries wrapped in /.../ are regex format.\n if rule.startswith('/') and rule.endswith('/'):\n # Strip the first and last chars.\n rule = rule[1:-1]\n if re.search(rule, entry, re.IGNORECASE):\n excluded = True\n break\n # If entry is not regex, do a plain-text compare.\n else:\n lcentry = entry.lower()\n if lcentry.find(rule) >= 0:\n excluded = True\n break\n return excluded", "def known(words: list[str]) -> list[str]:\n return [z for z in list(set(words)) if z in self.words]", "def isWordPartOf(self,word,wordlist):\n\t\tfor w in wordlist:\n\t\t\tif w in self._part_of_badword: \n\t\t\t\treturn True \t \n\t\t\t\tif w.startswith(word) or w.endswith(word):\n\t\t\t\t\tself._part_of_badword[w] = True \n\t\t\t\t\treturn True\n\t\treturn False", "def findsegments(id1, seq1, id2, seq2, minlen):\n\n segments = \"\"\n\n # Initialize list of corresponding residues.\n correspondances = []\n for res in seq1:\n correspondances.append([])\n \n # Main loop.\n for i in range(len(seq1)-minlen):\n seg1 = seq1[i:i+minlen]\n for j in range(len(seq2)-minlen):\n if j not in correspondances[i]:\n seg2 = seq2[j:j+minlen]\n if seg1 == seg2:\n # Look if the segment is longer than 
minlen.\n segments_equal = True\n prev1 = seg1\n prev2 = seg2\n extend = 1\n while segments_equal == True:\n i_end = i+minlen+extend\n j_end = j+minlen+extend\n ext1 = seq1[i:i_end]\n ext2 = seq2[j:j_end]\n if i_end > len(seq1) or j_end > len(seq2):\n seqend = True\n else:\n seqend = False\n if ext1 != ext2 or seqend == True:\n segments_equal = False\n segments += \"{} \".format(prev1)\n segments += \"{} [{}, {}] \".format(id1, i, i_end-2)\n segments += \" \"\n segments += \"{} [{}, {}] \".format(id2, j, j_end-2)\n segments += \"\\n\"\n # Add residues to correspondance list.\n for k in range(minlen+extend-1):\n l = i+k\n m = j+k\n correspondances[l].append(m)\n prev1 = ext1\n prev2 = ext2\n extend += 1\n\n return segments", "def get_all_segments(edfFiles):\n\n segments = []\n preprocessor = Preprocessor(config_startShift,\n config_endShift,\n config_powerLineFreq,\n config_bandLowCut,\n config_bandHighCut)\n for edf in edfFiles:\n print(\"getting the labeled segments from the recording \", str(edf.filename))\n segments.extend(get_segments_from_edf(edf, preprocessor))\n if edfFiles.index(edf) == 20: break\n return segments", "def label_to_segments(utters, labels):\n segment_list = []\n for i, utterence in enumerate(utters):\n segments = []\n seg = \"\"\n for j, char in enumerate(utterence):\n if labels[i][j] >= 0.5:\n if len(seg) > 0:\n segments.append(seg)\n seg = \"\"\n seg = seg + char\n else:\n seg = seg + char\n if j == (len(utterence) - 1):\n segments.append(seg)\n segment_list.append(segments)\n return segment_list", "def is_valid(text):\n return is_all_word_segment_in_text(WORDS, text)", "def keep_lowercase(str_list):", "def _filter_out_bad_segments(img1, seg1, img2, seg2):\n minval = tf.reduce_min(tf.reduce_sum(seg1, [0,1])*tf.reduce_sum(seg2, [0,1]))\n if minval < 0.5:\n warnings.warn(\"filtering bad segment\")\n return False\n else:\n return True", "def find_valid_final_lines(pot_ids, adj_list_ids, prev_stanza_words):\n\n # make pairings of two potential tweets\n combos = list(combinations(pot_ids, 6))\n\n # for each pair:\n # get combination of words, check if equals stanza_words\n valid = []\n for stanza in combos:\n words = list(chain.from_iterable([adj_list_ids[line] for line in stanza]))\n if sorted(words) == sorted(prev_stanza_words):\n valid.append(stanza)\n return valid", "def optimal_points(segments):\n points = []\n segments.sort(key=lambda x: x.end)\n\n while len(segments) != 0:\n s = segments[0]\n points.append(s.end)\n j = 0\n while j < len(segments):\n temp = segments[j]\n if temp.start <= s.end and temp.end >= s.end:\n segments.remove(temp)\n else:\n j += 1\n return points", "def validate_filter(port_type_list):\n diff_set = set(port_type_list) - set(port.part for port in PORT_TYPES)\n if diff_set:\n return \"{} not among known port part numbers\".format(diff_set)\n return \"\"", "def findLongestCommonSubstringManyStrings(listOfStrings):", "def course_ids_between(start_word, end_word):\r\n\r\n valid_courses = []\r\n for course in modulestore().get_courses():\r\n course_id = course.id.to_deprecated_string()\r\n if start_word.lower() <= course_id.lower() <= end_word.lower():\r\n valid_courses.append(course.id)\r\n return valid_courses", "def dzs_are_in(dz_string, substring1, substring2):\n if substring1 not in dz_string:\n return 0\n elif substring2 not in dz_string:\n return 0\n else:\n return 1", "def get_substrings(self, length):\n if self.dot is None:\n raise Exception(\"There is no structure given for this molecule.\")\n else:\n valid = []\n for i in 
range(len(self.seq) - length + 1):\n substring = self.seq[i:i + length]\n subdot = self.dot[i:i + length]\n ctr = 0\n for j in range(length):\n if subdot[j] == '(':\n ctr += 1\n if subdot[j] == ')':\n ctr -= 1\n if ctr < 0:\n break\n if ctr == 0:\n valid.append(Molecule(substring, subdot))\n return valid", "def parse_range(rangelist):\n oklist = set([])\n excludelist = set([])\n\n rangelist = rangelist.replace(' ', '')\n rangelist = rangelist.split(',')\n\n # item is single value or range\n for item in rangelist:\n item = item.split(':')\n\n # change to ints\n try:\n int_item = [int(ii) for ii in item]\n except ValueError:\n print(repr(':'.join(item)), 'not convertable to integer')\n raise\n\n if 1 == len(int_item):\n # single inclusive or exclusive item\n if int_item[0] < 0:\n excludelist.add(abs(int_item[0]))\n else:\n oklist.add(int_item[0])\n\n elif 2 == len(int_item):\n # range\n if int_item[0] <= int_item[1]:\n if int_item[0] < 0:\n print(item[0], ',', item[1], 'must start with a ')\n 'non-negative number'\n return []\n\n if int_item[0] == int_item[1]:\n thisrange = [int_item[0]]\n else:\n thisrange = range(int_item[0], int_item[1]+1)\n\n for ii in thisrange:\n oklist.add(ii)\n else:\n print(item[0], ',', item[1], 'needs to be in increasing ')\n 'order'\n raise\n else:\n print(item, 'has more than 2 values')\n\n for exitem in excludelist:\n try:\n oklist.remove(exitem)\n except(KeyError):\n oklist = [str(item) for item in oklist]\n print('ERROR: excluded item', exitem, 'does not exist in '\n + 'inclusive range')\n raise\n\n return sorted(list(oklist))", "def filter_for_semester(files_data: List, semester: str) -> List:\n final = list()\n for x in files_data:\n y = x.split(DELIMITER)\n if semester in y[9]: # NOTE This will always work, provided the semester string is given correctly.\n final.append(x)\n return final", "def addresses( data ) :\n return list( set(chain.from_iterable( [ re.sub(r'\\[.*?\\]\\s+','',x['C1']).split('; ') for x in data ] )))", "def clean_path(path):\n return [endpoint for endpoint in path if len(endpoint) > 23]", "def morsePartialDecode(inputStringList):\r\n\tpossible_letters_list_dot = []\r\n\tpossible_letters_list_hyphen = []\r\n\tvalid_words = []\r\n\tfor partialMorseLetter in inputStringList:\r\n\t\tpartialMorseLetterDot = re.sub('[x]', '.', partialMorseLetter)\r\n\t\tpartialMorseLetterDotTranslated = morseDecode([partialMorseLetterDot]) #sub it into morse decode\r\n\t\tpossible_letters_list_dot.append(partialMorseLetterDotTranslated) #append the MORSE translations where x=. 
to a list\r\n\r\n\t\tpartialMorseLetterHyphen = re.sub('[x]', '-', partialMorseLetter)\r\n\t\tpartialMorseLetterHyphenTranslated = morseDecode([partialMorseLetterHyphen])\r\n\t\tpossible_letters_list_hyphen.append(partialMorseLetterHyphenTranslated) #append the MORSE translations where x=- to a list\r\n\r\n\tpossible_letters_list = list(zip(possible_letters_list_dot, possible_letters_list_hyphen))\r\n\tpossible_words_separated = list(itertools.product(*possible_letters_list))\r\n\tpossible_words = [''.join(x) for x in possible_words_separated] #Possible words generated from the provided letters\r\n\r\n\tfor word in dictionary_words:\r\n\t\tif word in possible_words:\r\n\t\t\tvalid_words.append(word)\r\n\r\n\treturn valid_words", "def test_str_in_str_list(self):\n # compact ver sion (env variables)\n assert_that(Condition.is_valid(\n '\"{{ env.BRANCH_NAME }}\" in [\"dev\", \"prod\"]'), equal_to(True))\n # more spaces around are allowed (env variables)\n assert_that(Condition.is_valid(\n ' \"{{ env.BRANCH_NAME }}\" in [ \"dev\", \"prod\" ] '), equal_to(True))\n # compact version (task variables)\n assert_that(Condition.is_valid(\n '\"{{ variables.cpu_count }}\" in [\"1\", \"2\"]'), equal_to(True))", "def valid_list(cls):\n cls.initialize()\n return [x for x in cls.mapping.keys() if isinstance(x, str)]", "def filter_nonsense_sequences(sequences):\n\n # filter sequences if contains at least one 'N' character\n good_index = []\n filter_sequences = []\n for i, seq in enumerate(sequences):\n if \"N\" not in seq.upper():\n good_index.append(i)\n filter_sequences.append(seq)\n return np.array(filter_sequences), np.array(good_index)", "def _filter_invalid_sequences(self, pos_sequences, morphemes):\n\n if not morphemes:\n return pos_sequences\n if self.extract_morphemes_from_rules_corpus:\n return pos_sequences\n valid_elements = set(morphemes.keys() + self.delimiters)\n new_pos_sequences = set()\n for pos_sequence in pos_sequences:\n pos_sequence_set = set(pos_sequence)\n if pos_sequence_set & valid_elements == pos_sequence_set:\n new_pos_sequences.add(pos_sequence)\n return new_pos_sequences", "def findStrings(self, addressSet: ghidra.program.model.address.AddressSetView, minimumStringLength: int, alignment: int, requireNullTermination: bool, includeAllCharWidths: bool) -> List[ghidra.program.util.string.FoundString]:\n ...", "def pieces(relatorlist):\n F,rels=fg.parseinputwords(relatorlist)\n if not all(r==F.cyclic_reduce(r) for r in rels):\n raise ValueError(\"Relators are not cyclically reduced.\")\n pieces=set()\n irels=[rel for rel in itertools.chain.from_iterable(zip([w() for w in rels],[(w**(-1))() for w in rels]))] # arrange relators and inverses in a list of the form relator1, inverse of relator1, relator2, inverse of relator2,...\n drels=[x+x for x in irels]\n for relatorindex in range(len(rels)): # only need to search relators for candidate pieces, since a piece contained in inverse will be inverse of piece contained in relator\n relator=irels[2*relatorindex]\n for L in range(1,1+len(relator)):\n for startingindex in range(len(relator)):\n p=(relator+relator)[startingindex:startingindex+L] # the subword of length L starting at index i in reltaor as a cyclic word\n # now we need to check if p is a piece\n # we do not need to check lower relatorindices, because we already scanned those relators for pieces\n if any(p in x for x in [(relator+relator)[startingindex+1:len(relator)+startingindex+L-1]]+[drels[i] for i in range(2*relatorindex+1,len(drels))]):# found a matching subword, p is a 
piece\n pieces.add(p)\n pieces.add(''.join(reversed(p.swapcase())))\n return pieces", "def segment(text: str) -> List[str]:\n\n if not text or not isinstance(text, str):\n return []\n\n return _cut_subword(_cut_etcc.word_tokenize(text))", "def check(self, s: str, mem: dict):\n dp = [False for _ in range(len(s)+1)]\n dp[0] = True\n for i in range(1, len(s)+1):\n for j in range(i):\n if dp[j] and s[j:i] in mem:\n dp[i] = True\n return dp[-1]", "def create_exclusions_list(ips_str_list: Any | None) -> list:\n ips_list = argToList(ips_str_list, ',')\n exclusion_list = []\n for ip in ips_list:\n ip_type = auto_detect_indicator_type(ip)\n if ip_type not in ('IP', 'IPv6'):\n raise DemistoException(f'Invalid ip address - {ip}')\n is_v4 = ip_type == 'IP'\n ip_data = {\"ip\": ip, \"v4\": is_v4}\n exclusion_list.append(ip_data)\n return exclusion_list", "def _list_validity_check(l, valid_l):\n\n if not Settings._is_in_list(l, valid_l):\n raise InvalidSettingError()", "def parseHostList( ipstring ):\r\n\r\n # ideally, we should be able to handle these cases:\r\n # w.x.y.z, .x.y.z, .y.z, .z\r\n # w.x.y.a-b, .x.y.a-b, .x.a-b, .a-b\r\n # w.x.y.z-a.b.c.d, w.x.y-a.b.c, w.x-a.b, w-a\r\n # we also need to be able to parse CIDR ranges. Urgh. w.x.y.z/0\r\n \r\n # ...but for the sake of simplicity we'll implement a subset, consisting of these cases:\r\n # 1. w.x.y.z\r\n # 2. w.x.y.z1-zN\r\n # 3. .z1-.zN\r\n\r\n currentNetwork = '0.0.0'\r\n groups = ipstring.split(',') \r\n iplist = []\r\n for i in groups:\r\n\r\n octets = i.split('.')\r\n if len(octets) == 4: # cases 1 and 2\r\n currentNetwork = \"%s.%s.%s\" % (octets[0],octets[1],octets[2])\r\n iprange = getRange(octets[3])\r\n ips = [\"%s.%s\" % (currentNetwork,i) for i in iprange]\r\n\r\n elif len(octets) == 2: # case 3\r\n network = currentNetwork\r\n iprange = getRange(octets[1])\r\n ips = [\"%s.%s\" % (currentNetwork,i) for i in iprange]\r\n \r\n else:\r\n print 'syntax error in specifying host list!'\r\n sys.exit(1)\r\n \r\n iplist += ips\r\n\r\n return uniq(iplist) # get rid of repeats\r", "def illegal_horizontal_intervals(a_list):\n allowed_movements = ['1', 'b2', '2', 'b3', '3', '4', '5', 'b6', '6']\n intervals = horizontal_intervals(a_list)\n return [(i, a_list[x+1]) for x,i in enumerate(intervals) if i[0] not in allowed_movements]", "def contains_tokens(pattern):\n return type(pattern) is list and len(pattern) > 0", "def containsAll(str, set):\n return 0 not in [c in str for c in set]", "def containsAll(str, set):\n return 0 not in [c in str for c in set]", "def getguardlist(dataset):\n guards = []\n for line in dataset:\n if '#' in line:\n parts = line.split()\n for sec in parts:\n if '#' in sec:\n if sec not in guards:\n guards.append(sec)\n return guards", "def short_whitelist(whitelist):\n for x in [\"guid-4\", \"guid-5\"]:\n whitelist.remove(x)\n return whitelist", "def test_str_not_in_str_list(self):\n # compact ver sion (env variables)\n assert_that(Condition.is_valid(\n '\"{{ env.BRANCH_NAME }}\" not in [\"dev\", \"prod\"]'), equal_to(True))\n # more spaces around are allowed (env variables)\n assert_that(Condition.is_valid(\n ' \"{{ env.BRANCH_NAME }}\" not in [ \"dev\", \"prod\" ] '), equal_to(True))\n # compact version (task variables)\n assert_that(Condition.is_valid(\n '\"{{ variables.cpu_count }}\" not in [\"1\", \"2\"]'), equal_to(True))", "def filter_dots_in_strip(strip, points):\n return [point for point in points if strip[0] <= point.x <= strip[1]]", "def _check_common_start(self, valid_list):\n start_list = list(\n 
set([item.coords[\"time\"].values[0] for item in valid_list])\n )\n if len(start_list) != 1:\n return False\n return True", "def filterBySequence(wordlist, sequence):\n filtered_list = [entry[\"word\"] for entry in wordlist if entry[\"sequence\"].startswith(sequence)]\n return filtered_list", "def isValid(self, start, end):\n for s in self.skip:\n if start <= s[0] <= end or start <= s[1] <= end:\n return False\n return True", "def snake_case_split(string_list):\n result = []\n for string in string_list:\n result.extend([x.lower() for x in string.split('_') if x])\n return result", "def check_spacers(\n raw_signal_array,\n set_of_spacer_marks\n ):\n \n temp =[i for i, state in enumerate(raw_signal_array)\n if re.search('^\\|$', state)]\n # Build the set of spacers for uniformity in rendering.\n # if a space is missedin Excel, it will be forced on\n # the wave.\n if any((set_of_spacer_marks - set(temp))):\n logging.warning('{1} Possible missing spacers, Wave will be overwritten with spacers at columns-{0}'.format(sorted(set_of_spacer_marks), raw_signal_array[0]))\n set_of_spacer_marks = set_of_spacer_marks | set(temp)\n return set_of_spacer_marks", "def complete_set(self, text, line, begidx, endidx):\n tokens = split(line[:begidx])\n if len(tokens) == 1:\n return [i for i in ('filter ', 'default ', 'time-format ') if i.startswith(text)]\n if len(tokens) == 2 and tokens[1] == 'time-format':\n return [i for i in ('long', 'short') if i.startswith(text)]\n return []", "def cleave(arr: list, substr: str) -> (list, list):\n\n cleav_pat = re.compile(\"^(.*){}(.*)$\".format(substr))\n pre_arr = []\n post_arr = []\n for istr in arr:\n tl, tr = cleav_pat.match(istr).groups()\n if tl:\n pre_arr.append(tl)\n if tr:\n post_arr.append(tr)\n\n return (pre_arr, post_arr)", "def try_split(text, chars=(u'—', '-')):\n for c in chars:\n segments = text.split(c)\n if len(segments) > 1:\n return [s.strip() for s in segments]", "def longwords_Li_Comp(strings):\n return [string for string in strings if len(string)>4 ]", "def match_start_string(list_to_search, substring):\n # Whitespace is stripped before and after the substring,\n # but not within (e.g. 
\" New York City \" -> \"New York City\").\n clean_substring = substring.lstrip().rstrip().lower()\n items_found = []\n ([items_found.append(item) for item in list_to_search\n if clean_substring == item[:len(clean_substring)].lower()])\n return items_found", "def generate_path_segments():\n\n # Constrain path characters to alphanumeric and\n # skip lookalikes (1, i, l, 0, o)\n allowed = \\\n 'abcdefghjkmnpqrstuvwxyz' \\\n 'ABCDEFGHJKMNPQRSTUVWXYZ' \\\n '23456789'\n\n segs = []\n for index in range(3):\n # I will allow API path segments to be 3-8 characters\n str_len = random.randrange(3, 9)\n segs.append(''.join(random.choice(allowed) for i in range(str_len)))\n return segs", "def words_with_pt_es(words):\n return [word for word in words if re.match(r'\\w*(pt|es)\\w*', word)]", "def isValid(self, s: str) -> bool:\n st = []\n\n for char in s:\n if (len(st) != 0):\n e = st[-1]\n if (self.isValidPair(e,char)):\n st.pop()\n continue\n st.append(char)\n return (len(st)==0)", "def split_precondition(\n tokens: Sequence[str], words: Sequence[str], word_ends: Sequence[str]\n) -> bool:\n duplicated_word_ends = []\n for end1, end2 in zip(word_ends, word_ends[1:]):\n if end1 == end2:\n duplicated_word_ends.append(end1)\n\n if not duplicated_word_ends:\n return False\n\n duplicate_not_word = False\n for duplicate in duplicated_word_ends:\n if duplicate not in words:\n duplicate_not_word = True\n break\n\n if not duplicate_not_word:\n return False\n\n return True", "def clean_plant_list(plant_list_in):\r\n\tfull_plants = [plant for plant in plants if '.' not in plant] #remove abbreviation\r\n\t#print(full_plants)\r\n\r\n\treturn list(set(full_plants)) # return unique names as list\r", "def find_intersection(snp_name):\n intersect = set(snp_name[0])\n for i in range(1,len(snp_name)):\n intersect = intersect.intersection(set(snp_name[i]))\n return list(intersect)", "def get_possible_stresses(stress_pattern: str) -> List[str]:\n possible_stresses = []\n for index in range(len(stress_pattern)):\n possible_stresses.append(stress_pattern[:index + 1])\n return possible_stresses", "def filter_words_list(words,pattern,wrong_guess_lst):\r\n list_hints = same_length(words,pattern)\r\n list_hints = if_in_wrong_guess(wrong_guess_lst,list_hints)\r\n list_hints = same_pattern(list_hints,pattern)\r\n list_hints = lst_and_pattern(list_hints,pattern)\r\n return list_hints", "def remove_phrases(words: list) -> list:\n # We need to sanitize synsets from phrasal expressions that contain \"_\"\n phrasal_expression_slices = set()\n phrasal_expressions = set()\n\n # Get all phrasal expressions (that contain '_')\n for word in words:\n if '_' in word:\n split_word = word.split(\"_\")\n for w in split_word:\n phrasal_expression_slices.add(w)\n phrasal_expressions.add(word)\n\n valid_members = list()\n # Get all the words that are in the synset but not part of the phrasal expression:\n for word in words:\n if word not in phrasal_expression_slices and word not in phrasal_expressions:\n valid_members.append(word)\n return valid_members", "def inj_seg(self, exclude_coinc_flags=None):\n\n if exclude_coinc_flags is None:\n exclude_coinc_flags = []\n\n tmp_list = segments.segmentlist([])\n for key in self.exc_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.exc_dict[key])\n for key in self.seg_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.seg_dict[key])\n for key in self.bitmask_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.bitmask_dict[key])\n 
if self.schedule_time:\n seg = segments.segment(self.schedule_time, self.schedule_time + 1)\n seg_list = segments.segmentlist([seg])\n tmp_list.extend(seg_list)\n for time in self.gracedb_time:\n seg = segments.segment(time, time + 1)\n seg_list = segments.segmentlist([seg])\n tmp_list.extend(seg_list)\n return tmp_list", "def get_matching_emails(all_the_email,addrlist):\n l_addrlist = map(unicode.lower,addrlist)\n return [ e for e in all_the_email if e.l_address in l_addrlist ]", "def find_final_stanzas_from_stanzas(stanzas, adj_list_ids, adj_list_words):\n\n # create combos generator\n all_combos = combinations(stanzas, 3)\n\n # filtered generator (all lines unique)\n combos = (c for c in all_combos if len(set().union(*c)) == 12)\n\n all_valid = []\n for combo in tqdm(combos):\n valid = find_final_stanzas(*combo, adj_list_ids, adj_list_words)\n if valid:\n all_valid.append((combo, valid))\n return all_valid", "def sensors_list(s):\n slist = s.split(' ')\n pat = re.compile(r'es\\d{2}$|121f\\d{2}$', re.IGNORECASE)\n sensors = [se for se in slist if re.match(pat, se)]\n if len(sensors) == 0:\n raise argparse.ArgumentError('\"%s\" does not appear to contain any valid sensor strings (e.g. es09 or 121f02)')\n return sensors", "def remove_short_tokens(tokens):\n return [token for token in tokens if len(token) > 3]", "def start_with_a_end_with_b(words):\n return [word for word in words if re.match(r\"a\\w*b$\", word)]" ]
[ "0.60316664", "0.57880795", "0.5696511", "0.56766254", "0.5634419", "0.5569252", "0.55591166", "0.54737127", "0.5471725", "0.5424363", "0.5422917", "0.5396598", "0.52901965", "0.52557975", "0.5253024", "0.5238155", "0.52132607", "0.52100533", "0.5209123", "0.5185486", "0.5150733", "0.51327056", "0.5103269", "0.508818", "0.50836736", "0.507843", "0.5074022", "0.5072101", "0.5068017", "0.5062258", "0.5046044", "0.50311697", "0.50263685", "0.5020818", "0.50183076", "0.50144875", "0.49790326", "0.49748722", "0.49736142", "0.4972898", "0.49592233", "0.49566758", "0.49565262", "0.49533105", "0.4948647", "0.4945525", "0.49444646", "0.4941675", "0.49387845", "0.49323884", "0.49323446", "0.49316028", "0.49122876", "0.49114317", "0.49110532", "0.49054018", "0.48895472", "0.48823282", "0.48807883", "0.48769814", "0.4874839", "0.48731664", "0.48677438", "0.48658538", "0.48647478", "0.48555738", "0.48495203", "0.48492533", "0.48468494", "0.484203", "0.484203", "0.48400193", "0.48393783", "0.48343375", "0.48318335", "0.48257107", "0.48236322", "0.4815697", "0.4811803", "0.48102614", "0.48094055", "0.48091966", "0.48074496", "0.48059767", "0.48027357", "0.479672", "0.4795111", "0.4791759", "0.47863173", "0.47803995", "0.47802132", "0.477388", "0.47713134", "0.47706106", "0.47694346", "0.4763438", "0.47522023", "0.4752047", "0.474977", "0.47467563" ]
0.59074974
1
Return a string like the input but containing only legal IPA segments
def filter_string(self, word, normalize=True): return ''.join(self.ipa_segs(word, normalize))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compact(number):\n number = clean(number, ' ').upper().strip()\n if number.startswith('AL'):\n number = number[2:]\n if number.startswith('(AL)'):\n number = number[4:]\n return number", "def dummy_junction14():\n return \"junction:chr1:176-324:+\"", "def defangIPaddr(address):\n address_as_list = list(address)\n length_of_address = len(address_as_list)\n for i in range(length_of_address):\n if address_as_list[i] == \".\":\n address_as_list[i] = \"[.]\"\n return \"\".join(address_as_list)", "def cleanCode(si):\n while len(si) < 4: si += 'x' # fill out the length of the code string\n so = \"\"\n for ii in range(4):\n if si[ii] in \"1234567890abcdefxyABCDEFX\": # check if this is a valid character\n# [0-9a-fA-FxyX]\n so += si[ii] # valid character\n else:\n so += \"xxxx\" # fill the string with 'x'\n ii = 4 # hit a bad one, stop checking string\n return so[:4] # clean code is 4 characters long", "def safe_addr(ip_addr):\n return '.'.join(ip_addr.split('.')[:2] + ['xxx', 'xxx'])", "def _UnicornObfuscated(self, text):\n obfuscated = ''\n separator = text.find('@')\n for i in range(separator-1):\n if i in [4, 5]:\n obfuscated += text[i]\n else:\n obfuscated += '*'\n obfuscated += text[separator-1:]\n return obfuscated", "def dummy_junction12():\n return \"junction:chr1:176-224:+\"", "def dummy_junction24():\n return 'junction:chr1:251-399:+'", "def fmt(cls, n):\n return ''.join(c for c in n if c in cls.ALLOWED).lower()", "def get_sequence_without_gaps_or_padding(sequence: str) -> str:\n return sequence.replace(dc_constants.GAP_OR_PAD,\n '').replace(dc_constants.GAP_OR_PAD, '')", "def chars_to_preserve(\n self,\n sentence: str,\n ) -> str:\n try:\n tokenized = re.findall(self.whitelist, sentence, re.IGNORECASE)\n return \" \".join(tokenized)\n except Exception as error:\n print(\n textwrap.dedent(\n f\"\"\"\n Bad characters range {self.whitelist},\n {error}\n \"\"\"\n )\n )\n raise", "def dummy_junction13():\n return 'junction:chr1:176-299:+'", "def dummy_junction23():\n return 'junction:chr1:251-299:+'", "def dummy_junction34():\n return 'junction:chr1:351-399:+'", "def transform_seq(seq):\n # TODO add character checking based on ASCII code\n return \"\".join(\"\" if aa in msa_characters else aa for aa in seq)", "def ainvs_to_string(ainvs):\n return ainvs if type(ainvs)==type('') else \";\".join([NFelt(ai) for ai in ainvs])", "def clip(st,length):\n if len(st) > length:\n return st[:length] + \"...\"\n else:\n return st", "def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s", "def makeAntString(ants):\n st = \"Antenna\"\n antlist = makeAntList(ants)\n if len(antlist) > 1: st += \"s\"\n st += \" %s\"%helpers.formatAsRanges( antlist )\n return st", "def search_trimmers(seq: str) -> str:\n return [seq[i:i+3] for i in range(len(seq)-2)]", "def superstring(g):\n substrings = []\n last_overlap = 0\n i = source(g)\n while True:\n substrings.append(g.vertex_label(i)[last_overlap:])\n if g.outdegree(i) > 0:\n j = g.out_edges(i)[0][1]\n last_overlap = g.edge_weight(i, j)\n i = j\n else:\n break\n return \"\".join(substrings)", "def mask_sequence(seq, gaps):\n seq = [i.upper() for i in seq]\n for gap in gaps:\n for i in range(gap[0] - 1, gap[1]):\n try:\n seq[i] = seq[i].lower()\n except:\n continue\n return ''.join(seq)", "def _get_legal(token):\n valid = re.split(r'[^]a-zA-Z0-0![,. 
{}@#$%^&*-_+=;:<>?/~\\'\\\\`]', token)\n return ''.join(valid).strip()", "def _abbrev_program(program: Program, max_len=10):\n program_lines = program.out().splitlines()\n if max_len is not None and len(program_lines) > max_len:\n first_n = max_len // 2\n last_n = max_len - first_n\n excluded = len(program_lines) - max_len\n program_lines = (program_lines[:first_n] + [f'... {excluded} instrs not shown ...']\n + program_lines[-last_n:])\n\n return '; '.join(program_lines)", "def _filter_codesign_output(codesign_output):\n filtered_lines = []\n for line in codesign_output.splitlines():\n if line and not _BENIGN_CODESIGN_OUTPUT_REGEX.search(line):\n filtered_lines.append(line)\n return \"\\n\".join(filtered_lines)", "def get_mask_from_alignment(al):\n alignment_str = str(al).split(\"\\n\")[1]\n return alignment_str.replace(\"|\", \"+\")", "def convert_physician_diagnoses_code(diagnoses_code):\n if diagnoses_code in ICD_9_DEFAULT_CODES_FOR_DIAGNOSIS:\n diagnoses_icd_9_code = \\\n ICD_9_DEFAULT_CODES_FOR_DIAGNOSIS.get(diagnoses_code)\n if diagnoses_icd_9_code in \\\n (\"Blank\", \"Blank diagnosis\", \"Diagnosis of 'none'\",\n \"Noncodable diagnosis\", \"Noncodable\", \"Illegible diagnosis\"):\n return \"\"\n return diagnoses_icd_9_code\n\n # 1975-76 - Instead of a \"Y\" to prefix codes in the supplementary\n # classification, an ampersand (&) was used\n # 1977 - 78 - Same as above, except that the prefix character is a dash(-)\n # For year 1973 till 1978 `diagnoses_code` is 4 length character\n if len(diagnoses_code) < 5 and (\n diagnoses_code.startswith(\"&\") or diagnoses_code.startswith(\"-\")\n or diagnoses_code.startswith(\"Y\")\n ):\n diagnoses_code = \"V{}\".format(diagnoses_code[1:])\n\n # Character format\n # For inapplicable fourth or fifth digits, a dash is inserted.\n # 0010[-] - V829[-] = 001.0[0]-V82.9[0]\n elif \"-\" in diagnoses_code[3:]:\n diagnoses_code = diagnoses_code.replace(\"-\", \"0\")\n # Reference from documentation:\n # -9 = Blank\n elif \"-00009\" in diagnoses_code:\n return \"\"\n\n # The prefix “1” preceding the 3-digit diagnostic codes represents\n # diagnoses 001-999, e.g. ‘1381’=’381’=otitis media. 
And “138100”=”381.00”\n if diagnoses_code.startswith(\"1\"):\n diagnoses_code = diagnoses_code.lstrip(\"1\")\n\n # The prefix “2” preceding the 3 - digit diagnostic codes represents \"V\"\n # code diagnoses VO1 - V82, e.g., ‘2010’=’V10’ and “201081” = “V10.81”\n elif diagnoses_code.startswith(\"2\"):\n if diagnoses_code.startswith(\"20\"):\n diagnoses_code = \"V{}\".format(diagnoses_code[2:])\n else:\n diagnoses_code = \"V{}\".format(diagnoses_code[1:])\n\n # There is an implied decimal between the third and fourth digits\n diagnoses_icd_9_code = \"{}.{}\".format(\n diagnoses_code[:3], diagnoses_code[3:]\n )\n\n return diagnoses_icd_9_code", "def cleaning_sequence_regex(sequence):\n amb = re.compile(r\"[^ACGT]\")\n return amb.sub(\"\", sequence)", "def _remove_area_code(phone):\n\n if not phone.startswith('+46'):\n return phone\n else:\n return '0' + phone[3:]", "def get_AA_subs(s):\r\n test_seq = s.toseq()[70:217].translate() #Translate the mutated region\r\n substitutions = []\r\n \r\n for i in range(len(test_seq)):\r\n if test_seq[i] != align_temp[i]:\r\n substitutions.append(''.join([str(align_temp[i]),\r\n str(i+48),\r\n str(test_seq[i]),\r\n ' ']))\r\n \r\n return ''.join(substitutions).strip()", "def prepseq(self, seq):\n\n wtf = re.sub(r'\\*$', '', seq)\n return wtf", "def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" \")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents", "def cleaning_ambiguous_bases(seq):\n # compile the regex with all ambiguous bases\n pat = re.compile(r'[NRYWXSKM]')\n # look for the ambiguous bases and replace by\n # nothing\n return re.sub(pat, '', seq)", "def squeeze_seq(seq):\r\n\r\n return sub(r'([AGCTacgt])\\1+', '\\\\1', seq)", "def padded_area_code(phone_number):\r\n area_code = grab_area_code(phone_number)\r\n return area_code + \"*******\"", "def hgvs2single(s):\n _validate_str(s)\n t = re_protein.findall(s)\n return [\"{}{}{}\".format(AA_CODES[m[1]], m[2], AA_CODES[m[3]]) for m in t]", "def clip_string_list(a, max_len, continue_str='…'):\n return [x if len(x) <= max_len else x[:max_len - len(continue_str)] + '…' for x in a]", "def compact(number):\n number = clean(number).strip().replace(' ', '-').split('-')\n if len(number) == 4:\n # zero pad the different sections if they are found\n lengths = (2, 4, 7, 3)\n return ''.join(n.zfill(l) for n, l in zip(number, lengths))\n else:\n # otherwise zero pad the account type\n number = ''.join(number)\n return number[:13] + number[13:].zfill(3)", "def segment(text: str) -> str:\n import regex\n\n # Chinese\n text = regex.sub(r\"(\\p{Han})\", r\" \\1 \", text)\n # Korean\n text = regex.sub(r\"(\\p{Hangul})\", r\" \\1 \", text)\n # Japenese\n text = regex.sub(r\"(\\p{Hiragana})\", r\" \\1 \", text)\n text = regex.sub(r\"(\\p{Katakana})\", r\" \\1 \", text)\n\n text = text.replace(\" \", \" \").strip()\n return text;", "def 
filter_some_usages(EN):\n bad_markers = [\n # 'ecclesiastical', actually not a good idea:\n # reachtaire\n # - rector (ecclesiastical)\n # - master of ceremonies\n ]\n ret = '\\n'.join([line for line in EN.split('\\n') if\n (not line.endswith(')')\n or\n line.rsplit('(', 1)[1].rstrip(')')\n not in bad_markers)])\n if ret:\n return ret\n return EN", "def anything_but_chars(*args:List[str]) -> str:\n # TODO uniq\n chars = \"\".join(args)\n return f\"[^{chars}]\"", "def translate(dna):\n rna = dna.replace('T', 'U')\n startIndex = dna.find('AUG') + 1\n aminoAcidsSeq = \"\"\n for i in range(startIndex, len(rna), 3):\n # codon = rna[i: i+3]\n aminoAcidsSeq += code[rna[i: i+3]]\n if aminoAcidsSeq[len(aminoAcidsSeq) - 1] == '*':\n aminoAcidsSeq = aminoAcidsSeq[:-1]\n break\n return aminoAcidsSeq", "def ipa2hash(ipa):\n return clean(ipa.translate(CHAR_TO_CODE))", "def idaline_to_string( idaline ):\r\n\ti = 0\r\n\tnew = \"\"\r\n\twhile i < len(idaline):\r\n\t\tif idaline[i] == '\\x01' or idaline[i] == '\\x02':\r\n\t\t\ti = i + 1\r\n\t\telse:\r\n\t\t\tnew += idaline[i]\r\n\t\ti = i + 1\r\n\treturn new", "def _sanitize(bytesin):\n # Used for converting raw byte data to a string. If the byte isn't a tame ASCII character, use . instead.\n return \"\".join([x if 0x7f > ord(x) > 0x1f else '.' for x in bytesin])", "def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UxxCAGwsnyrHBNz#!D-D\", check=False).strip_bad_and_gaps(),\n \"UCAGWSNYRHBNDD\",\n )\n self.assertEqual(\n self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad_and_gaps(), \"\"\n )\n self.assertEqual(\n self.RNA(\"aaa ggg ---!ccc\", check=False).strip_bad_and_gaps(), \"AAAGGGCCC\"\n )", "def iscc_clean(i):\n return i.split(\":\")[-1].strip().replace(\"-\", \"\")", "def clean_legislature(string):\n\n roman_numerals = {\n 'I': 1, 'II': 2, 'III': 3, 'IV': 4, 'V': 5,\n 'VI': 6, 'VII': 7, 'VIII': 8, 'IX': 9, 'X': 10,\n 'XI': 11, 'XII': 12, 'XIII': 13, 'XIV': 14, 'XV': 15,\n 'XVI': 16, 'XVII': 17, 'XVIII': 18, 'XIX': 19, 'XX': 20,\n 'XXI': 21, 'XXII': 22, 'XXIII': 23, 'XXIV': 24, 'XXV': 25,\n }\n\n string = string.replace('&nbsp;', '')\n number, dates = string.split('[')\n number = roman_numerals[number.strip()]\n dates = dates.strip(' ]')\n if len(dates.split(' a ')) == 2:\n start, end = dates.split(' a ')\n else:\n start = dates.split(' a ')[0]\n end = ''\n if start.endswith(' a'):\n start = start.replace(' a', '')\n return number, start, end", "def filter_chants_with_nonvolpiano_chars(chants, logger=None):\n volpiano_chars = (\n r'3456712\\(\\)'\n r'ABCDEFGHJKLMNOPQRSIWXYZ89'\n r'abcdefghjklmnopqrsiwxyz'\n r'\\.\\,\\-\\[\\]\\{\\¶')\n pattern = f'^[{volpiano_chars}]*$'\n contains_no_other_chars = chants.volpiano.str.match(pattern) == True\n return chants[contains_no_other_chars]", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def raw(self) -> str:\n return \"\".join(seg.raw for seg in self.segments)", "def _getLilyAccidental(self):\n return \"\"", "def disperse_string(solid_string):\r\n normal_list = list(solid_string)\r\n return list(itertools.chain.from_iterable(zip(normal_list, [0] * len(normal_list))))", "def complement(seq):\n if PY3:\n 
table = str.maketrans('ACTGNactg', 'TGACNtgac')\n elif PY2:\n table = string.maketrans('ACTGNactg', 'TGACNtgac')\n return str(seq).translate(table)", "def clean_venue(venue):\n\n return venue.lower().strip('?:!.,;- ')", "def only_ascii(item):\n checked = [i for i in item if allowed(i)]\n return ''.join(checked).lower()", "def prettyDecode(self,seq):\n s = \"\".join(self.decode(seq))\n s = s.replace(\"_EOS\", \"\" )\n s = s.replace(\"_PAD\", \"\" )\n s = s.replace(\"_\", \" \" )\n return s", "def normalize_issn(val):\n val = val.replace(\" \", \"\").replace(\"-\", \"\").strip().upper()\n return \"{0}-{1}\".format(val[:4], val[4:])", "def short(text):\n rep = {\n ' *health *center': '',\n ' *health *ceanter': '',\n ' +H[./]*C': '',\n ' *health *post': '',\n ' *heslth *post': '',\n ' *Haelth *Post': '',\n ' *Health *Poat': '',\n ' *hospital': '',\n ' +h[./]*p': '',\n ' {2,}': ''}\n\n return reduce(lambda a, kv: re.sub(*kv, a, flags=re.I), rep.items(), text)", "def get_masked_string(s, p):\r\n return (fromstring(s, dtype=uint8))[p].tostring()", "def get_masked_string(s, p):\n return (fromstring(s,dtype=uint8))[p].tostring()", "def trim(s):\n return s if len(s) <= 80 else s[:77] + \"...\"", "def trim(s):\n return s if len(s) <= 80 else s[:77] + \"...\"", "def trim(s):\n return s if len(s) <= 80 else s[:77] + \"...\"", "def trim(s):\n return s if len(s) <= 80 else s[:77] + \"...\"", "def shorten_rtept(rtept):\n return rtept.upper()[:6].strip()", "def clean_exception(v):\n v = re.sub(r\"\\[\\[[^]|]*\\|([^]]*)\\]\\]\", r\"\\1\", v)\n v = re.sub(r\"\\[\\[\", \"\", v)\n v = re.sub(r\"\\]\\]\", \"\", v)\n v = re.sub(r\"``+\", \"\", v)\n v = re.sub(r\"''+\", \"\", v)\n v = re.sub(r\"(?is)<sup>.*?</sup>\", \"\", v)\n v = re.sub(r\"<[^>]*>\", \"\", v)\n v = re.sub(\"\\u2019\", \"'\", v) # Note: no r\"...\" here!\n v = re.sub(r\" abbr. 
.*\", \"\", v)\n v = re.sub(r\"\\s+\", \" \", v)\n return v.strip()", "def _canonify(self, rut):\n rut = smart_unicode(rut).replace(' ', '').replace('.', '').replace('-', '')\n return rut[:-1], rut[-1]", "def _pretty_iban(self, iban: str) -> str:\n\n new_iban = \"\"\n for i in range(len(iban)):\n if i % 4 == 0 and i != 0:\n new_iban += \" \"\n new_iban += iban[i]\n return new_iban", "def clean(input):\n return re.sub(r\"<.*?>\", \"\", re.sub(r\"\\!.\", \"\", input))", "def clean_path(path):\n return [endpoint for endpoint in path if len(endpoint) > 23]", "def filter_DNA(c):\n if c in \"ACGTacgt\":\n return True\n else:\n return False", "def removeLabels(str2: str):\n str2_arr = []\n last_seen_bracket = []\n for char in str2:\n if char == \"(\" or char == \"[\":\n last_seen_bracket.append(char)\n str2_arr.append(\"-\")\n elif char == \")\" or char == \"]\":\n if len(last_seen_bracket) >= 1:\n last_seen_bracket.pop()\n else:\n continue\n elif char == \"-\" or char == '$':\n continue\n elif len(last_seen_bracket) >= 1:\n continue\n else:\n str2_arr.append(char)\n\n if len(str2_arr) > 1:\n for i in range(len(str2_arr)):\n try:\n if str2_arr[i] == \"-\" and str2_arr[i - 1] == \"-\":\n str2_arr.pop(i - 1)\n # Some segments have dual purpose, so this removes dual dashes that result from this\n except IndexError:\n continue\n\n if str2_arr[len(str2_arr) - 1] == \"\\n\":\n str2_arr.pop()\n\n return \"\".join(str2_arr).rstrip(\"-\").lstrip(\"-\")", "def formatBarcode(barcode):\r\n barcodelist = [barcode[0:4],barcode[4:8],barcode[8:12],barcode[12]]\r\n delimitedbarcode = '-'.join(barcodelist)\r\n return delimitedbarcode", "def filter_invalid_characters(self, string):\n valid_chars = \"abcdefghijklmnopqrstuvwxyz0123456789-.\"\n newstring = \"\"\n for char in string:\n use_char = char\n if char not in valid_chars:\n use_char = '-'\n newstring = newstring + use_char\n\n return newstring", "def ps_filter(val):\n if isinstance(val, Undefined):\n return UNDEFINED_LABEL\n escaped = []\n for char in str(val):\n if char in \"`$#'\\\"\":\n char = \"`\" + char\n elif char == '\\0':\n char = \"`0\"\n elif char == '\\a':\n char = \"`a\"\n elif char == '\\b':\n char = \"`b\"\n elif char == '\\f':\n char = \"`f\"\n elif char == '\\n':\n char = \"`n\"\n elif char == '\\r':\n char = \"`r\"\n elif char == '\\t':\n char = \"`t\"\n elif char == '\\v':\n char = \"`v\"\n escaped.append(char)\n return ''.join(escaped)", "def get_ips_ored(self, app_name):\n lst =''\n for i in range(1,len(self.apps[app_name])):\n lst += ' OR '.join(self.apps[app_name][i])\n return lst", "def calc_conservation_string(aln):\n\n percids = calc_conservation(aln)\n\n # find identity positions\n identity = \"\"\n for pid in percids:\n if pid == 1:\n identity += \"*\"\n elif pid > .5:\n identity += \".\"\n else:\n identity += \" \"\n\n return identity", "def to_pinyin(s: str) -> str:\n if s == '山西':\n return 'Shan1xi'\n elif s == '陕西':\n return 'Shan3xi'\n pylist = lazy_pinyin(s)\n py = ''.join(pylist)\n return py", "def process_address(text):\n return sanitize(text[9:])", "def remove_mask_when_empty(self, text):\n if text in ['()-', '.-', '..-']:\n return ''\n else:\n return text", "def collapse(L):\n output = \"\"\n for s in L:\n output = output + s\n return output\n\n\n \"\"\"1) Computes the Protein encoded by a sequence of DNA. 
This function\n does not check for start and stop codons (it assumes that the input\n DNA sequence represents an protein coding region).\n \n dna: a DNA sequence represented as a string\n returns: a string containing the sequence of amino acids encoded by the\n the input DNA fragment\n \"\"\"", "def pkcs5_unpad(self,s):\n return s[0:-ord(s[-1])]", "def bipa(sequence):\n return [_token2clts(segment)[0] for segment in sequence]", "def sequence_cleaner(sequence, alphabet):\n seq = sequence.upper()\n sequence = [base for base in seq if base in alphabet]\n return ''.join(sequence)", "def test_squeeze_seq(self):\r\n\r\n seq = \"AAAGGGAAACCCGGGA\"\r\n self.assertEqual(squeeze_seq(seq), \"AGACGA\")\r\n self.assertEqual(squeeze_seq(\"AAAATATTTAGGC\"), \"ATATAGC\")\r\n self.assertEqual(squeeze_seq(\"\"), \"\")\r\n self.assertEqual(squeeze_seq(\"ATGCATGCATGC\"), \"ATGCATGCATGC\")", "def remove_cc(s):\n\n return \"\".join(ch for ch in s if unicodedata.category(ch)[0]!=\"C\")", "def normalise_str(in_dna):\n if in_dna == None or len(in_dna) == 0:\n return ''\n all_possible = []\n # Circularly permute original sequence and reverse complement\n for seq in self_and_rev_complement(in_dna):\n for permuted_seq in circular_permuted(seq): # Switch to faster permutation (6)\n all_possible.append(permuted_seq)\n\n # Sort and take the first\n all_possible.sort()\n return(all_possible[0])", "def str_prefix__(self):\n s = str(self.avp_code)\n if self.is_vendor:\n s+= \".v\"\n if self.is_mandatory:\n s+= \".m\"\n if self.is_protected:\n s+= \".p\"\n if self.vendor_id!=0:\n s+= \":\"+str(self.vendor_id)\n return s", "def get_complement(s):\n dna_complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return ''.join(filter(None, [ dna_complement[c.upper()] if c.upper() in dna_complement else '' for c in s ] ))", "def remove_non_narration_strings(transcription_row):\n sentence = transcription_row[\"text\"]\n # filter out (CAPITALIZED WORD) and \"CAPITALIZED WORD\". These are not enunciated in the voiceover, but rather\n # indicate noise/words from the original audio track that get interspersed into the voice\n # Might contain special characters\n # Update: Capitalization etc are inconsistent. But all follow the pattern \"text\" and (text). 
Remove these instead\n crosstalk_pattern = '\\(.*?\\)|\\\".*?\\\"'\n # crosstalk_findings = re.findall(crosstalk_pattern, sentence)\n # print(\"Crosstalk: \"+str(crosstalk_findings))\n sentence = re.sub(crosstalk_pattern, \" \", sentence)\n # filter out ' s ' ' Ss ' etc\n s_pattern = r'\\b[sS]+\\b'\n s_pattern_findings = re.findall(s_pattern, sentence)\n # if len(s_pattern_findings) > 0:\n # print(\"S-pattern: \"+str(s_pattern_findings))\n sentence = re.sub(s_pattern, \" \", sentence)\n transcription_row[\"text\"] = sentence\n return transcription_row", "def make_degen_str(aln):\n\n degens = find_degen(aln)\n degenmap = {\n -1: \" \",\n 0: \"0\",\n 1: \"1\",\n 2: \"2\",\n 3: \"3\",\n 4: \"4\",\n }\n\n return \"\".join(util.mget(degenmap, degens))", "def process_sequence(seq, whitelist):\n sym = ''.join(seq)\n out = validate_symbol(sym, whitelist)\n return out", "def disp_sec_str(aa_seq):\n return re.sub(\"(.{80})\", \"\\\\1\\n\", aa_seq, 0, re.DOTALL)", "def sn(string):\n\n return re.sub('[^A-Za-z0-9_.\\\\-/]', '.', string)", "def sparse_substrings(text: str):\n groups = re.findall(r\"\\D{1,}\", text)\n\n out = text\n # print(f\"{out} - Cleanup starting...\")\n for substring in groups:\n sparsed = f\" {squash(substring)} \"\n out = out.replace(substring, sparsed)\n # print(f\"{out} - (Found r: '{result}' -> '{sparsed}')\")\n\n # print(f\"{out} - after sparse \")\n return out", "def sufix(pattern):\n return pattern[1:len(pattern)]", "def str_for_file(xs):\n # Encode to ASCII.\n xs = xs.encode('ascii', 'ignore').decode()\n\n # Convert characters.\n convert = {':': ',',\n ';': ','}\n for char_from, char_to in convert.items():\n print(xs, char_from, char_to)\n xs = xs.replace(char_from, char_to)\n\n # Finally, whitelist characters.\n allowed = uppercase + lowercase + digits + '- !()-_=+\\'\",.'\n return ''.join(filter(lambda x: x in allowed, xs))", "def translate(nuc):\n\tfrom Bio import Seq\n\ttry:\n\t\ttmp_aa = Seq.translate(nuc.replace('-','N')) #returns string when argument is a string, Bio.Seq otherwise\n\texcept:\n\t\tprint(\"translation failed\",nuc)\n\t\ttmp_aa = 'X'*len(nuc)//3\n\taa_seq = \"\"\n\tfor i,aa in enumerate(tmp_aa):\n\t\tif nuc[i*3:(i+1)*3]=='---':\n\t\t\taa_seq+='-'\n\t\telse:\n\t\t\taa_seq+=aa\n\treturn aa_seq" ]
[ "0.54550743", "0.54391927", "0.53663737", "0.5322689", "0.5241737", "0.52362967", "0.5214989", "0.52047104", "0.519561", "0.51782346", "0.5152315", "0.51449424", "0.51403505", "0.5121869", "0.50963056", "0.506933", "0.50592506", "0.5054341", "0.5052699", "0.5051405", "0.50455904", "0.5028864", "0.5022986", "0.49869007", "0.49866816", "0.49604258", "0.49585265", "0.49564317", "0.49481565", "0.4947496", "0.49179265", "0.4908446", "0.4893773", "0.48723337", "0.48646927", "0.48630178", "0.4845092", "0.4838737", "0.48384774", "0.48241112", "0.4823283", "0.48198676", "0.480559", "0.4800959", "0.47997943", "0.479211", "0.4777611", "0.47713283", "0.47700724", "0.4768615", "0.4768615", "0.47577438", "0.47488216", "0.47455516", "0.47444057", "0.4743776", "0.47296983", "0.47233215", "0.4718336", "0.47076195", "0.47057277", "0.4703018", "0.46934676", "0.46934676", "0.46934676", "0.46934676", "0.4690306", "0.46875927", "0.4685597", "0.46774152", "0.4676022", "0.46754107", "0.46722227", "0.4671542", "0.4659024", "0.46514687", "0.46482474", "0.46449825", "0.4644025", "0.4642586", "0.46415207", "0.46406645", "0.4636303", "0.46342513", "0.46309963", "0.46194676", "0.46188447", "0.46175143", "0.46163064", "0.4614049", "0.4612258", "0.46097767", "0.4607544", "0.46021912", "0.46019658", "0.45926434", "0.45922387", "0.4587197", "0.4585084", "0.45848414" ]
0.5909749
0
Return a Segment object containing the features shared by all segments
def fts_intersection(self, segs, normalize=True): return reduce(lambda a, b: a & b, [self.fts(s, normalize) for s in self.filter_segs(segs, normalize)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def segments(self):\r\n return Segments(self)", "def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))", "def get_all(self):\n return self._segments", "def segments(self):\n L = len(self.vertices)\n return itertools.chain((self._subset((i,i+1)) for i in range(len(self)-1)),\n (self._subset((L-1,0)),))", "def pairwise_algorithm(segments):\n \n segment_pairs = [(x, y) for x in segments for y in segments if x[\"name\"] < y[\"name\"]]\n \n # key is a segment name, value is a set of those features that are contrastive\n # for that segment\n contrastive_features = defaultdict(set)\n \n for x, y in segment_pairs:\n assert x.keys() == y.keys()\n contrastive_feature = None\n for k, v in x.items():\n if k != \"name\" and v != y[k]:\n if contrastive_feature is None:\n contrastive_feature = k\n else:\n contrastive_feature = None\n break\n if contrastive_feature:\n contrastive_features[x[\"name\"]].add(contrastive_feature)\n contrastive_features[y[\"name\"]].add(contrastive_feature)\n \n return contrastive_features", "def obtain_geometries(self):\n\n assert isinstance(self.ts, TS)\n\n \n symbol_dict = {\n 17: \"Cl\",\n 9: \"F\",\n 8: \"O\",\n 7: \"N\",\n 6: \"C\",\n 1: \"H\",\n }\n atoms = []\n\n parser = ccread(self.log_file, loglevel=logging.ERROR)\n\n for atom_num, coords in zip(parser.atomnos, parser.atomcoords[-1]):\n atoms.append(Atom(symbol=symbol_dict[atom_num], position=coords))\n \n self.ts._ase_molecule = Atoms(atoms)\n self.ts.update_coords_from(\"ase\")\n\n self.pre_geometry = self.ts.ase_molecule.copy()\n self.post_geometry = self.ts.ase_molecule.copy()\n\n for vib, displacements in self.vibrations:\n if vib < 0: # Finding the imaginary frequency\n self.post_geometry.arrays[\"positions\"] -= displacements\n\n return self.pre_geometry, self.post_geometry", "def sharedVertices(self):\n return self._sharedVertices", "def segments(self):\n return self._segments", "def combineSegments(self):\n\n remaining_segments = list(self.segments)\n if not remaining_segments:\n return []\n\n chains = []\n # @TODO: Why is count computed this way?\n max_count = len(remaining_segments) * 2\n count = 0\n while remaining_segments and count < max_count:\n if chains and linked_a_chain_from(chains, remaining_segments):\n count += 1\n continue\n\n chains.append([remaining_segments.pop()])\n\n # grab the vertex indicies for each chain (aka face)\n newFaces = [[segment[2] for segment in chain] for chain in chains]\n self.faces.extend(newFaces)\n\n # lets compute some textureCoords for these new faces\n # based on their vertex coords in world space.\n # this works well for floors and ceilings.\n # flats are always 64x64 aligned to world coords\n [self.textureCoords.append(\n [(segment[0].x/64., segment[0].y/64.) 
for segment in chain])\n for chain in chains]", "def getSegments(self):\n l = len(self.points)\n return [Segment(self.points[i % l], self.points[(i + 1) % l], \\\n color=self.side_color, width=self.side_width) for i in range(l)]", "def intersect(self, other: Line | Segment) -> list[Point]:\n return list(distinct(self.faces.intersect(other)))", "def copy(self):\n return Segment([p.copy() for p in self.endpoints])", "def consolidate_instances_all_way(self, stats, segmented_instances):\n\n img = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n #get all pixel labels in the segmented_instances mask\n segment_numbers = np.unique(segmented_instances)\n\n # remove the background label\n segment_numbers=segment_numbers[segment_numbers!=0]\n\n end_points = np.empty((len(segment_numbers),),dtype=np.object_)\n end_points.fill([])\n\n for curr_segment in segment_numbers:\n idx=[]\n i=curr_segment-1\n if curr_segment!=0:\n #Show all segments of curr_segment. Only useful to view results\n img[segmented_instances== curr_segment]= 255\n #get indeces of the segments for curr_segment\n idx = np.argwhere(segmented_instances == curr_segment)\n if len(idx>0):\n end_points[i]= self._get_end_points(segmented_instances, i, \\\n stats, idx)\n # add point markers and lines connecting each end point to centroid.\n # useful only to view results\n \"\"\"for pt_num, pt in enumerate(end_points[i]):\n cv2.circle(img, (pt[0],pt[1]), 3, 100, -1)\n cv2.line(img,(pt[0],pt[1]),\\\n (stats['centroid'][i,0], stats['centroid'][i,1]),150,2)\n cv2.circle(img, (stats['centroid'][i,0], stats['centroid'][i,1]), 3, 200, -1)\"\"\"\n #self.showme(img, 'line '+str(i))\n\n # cluster segments into stem instances\n cluster_mask, clustered_instances = self._cluster_segments_all_way(segmented_instances,\\\n segment_numbers, end_points, \\\n stats)\n\n #put all instances in one layer\n if len(cluster_mask)>0:\n single_layer_cluster_mask=np.zeros(cluster_mask[0].shape)\n for i in xrange(len(cluster_mask)):\n single_layer_cluster_mask[cluster_mask[i]>0]= i+1\n\n # self.showObjects(clustered_instances);\n return single_layer_cluster_mask, clustered_instances", "def _shared_segs(self, g1, g2):\n\n # detect potential shared paths between two linestrings\n try:\n fw_bw = shared_paths(g1, g2)\n except ValueError:\n self._valerr = True\n fw_bw = False\n\n # continue if any shared path was detected\n if fw_bw and not fw_bw.is_empty:\n\n forward = fw_bw[0]\n backward = fw_bw[1]\n\n if backward.is_empty:\n # only contains forward objects\n shared_segments = forward\n elif forward.is_empty:\n # only contains backward objects\n shared_segments = backward\n else:\n # both backward and forward contains objects, so combine\n forward = self._validate_linemerge(linemerge(forward))\n backward = self._validate_linemerge(linemerge(backward))\n\n shared_segments = geometry.MultiLineString(forward + backward)\n\n # add shared paths to segments\n self._segments.extend([list(shared_segments)])\n\n # also add the first coordinates of both geoms as a vertice to segments\n p1_g1 = geometry.Point([g1.xy[0][0], g1.xy[1][0]])\n p1_g2 = geometry.Point([g2.xy[0][0], g2.xy[1][0]])\n ls_p1_g1g2 = geometry.LineString([p1_g1, p1_g2])\n self._segments.extend([[ls_p1_g1g2]])", "def mergeGeometries(self):\n self.geometry = reduce(lambda p1,p2 : p1.union(p2) ,map(lambda tax : tax.biomeGeometry,self.taxonomies))\n return self.geometry", "def edges(self) -> Segment:\n return Segment(self._edges, copy=False)", "def segment(self, intersect=True):\n from scipy.spatial import 
Voronoi\n from shapely.geometry import LineString\n from shapely.ops import polygonize\n\n vor = Voronoi(self._centered_coords)\n lines = [LineString(vor.vertices[line]) for line in vor.ridge_vertices if -1 not in line]\n poly_generator = polygonize(lines)\n\n series = gpd.GeoSeries(poly_generator)\n series = self.translate(series)\n series.crs = self.detection_base.height_model.crs\n\n if intersect:\n series = series.intersection(self.detection_base.height_model._bounding_box_poly)\n\n return series", "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def shared_segs(self, g1, g2):\n\n # detect potential shared paths between two linestrings\n try:\n fw_bw = shared_paths(g1, g2)\n except ValueError:\n self.valerr = True\n fw_bw = False\n # fw_bw = shared_paths(snap(g1, g2, tolerance=6), g2)\n\n # continue if any shared path was detected\n if fw_bw and not fw_bw.is_empty:\n\n forward = fw_bw[0]\n backward = fw_bw[1]\n\n if backward.is_empty:\n # only contains forward objects\n shared_segments = forward\n elif forward.is_empty:\n # only contains backward objects\n shared_segments = backward\n else:\n # both backward and forward contains objects, so combine\n shared_segments = geometry.MultiLineString(\n [linemerge(forward), linemerge(backward)]\n )\n\n # add shared paths to segments\n self.segments.extend([list(shared_segments)])\n\n # also add the first coordinates of both geoms as a vertice to segments\n p1_g1 = geometry.Point([g1.xy[0][0], g1.xy[1][0]])\n p1_g2 = geometry.Point([g2.xy[0][0], g2.xy[1][0]])\n ls_p1_g1g2 = geometry.LineString([p1_g1, p1_g2])\n self.segments.extend([[ls_p1_g1g2]])", "def get_segments(self):\n\t\tos.chdir(self.segment_path)\n\t\tfor path in glob.glob(\"%s/*.seg\" % self.segment_path):\n\t\t\t_file = os.path.split(path)[1]\n\t\t\tdae = DiscreetArchiveElement(self,_file,element_type='segment')\n\t\t\tself.elements.append(dae)\n\t\treturn True", "def overlaps(self,region):\n fs = FeatureSet()\n for f in self:\n if( f.overlaps(region) ):\n fs.append(f)\n return fs", "def _cluster_segments_all_way(self, segmented_instances, labels, \\\n end_points, stats, cluster_thresh=0.5):\n\n #self.showme(segmented_instances, 'main img')\n segment_association_list = []\n max_num_end_points= 0\n\n # for each stem segment\n for i in range(0, len(labels)):\n # each end point in the current segment i\n if max_num_end_points < len(end_points[i]):\n max_num_end_points = len(end_points[i])\n for k in range(0, len(end_points[i])):\n angle_list=[]\n # find the segment that is most likely connected to segment i at end point[i][k]\n for j in range(0, len(labels)):\n # make sure we are not trying to connect the segment to itself\n if i!= j:\n # angle calculates the angle between the line stats['centroid'][i]-end_points[i][k]\n # and stats['centroid'][i]-stats['centroid'][j]\n\n angle = self._ang([stats['centroid'][i],end_points[i][k]], \\\n [stats['centroid'][i], stats['centroid'][j]] )\n # if the angle value is within the acceptable range of +/- angle_thresh\n if angle<=self.angle_thresh or angle>=360-self.angle_thresh:\n other_angle, other_seg_section, end_point_dist = self._get_best_fit(segmented_instances, \\\n len(labels), \\\n stats, end_points,\\\n i, j, k, pos_angle=angle<=self.angle_thresh)\n # if the best fit segment also has a small angle between its\n # end point-centroid line and centroid-centroid line,\n # add it to segments connected to segment i\n if other_angle!=None and other_angle<=self.angle_thresh:\n angle_list.append((j, 
other_seg_section, other_angle, end_point_dist, angle))\n #Sort the list of stem segments connected to i by end_point_dist\n angle_list = sorted(angle_list, key=lambda x:x[3])\n #Sorting by the Euclidian distance of the end_point_dist and the other_angle does not change end result\n #angle_list = sorted(angle_list, key=lambda x:(math.sqrt(x[3]**2.0+x[2]**2.0)))\n # the angle value reflects how far segment k is from the straight line\n # going through the centroids\n if len(angle_list)>0:\n # (i, j, k, l, angle between i and centroid line, angle between j and centroid line, distance between closest end points k in seg i and l in seg j)\n segment_association_list.append((i,angle_list[0][0],k, angle_list[0][1], angle_list[0][4], angle_list[0][2], angle_list[0][3]))\n\n\n # sort slope differences in an increasing order\n segment_association_list = sorted(segment_association_list,key=lambda x:(x[6]))\n\n # find best match by iteretively selecting the smallest difference\n # and adding it to the ith cluster\n cluster_list = []\n cluster = np.full(len(labels),None)\n colored_clusterImg = np.zeros(segmented_instances.shape).astype(np.uint8)\n #clusterImg = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n # initialize cluster list to single clusters contianing only each individual segment\n for i in range(0, len(labels)):\n cluster[i]=i\n cluster_list.append([i])\n #self.showme(clusterImg, str(i))\n\n visited=np.full((len(labels),max_num_end_points), False)\n\n #cluster=np.frompyfunc(list,1,1)(cluster) # allows us to append to only the specified list end_points[i]\n new_cluster_num=0\n color_offset=len(labels)\n\n # for each pair of segments in our list of best fit segments\n for curr_tuple in segment_association_list:\n img = np.zeros(segmented_instances.shape)\n i = curr_tuple[0] # index of first segment\n j = curr_tuple[1] # index of second segment in the tuple\n i_section = curr_tuple[2] #end point number in segment i\n j_section = curr_tuple[3] #end point number in segment j\n angle = curr_tuple[4]\n other_angle = curr_tuple[5]\n end_point_dist = curr_tuple[6] #distance between the connecting end points of segments i and j\n img[segmented_instances== i]= 255\n img[segmented_instances== j]= 255\n if (visited[i][i_section]==False)and(visited[j][j_section]==False):\n #cv2.line(clusterImg,(end_points[i][i_section][0],end_points[i][i_section][1]),\\\n # (end_points[j][j_section][0], end_points[j][j_section][1]),150,2)\n #self.showme(clusterImg, str(i))\n visited[i][i_section]=True\n visited[j][j_section]=True\n cluster_num = cluster[i]\n if cluster[i]!=cluster[j]:\n other_cluster_num = cluster[j]\n cluster_list[cluster_num] = list(set(cluster_list[cluster_num]+\\\n copy.deepcopy(cluster_list[other_cluster_num])))\n # update cluster numbers for all segments moved into new cluster\n for seg in cluster_list[other_cluster_num]:\n cluster[seg]=cluster_num\n # update cluster numbers for clusters larger than cluster to be removed\n for idx in range(0, len(cluster)):\n if (cluster[idx]>other_cluster_num):\n cluster[idx]= cluster[idx]-1\n del cluster_list[other_cluster_num]\n\n\n #show clustered segments\n color = 0\n cluster_num = 0\n cluster_mask=[]\n\n for c in cluster_list:\n color = color+0.1\n cluster_mask.append(np.zeros(segmented_instances.shape).astype(np.uint8))\n\n for i in c:\n cluster_mask[cluster_num][(segmented_instances == labels[i])]=1\n colored_clusterImg[(segmented_instances == labels[i])]= int(color*255)\n \"\"\"if self.key in ['../data/images/image1672', 
'../data/images/image1289']:\n self.showme(colored_clusterImg)\"\"\"\n cluster_num +=1\n\n return cluster_mask, colored_clusterImg", "def components(self):\n skel = self.clone()\n forest = self._compute_components(skel)\n \n if len(forest) == 0:\n return []\n elif len(forest) == 1:\n return [ skel ]\n\n skeletons = []\n for edge_list in forest:\n edge_list = np.array(edge_list, dtype=np.uint32)\n vert_idx = fastremap.unique(edge_list)\n\n vert_list = skel.vertices[vert_idx]\n radii = skel.radii[vert_idx]\n vtypes = skel.vertex_types[vert_idx]\n\n remap = { vid: i for i, vid in enumerate(vert_idx) }\n edge_list = fastremap.remap(edge_list, remap, in_place=True)\n\n skeletons.append(\n Skeleton(vert_list, edge_list, radii, vtypes, skel.id)\n )\n\n return skeletons", "def get_intersections(self):\n return self.intersection_list", "def __load_segments(self):\r\n self.__segments = []\r\n if len(self.points) > 1:\r\n s = self.points[0]\r\n k = 1\r\n while k < len(self.points):\r\n e = self.points[k]\r\n self.__segments.append(Segment(s, e))\r\n s = e \r\n k += 1\r\n e = self.points[0]\r\n self.__segments.append(Segment(s, e))", "def get_features(self):\n return []", "def take_common_features(feat1,feat2):\n common=np.intersect1d(feat1,feat2) # sorted\n ind1=find_indices(common,feat1)\n ind2=find_indices(common,feat2)\n return common,ind1,ind2", "def _get_seg_features(string):\n seg_feature = []\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def cluster(self):\n logger.debug(\"Beginning feature based clustering on %d clusters.\" % len(self.c2b))\n # Merge the two nearest clusters until we can't.\n #\n while self.mergeNearestClusters():\n pass\n logger.debug(\"After clustering, there are now %d clusters remaining.\" % len(self.c2b))\n return self.c2b.values()", "def __feature_set__(self):\r\n import numpy as np\r\n import datetime\r\n import time\r\n cols_norm = [col for col in self.columns]\r\n cols_lower = [col.lower() for col in self.columns]\r\n fields = []\r\n features = []\r\n date_fields = []\r\n _geom_types = {\r\n arcgis.geometry._types.Point : \"esriGeometryPoint\",\r\n arcgis.geometry._types.Polyline : \"esriGeometryPolyline\",\r\n arcgis.geometry._types.MultiPoint : \"esriGeometryMultipoint\",\r\n arcgis.geometry._types.Polygon : \"esriGeometryPolygon\"\r\n }\r\n if self.sr is None:\r\n sr = {'wkid' : 4326}\r\n else:\r\n sr = self.sr\r\n fs = {\r\n \"objectIdFieldName\" : \"\",\r\n \"globalIdFieldName\" : \"\",\r\n \"displayFieldName\" : \"\",\r\n \"geometryType\" : _geom_types[type(self.geometry[self.geometry.first_valid_index()])],\r\n \"spatialReference\" : sr,\r\n \"fields\" : [],\r\n \"features\" : []\r\n }\r\n if 'objectid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n elif 'fid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('fid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('fid')]\r\n elif 'oid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('oid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('oid')]\r\n else:\r\n self['OBJECTID'] = list(range(1, self.shape[0] + 1))\r\n res = self.__feature_set__\r\n del self['OBJECTID']\r\n return res\r\n if 'objectIdFieldName' in fs:\r\n fields.append({\r\n \"name\" : fs['objectIdFieldName'],\r\n \"type\" : 
\"esriFieldTypeOID\",\r\n \"alias\" : fs['objectIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['objectIdFieldName']))\r\n if 'globalIdFieldName' in fs and len(fs['globalIdFieldName']) > 0:\r\n fields.append({\r\n \"name\" : fs['globalIdFieldName'],\r\n \"type\" : \"esriFieldTypeGlobalID\",\r\n \"alias\" : fs['globalIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['globalIdFieldName']))\r\n elif 'globalIdFieldName' in fs and \\\r\n len(fs['globalIdFieldName']) == 0:\r\n del fs['globalIdFieldName']\r\n if self._geometry_column_name in cols_norm:\r\n cols_norm.pop(cols_norm.index(self._geometry_column_name))\r\n for col in cols_norm:\r\n try:\r\n idx = self[col].first_valid_index()\r\n col_val = self[col].loc[idx]\r\n except:\r\n col_val = \"\"\r\n if isinstance(col_val, (str, np.str)):\r\n l = self[col].str.len().max()\r\n if str(l) == 'nan':\r\n l = 255\r\n\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeString\",\r\n \"length\" : int(l),\r\n \"alias\" : col\r\n })\r\n if fs['displayFieldName'] == \"\":\r\n fs['displayFieldName'] = col\r\n elif isinstance(col_val, (datetime.datetime,\r\n pd.Timestamp,\r\n np.datetime64,\r\n pd.datetime)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDate\",\r\n \"alias\" : col\r\n })\r\n date_fields.append(col)\r\n elif isinstance(col_val, (np.int32, np.int16, np.int8)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSmallInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (int, np.int, np.int64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (float, np.float64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDouble\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (np.float32)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSingle\",\r\n \"alias\" : col\r\n })\r\n fs['fields'] = fields\r\n for row in self.to_dict('records'):\r\n geom = {}\r\n if self._geometry_column_name in row:\r\n geom = row[self._geometry_column_name]\r\n del row[self._geometry_column_name]\r\n for f in date_fields:\r\n try:\r\n row[f] = int(row[f].to_pydatetime().timestamp() * 1000)\r\n except:\r\n row[f] = None\r\n features.append(\r\n {\r\n \"geometry\" : dict(geom),\r\n \"attributes\" : row\r\n }\r\n )\r\n del row\r\n del geom\r\n fs['features'] = features\r\n return fs", "def contained(self):\n seen = set()\n return [l.to_segment for l in self.edges_to_contained \\\n if id(l) not in seen and not seen.add(id(l))]", "def get_segment_colour_map(self, features):\n\n hashList = {'1' : 'Grey',\n '2':'Red',\n '3':'Green',\n '4':'greenyellow',\n '5':'Pink',\n '6':'Orange',\n '7':'goldenrod',\n '8':'indianred',\n '9':'peachpuff',\n '10':'deepskyblue',\n '11':'firebrick',\n '12':'orchid',\n '13': 'moccasin',\n '14':'slateblue',\n '15':'turquoise',\n '16':'tomato',\n '17':'darkmagenta',\n '18':'olivedrab'}\n return hashList", "def get_segments(self, sets=None):\n if sets is None:\n if self.sets is not None:\n sets = self.sets\n else:\n raise ValueError(\"sets and self.sets attributes are None, \\\n you need either to pass an origin argument to get_segments or \\\n to use get_filtration method before\")\n segments = []\n for s in sets:\n if self.epsilon <= s.getRelevance():\n t, a, b = s.getPosition()\n for i, seg in enumerate(segments):\n tp, ap, bp = seg\n if t >= tp and bp > a:\n bp = a\n elif t <= tp and ap < b:\n ap = b\n segments[i] = 
(tp, ap, bp)\n segments.append((t, a, b))\n return segments", "def segment_tuples(self):\n return ((self.vertices[i], self.vertices[i+1])\n for i in range(len(self.vertices)-1))", "def features_from_labels(audio_file, segments):\n segments_features = []\n #for each segment\n for segment in segments:\n features = features_from_label(audio_file, segment)\n #and append it to the list\n segments_features.append(features)\n return segments_features", "def segments(seg_type=None):\n\n for index in xrange(idaapi.get_segm_qty()):\n seg = Segment(index=index)\n if (seg_type is None) or (seg.type == seg_type):\n yield Segment(index=index)", "def get_segmentations(self, aids):\n sseg_list = []\n for aid in aids:\n ann = self.dset.anns[aid]\n coco_sseg = ann.get('segmentation', None)\n if coco_sseg is None:\n sseg = None\n else:\n sseg = kwimage.MultiPolygon.coerce(coco_sseg)\n sseg_list.append(sseg)\n return sseg_list", "def intersection(self, s2):\n s1 = self\n if s1.is_full:\n return s2\n if s2.is_full:\n return s1\n return IdSet(s1._set.intersection(s2._set))", "def intersect(self, other: Segment) -> Optional[Intersection]:\n # Short-circuit for parallel segments\n if self.orientation == other.orientation:\n return\n\n # Deconstruct segments into a series of Coordinates & find the set intersection\n intersection = self.to_set().intersection(other.to_set())\n if not intersection:\n return\n\n x, y = list(intersection)[0]\n return Intersection(location=Coordinate(x=x, y=y))", "def get_seg_features(string):\n seg_feature = []\n\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def segment_tuples(self):\n return ((self.vertices[i-1], self.vertices[i])\n for i in range(len(self.vertices)))", "def get_market_segments(self):\r\n return self._market_segments", "def copy(self) -> \"BaseSegment\":\n new_seg = copy(self)\n # Position markers are immutable, and it's important that we keep\n # a reference to the same TemplatedFile, so keep the same position\n # marker.\n new_seg.pos_marker = self.pos_marker\n if self.segments:\n new_seg.segments = tuple(seg.copy() for seg in self.segments)\n return new_seg", "def get_cars_at_intersection(self):\n return self.cars_at_intersection", "def get_cars_at_intersection(self):\n return self.cars_at_intersection", "def get_cars_at_intersection(self):\n return self.cars_at_intersection", "def get_comp_spanrels(self):", "def intersection(self):\n return Intersection(self.source, self)", "def index_feats_dict(self):\n doc_features_dict = {}\n\n for index, doc in zip(self.index, self.series):\n # Sets for a doc and feature words\n doc_set = set(doc.split())\n feat_set = set(self.features)\n\n # Shared words between the two sets\n interset_words = doc_set.intersection(feat_set)\n\n # Append to doc_features_dict\n doc_features_dict[index] = list(interset_words)\n\n return doc_features_dict", "def get_common_features(self, exclude=None):\n feats = self.get_feature_sets()\n\n common_feats = set.intersection(*feats)\n\n if exclude:\n for feat in exclude:\n common_feats.remove(feat)\n\n return list(common_feats)", "def segmentation(self):\r\n return resources.Segmentation(self)", "def getIntersections(self):\n\t\treturn self.intersections", "def multi_line_intersect(segment, segmentsAll):\n intersects = np.array([False])\n if (len(segmentsAll) > 0):\n d3 = segmentsAll[:, 1] - segmentsAll[:, 0]\n d1 = segment[1, :] - segment[0, :]\n c1x = 
np.cross(d3, segment[0, :] - segmentsAll[:, 0])\n c1y = np.cross(d3, segment[1, :] - segmentsAll[:, 0])\n c3x = np.cross(d1, segmentsAll[:, 0] - segment[0, :])\n c3y = np.cross(d1, segmentsAll[:, 1] - segment[0, :])\n intersects = np.logical_and(c1x * c1y < 0, c3x * c3y < 0)\n return(intersects)", "def segment_func1(self):\n # computing neighboors graph\n A = self.normal_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels", "def segmented_intersections(lines):\r\n\r\n intersections = []\r\n for i, group in enumerate(lines[:-1]):\r\n for next_group in lines[i+1:]:\r\n for line1 in group:\r\n for line2 in next_group:\r\n intersections.append(intersection(line1, line2)) \r\n\r\n return intersections", "def segment_func2(self):\n # computing neighboors graph\n A = self.boundaryprob_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels", "def to_segmentation_task(\n self, keep_geometries: Optional[List] = [Polygon, Bitmap], target_classes=None\n ) -> Tuple[ProjectMeta, Dict[ObjClass, ObjClass]]:\n mapping = {}\n res_classes = []\n for obj_class in self.obj_classes:\n obj_class: ObjClass\n\n if target_classes is None or obj_class.name in target_classes:\n if obj_class.geometry_type in keep_geometries:\n if obj_class.geometry_type == Bitmap:\n mapping[obj_class] = obj_class\n res_classes.append(obj_class)\n else:\n new_class = obj_class.clone(geometry_type=Bitmap)\n mapping[obj_class] = new_class\n res_classes.append(new_class)\n else:\n mapping[obj_class] = None\n else:\n mapping[obj_class] = None\n\n res_meta = self.clone(obj_classes=ObjClassCollection(res_classes))\n return res_meta, mapping", "def segment(self):\n start = self.alignment.matching_function_startpoint(self.idx)\n end = self.alignment.matching_function_endpoint(self.idx)\n return [start, end]", "def lists_and_segments(self):\n response = self._get(self.uri_for(\"listsandsegments\"))\n return json_to_py(response)", "def get_features(self, request, **kwargs):\n if hasattr(request, 'GET'):\n reference, start, stop = parse_das_segment(request)\n query_seg = {'id': reference, 'start':start, 'stop':stop}\n if 'chrom' in self.fields:\n pass\n try:\n reference = int(reference)\n except ValueError:\n reference = reference\n self.is_authenticated(request)\n # :TODO put this throught the regular filter\n try:\n if start:\n base_object_list = self.get_object_list(request).filter(\n Q(start__range=(start, stop)) |\\\n Q(end__range=(start, stop)),\n chrom__exact = reference)\n else:\n base_object_list = self.get_object_list(request).filter(\n chrom__exact = reference)\n # :TODO authorization check\n except ValueError:\n raise ValueError('Invalid Request')\n bundles = [self.build_bundle(obj=obj, request=request) for obj in\\\n base_object_list]\n to_be_serialized = [self.full_dehydrate(bundle) for bundle in bundles]\n # passing reqeust into options is, maybe I should pass in the whole\n # request? 
\n options = {'query': query_seg, \n 'method': self._meta.method, \n 'request_string': request.META['QUERY_STRING'],\n 'request_path': request.path,\n }\n content = self.serialize(request, to_be_serialized, 'xml',\n options=options)\n response = HttpResponse(content = content,\n content_type = 'application/xml')\n response = add_das_headers(response)\n return response", "def getSegments(self) -> List[int]:\n ...", "def get_all_features(self) :\n raise NotImplementedError", "def all_objects():\n objs = {}\n objs['Section'] = list(h.all_sec())\n objs['Segment'] = []\n for sec in objs['Section']:\n objs['Segment'].extend(list(sec.allseg()))\n objs['PointProcess'] = []\n for seg in objs['Segment']:\n objs['PointProcess'].extend(list(seg.point_processes()))\n \n return objs", "def inside(self,region):\n fs = FeatureSet()\n for f in self:\n if(f.isContainedWithin(region)):\n fs.append(f)\n return fs", "def consolidate_instances(self, stats, segmented_instances, idx_map):\n\n img = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n labels = np.unique(segmented_instances)\n labels=labels[labels!=0]\n reverse_idx_map = np.zeros(len(idx_map)).astype(np.int)\n for l in labels:\n reverse_idx_map[idx_map[l]]=np.int(l)\n\n #calculate slope of line between centroids.\n # TO DO: make this more efficient.\n centroid_slopes = self._calc_centroid_slopes(segmented_instances, labels, stats, idx_map)\n seg_slopes = np.zeros(len(labels))\n #for each instance i\n for i in range(0, len(labels)):\n idx=[]\n curr_label = reverse_idx_map[i]\n if curr_label!=0:\n #Show all segments of curr_label\n img[segmented_instances== curr_label]= 255\n #calculate slope m of instance i\n idx = np.argwhere(segmented_instances == curr_label)\n if len(idx>0):\n max_y= max(idx[:,0])\n min_y= min(idx[:,0])\n x_for_max = idx[idx[:,0]==max_y, 1][0]\n x_for_min = idx[idx[:,0]==min_y, 1][0]\n if x_for_max < x_for_min:\n x1= x_for_max\n y1= max_y\n x2= x_for_min\n y2= min_y\n else:\n x1= x_for_min\n y1= min_y\n x2= x_for_max\n y2= max_y\n m = self._slope(x1,y1,x2,y2)\n seg_slopes[i]=m\n cv2.line(img,(x1, y1),(x2, y2),(0,100,0),4)\n cv2.circle(img, (stats['centroid'][i,0], stats['centroid'][i,1]), 3, (200, 0, 0), -1)\n #self.showme(img, 'line '+str(i))\n\n # cluster segments\n clusters, clustered_instances = self._cluster_segments(segmented_instances, centroid_slopes, seg_slopes, reverse_idx_map)\n #find the closest centroid to a line with slope m that starts at the instances centroid\n # self.showObjects(clustered_instances);\n return clusters, clustered_instances", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def get_feature_instance_segmentation(\n self, split, curr_id):\n\n bitmask_file = os.path.join(\n BDD_100K_PAN_SEG,\n self.instance_seg_file.format(self.split_name(split), curr_id))\n\n if not tf.io.gfile.exists(bitmask_file):\n return self.feature_utils.get_fake_feature('instance_segmentation'), False\n\n bit = utils.load_image(bitmask_file)\n # Description: https://doc.bdd100k.com/format.html#bitmask:\n # the B channel and A channel store the “ann_id” for instance segmentation\n # and “ann_id” for segmentation tracking, respectively, which can be\n # computed as (B << 8) + A:\n instance_ids = bit[:, :, 2] * 256 + bit[:, :, 3]\n instance_mask = np.unique(instance_ids, return_inverse=True)[1]\n desired_shape = list(instance_ids.shape[:2]) + [1]\n instance_mask = np.reshape(instance_mask, desired_shape)\n return instance_mask.astype(np.uint16), 
True", "def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}", "def triple_features(cls):\n def _feature_impl(agg, record):\n if not agg: agg = {}\n p = record['p']\n o = record['o']\n if not agg.has_key(p): agg[p] = [ ]\n agg[p].append(o)\n return agg\n return _feature_impl", "def mergedWithNext(self, doRefit=True):\n spoints = numpy.concatenate([self.points,self.next.points])\n\n if doRefit:\n newSeg = fitSingleSegment(spoints)\n else:\n newSeg = Segment.fromCenterAndDir(barycenter(spoints), self.unitv, spoints)\n \n newSeg = Path.mergedWithNext(self, newSeg)\n return newSeg", "def segmented_intersections(lines):\n\n intersections = []\n for i, group in enumerate(lines[:-1]):\n for next_group in lines[i+1:]:\n for line1 in group:\n for line2 in next_group:\n intersections.append(intersection(line1, line2)) \n\n return intersections", "def segment_similarity(A, B, T=CLOSE_DISTANCE_THRESHOLD):\n l_a = len(A.points)\n l_b = len(B.points)\n\n idx = index.Index()\n dex = 0\n for i in range(l_a-1):\n idx.insert(dex, bounding_box_from(A.points, i, i+1, T), obj=[A.points[i], A.points[i+1]])\n dex = dex + 1\n\n prox_acc = []\n\n for i in range(l_b-1):\n ti = B.points[i].gen2arr()\n ti1 = B.points[i+1].gen2arr()\n bb = bounding_box_from(B.points, i, i+1, T)\n intersects = idx.intersection(bb, objects=True)\n n_prox = []\n i_prox = 0\n a = 0\n for x in intersects:\n a = a + 1\n pi = x.object[0].gen2arr()\n pi1 = x.object[1].gen2arr()\n prox = line_similarity(ti, ti1, pi, pi1, T)\n i_prox = i_prox + prox\n n_prox.append(prox)\n\n if a != 0:\n prox_acc.append(i_prox / a)\n # prox_acc.append(max(n_prox))\n else:\n prox_acc.append(0)\n\n return np.mean(prox_acc), prox_acc", "def edges(self) -> list[Segment]:\n result = self._edges\n return list(distinct(Segment(result[idx], copy=False) for idx in np.ndindex(*self.shape[:2])))", "def feature_extract(self, CT_pairs):\n instances = []\n for pair in CT_pairs:\n config = pair[0]\n label = pair[1]\n data = []\n featureset = {}\n \n # for nltk NaiveBayes feature selection stuff when doing MaxEnt decoding parser commit this\n# featureset[\"topOfBuffer\"] = self.token_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.token_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = (self.token_dict[config.sigma.top()], self.token_dict[config.beta.top()])\n# featureset[\"topOfBuffer\"] = self.POS_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.POS_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = tuple((self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]))\n \n # add the (StackTopPOS,BufferTopPOS,bufferchildren_POS) feature\n #value_set = tuple([self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]] + [self.POS_dict[child] for child in self.getBufferChildren(config.beta.top())])\n #featureset[\"bufferStackbufferChildrenPair\"] = value_set\n \n # for MaxEnt decoding stuff\n # token variants\n 
data.append((\"topOfBuffer\",self.token_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.token_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.token_dict[config.sigma.top()],self.token_dict[config.beta.top()]))\n #POS variants\n data.append((\"topOfBuffer\",self.POS_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.POS_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.POS_dict[config.sigma.top()],self.POS_dict[config.beta.top()]))\n ins = Instance(label=label, data=data)\n #ins = Instance(label=label, data=featureset)\n instances.append(ins)\n \n return instances", "def clusters(self):\n\t\tif self._record is None:\n\t\t return []\n\t\tclusters = [i for i in self._record.features if i.type == 'cluster']\n\t\treturn clusters", "def get_pairing_feature(self):\n\n if self.pair_chain_feature:\n\n self.pair_indexes = []\n start = 0\n for feat_type, feat_names in self.select_feature.items():\n nfeat = len(feat_names)\n if '_ind' in feat_type:\n self.pair_indexes += [\n [i, i + 1] for i in range(start, start + nfeat, 2)]\n else:\n self.pair_indexes += [\n [i] for i in range(start, start + nfeat)]\n start += nfeat", "def getMultiGeometry(geometry):\n geom = arcpy.Array()\n for feature in geometry:\n array = arcpy.Array()\n for point in feature:\n point = arcpy.Point(float(point[0]), float(point[1]))\n array.add(point)\n geom.add(array)\n return geom", "def _compute_soffsets(self):\n self.soffsets = [ [] for i in self.doffsets ]\n for idx,dofs in enumerate(self.doffsets):\n for o in dofs:\n self.soffsets[(idx + o) % self.p].append(-o)", "def join(cls, feature_sets, distinguish=True):\n if distinguish:\n return JoinedFeatures(feature_sets)\n else:\n feats = Features()\n for feat_set in feature_sets:\n for feat, val in feat_set.features.iteritems():\n feats.features[feat] += val\n return feats", "def segment_analyzer(self):\n sa = Segment_Analyzer.SegmentAnalyzer(self.log, self.args)\n sa.chromosome_ploidy(permutation=True)\n\n return sa", "def segment(self):\n\n #Run the marker selection GUI\n self.ps.startGUI()\n self.numSegments = self.ps.numSegments\n markerPoints = self.ps.result\n if(markerPoints == 0):\n print(\"No markers, exiting watershed...\")\n return False\n\n markers = np.zeros(self.imgShape, dtype = np.uint8)\n \n #Format the markers to matrix\n for i in range(0,len(markerPoints)):\n for j in range(0,len(markerPoints[i])):\n x = markerPoints[i][j][0]\n y = markerPoints[i][j][1]\n\n markers[x,y] = (i+1)\n\n watershed = markers.copy().astype(np.int32)\n self.segmentedImg = cv2.watershed(self.img,watershed)\n return self.segmentedImg", "def __getGeometryComponents(self):\n # get dagPath and member components of skined shape\n # the deformerSet pretty controls which vertex is deformed by the skinCluster\n # the deformerSet will allows us to pull out that components(vertex) mobject that we need\n fnSet = openmaya.MFnSet(self.fn.deformerSet())\n members = openmaya.MSelectionList()\n # the MSelectionList contains the vertex information in the deformerSet above\n fnSet.getMembers(members, False)\n\n dagPath = openmaya.MDagPath()\n components = openmaya.MObject()\n\n # dagPath: dagPath of influence objects(joint)\n # components: mesh components(vertex)\n members.getDagPath(0, dagPath, components)\n\n return dagPath, components", "def speed_map_segs_to_geojson(seg_list):\n # Initialize a new GeoJSON object\n new_geojson = {\n 'type': 'FeatureCollection',\n 'features': []\n }\n\n # Dont work on the input list\n seg_list_copy = 
copy.deepcopy(seg_list)\n\n # Iterativley build the features of the new GeoJSON object\n for i, seg in enumerate(seg_list_copy):\n # Prepare the feature properties\n del seg['fromStop']\n del seg['toStop']\n\n # New attribute, can be used to identify segments\n seg['order'] = i\n\n # Prepare the feature geometry coordinates\n pathLocs = seg.pop('pathLocs')\n coords = [[p['lon'], p['lat']] for p in pathLocs]\n\n # Construct feature\n new_feature = {\n 'type': 'Feature',\n 'geometry': {'type': 'LineString', 'coordinates': coords},\n 'properties': seg\n }\n\n # Append feature to the list of features in GeoJSON object\n new_geojson['features'].append(new_feature)\n\n return new_geojson", "def iter_segments(self):\n return\n yield", "def getSegments(points):\n return _identifyStrokes(points)[1]", "def getSegment(self):\n return self.segment", "def compute_intersecting(voxel, R, kdt, max_segment): \n\tsubset = np.unique(si[kdt.query_radius(voxel, r=R+max_segment)[0]]).astype(np.int)\n\treturn subset[np.array([track_roi_intersection_check(s, voxel, sq_dist_thr=R**2) for s in tracks[subset]])]", "def intersect(self, match):\n intersection = set()\n for m in self.matches:\n intersection.add(m.intersect(match))\n return FlowSpace(intersection)", "def _get_segments(img, mean_scale=1000, num_samples=16, return_enough_segments=False):\n # randomly choose the segmentation scale\n scale = np.random.uniform(0.5*mean_scale, 1.5*mean_scale)\n # run heuristic segmentation\n segments = skimage.segmentation.felzenszwalb(img, scale=scale,\n min_size=int(scale))\n # sample a set of segmentations to use; bias toward larger ones\n max_segment = segments.max()\n indices = np.arange(max_segment+1)\n seg_count = np.array([(segments == i).sum()+1 for i in indices])\n p = seg_count/seg_count.sum()\n # try this for error correction?\n if num_samples <= max_segment:\n sampled_indices = np.random.choice(indices, p=p, size=num_samples,\n replace=False)\n else:\n warnings.warn(\"not enough unique segments; sampling WITH replacement\")\n sampled_indices = np.random.choice(indices, size=num_samples, replace=True)\n # build normalized segment occupancy masks for each segment we choose\n seg_tensor = np.stack([(segments == i)/seg_count[i] for i in sampled_indices],\n -1).astype(np.float32)\n\n if return_enough_segments:\n enough_segs = num_samples <= max_segment\n return seg_tensor, enough_segs\n return seg_tensor", "def findFeatures(self):\n\t\tpass", "def get_features(self):\n return self._features", "def intersects(geometry, sr=None):\r\n\r\n return _filter(geometry, sr, 'esriSpatialRelIntersects')", "def getServicesSegmented(self, offset, limit):\n try:\n SVF.validate_offset_limit(offset=offset, limit=limit)\n except ValueError as ve:\n return jsonify(Error=str(ve)), 400\n\n dao = ServiceDAO()\n\n services = dao.getServicesSegmented(offset=offset, limit=limit)\n if not services:\n response = {'services': None}\n else:\n service_list = []\n for row in services:\n service_list.append(_buildCoreServiceResponse(row))\n response = {'services': service_list}\n return jsonify(response)", "def split_at_nodes(shp):\n nodes = find_nodes(shp)\n nodeIds = list(nodes)\n nodeIds.sort()\n nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])\n \n for road in shp:\n vrts = road.vertices\n midVrts = set(road.vertices[1:-1]) #we know end points are nodes\n midNodes = midVrts.intersection(nodes) # find any nodes in the middle of the feature.\n midIdx = [vrts.index(node) for node in midNodes] # Get their indices\n midIdx.sort()\n if 
midIdx:\n #print vrts\n starts = [0]+midIdx\n stops = [x+1 for x in midIdx]+[None]\n for start,stop in zip(starts,stops):\n feat = pysal.cg.Chain(vrts[start:stop])\n rec = (nodeIds[feat.vertices[0]],nodeIds[feat.vertices[-1]],False)\n yield feat,rec\n else:\n rec = (nodeIds[road.vertices[0]],nodeIds[road.vertices[-1]],False)\n yield road,rec", "def join_featuresets(featureset1, featureset2):\n joined_instances = {}\n names = []\n for audio_name in featureset1.keys():\n if audio_name in featureset2:\n names.append(audio_name)\n for name in names:\n joined_vec = join_feature_vectors(featureset1[name], featureset2[name])\n joined_instances[name] = joined_vec\n return joined_instances", "def get_segments(input_path):\n with open(input_path, 'r') as segments_file:\n segments = []\n for line in segments_file:\n words = line.split('\\t')\n sg_dict = {}\n sg_dict['start'] = float(words[0].replace(',', '.'))\n sg_dict['end'] = float(words[1].replace(',', '.'))\n sg_dict['class'] = words[2][:-1]\n segments.append(sg_dict)\n return segments", "def partition_Basic(segfile):\n scenelist = Recording.read_segs(segfile)\n segcount = 0\n for l in scenelist.values():\n segcount += len(l)\n return scenelist, segcount", "def __init__(self):\n self.g_sect = []", "def intersect(f, df, g, dg):\n \"*** YOUR CODE HERE ***\"", "def intersection_list(self):\n return self._intersection_list" ]
[ "0.6147264", "0.5819718", "0.5776346", "0.56910527", "0.5669777", "0.5641783", "0.5624703", "0.55444694", "0.5516832", "0.5491789", "0.54492235", "0.543769", "0.5422729", "0.53894347", "0.5386809", "0.53521913", "0.53349394", "0.5276735", "0.52644706", "0.5250516", "0.5229773", "0.52234274", "0.52051556", "0.5196215", "0.5176299", "0.51668864", "0.5124512", "0.5112327", "0.508792", "0.50833493", "0.50815594", "0.50812835", "0.5080056", "0.50662583", "0.50498265", "0.50331503", "0.50176895", "0.49970335", "0.49798614", "0.4971839", "0.49682766", "0.49632812", "0.4956481", "0.49532825", "0.49532825", "0.49532825", "0.49519885", "0.49510783", "0.4948293", "0.49479094", "0.49293894", "0.49274766", "0.49105382", "0.49022883", "0.4898131", "0.48962557", "0.48949867", "0.4888462", "0.48882985", "0.48711938", "0.48638785", "0.4860855", "0.48604643", "0.48411348", "0.48389173", "0.48349112", "0.48331475", "0.48312914", "0.48293108", "0.47968206", "0.47875786", "0.47825998", "0.4779776", "0.47728798", "0.47723448", "0.47720963", "0.47708815", "0.47658417", "0.4763124", "0.47626227", "0.47533622", "0.47522137", "0.47513738", "0.47405314", "0.47385427", "0.47312722", "0.47282413", "0.47273737", "0.4723429", "0.47179583", "0.4709212", "0.47068486", "0.47066605", "0.47055203", "0.47003698", "0.46944073", "0.46933416", "0.46876836", "0.4687662", "0.46784517" ]
0.556224
7
Return `True` if all segments in `inv` matches the features in fts
def fts_match_all(self, fts, inv, normalize=True): return all([self.fts(s, normalize) >= fts for s in inv])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fts_match_any(self, fts, inv, normalize=True):\n return any([self.fts(s, normalize) >= fts for s in inv])", "def fts_intersection(self, segs, normalize=True):\n return reduce(lambda a, b: a & b,\n [self.fts(s, normalize) for s in self.filter_segs(segs, normalize)])", "def fts_contrast(self, fs, ft_name, inv, normalize=True):\n inv_segs = filter(lambda x: x >= fs, map(lambda seg: self.fts(seg, normalize), inv))\n for a in inv_segs:\n for b in inv_segs:\n if a != b:\n if a.differing_specs(b) == [ft_name]:\n return True\n return False", "def fts_count(self, fts, inv, normalize=True):\n return len(list(filter(lambda s: self.fts(s, normalize) >= fts, inv)))", "def match_features(phone_feats, other_feats):\n for feat in other_feats.keys():\n if phone_feats[feat] != other_feats[feat] and other_feats[feat] != UNDEF:\n return False\n return True", "def match(self, sentence) -> bool:\r\n if (any(word[0] in sentence.lower() for word in self.word_list if word[1] == \"partial\") or any(\r\n word[0].lower() == sentence.lower() for word in self.word_list if word[1] == \"full\")) and not any(\r\n word[0] in sentence.lower() for word in self.word_list if word[1] == \"not\"):\r\n return True\r\n else:\r\n return False", "def all_segs_matching_fts(self, ft_mask):\n matching_segs = [ipa for (ipa, fts) in self.segments if fts >= ft_mask]\n return sorted(matching_segs, key=lambda x: len(x), reverse=True)", "def matches(self, feature):\n pass", "def hasIntersectedWith(self, f):\n try:\n return f in self.hasIntersected\n except AttributeError:\n return False", "def has_match(trajs_0, trajs_1):\n for i in range(len(trajs_0)):\n for j in range(len(trajs_1)):\n R = (trajs_0[i].get_slice()[:,:2] == trajs_1[j].get_slice()[:,:2])\n if isinstance(R, bool):\n if R:\n return True \n elif R.all():\n return True \n else:\n pass \n return False", "def email_features(word_indices):\n\n # Total number of words in the dictionary\n n = 1899\n\n vector = np.arange(1, n + 1).reshape(-1, 1)\n\n return np.in1d(vector, word_indices)", "def has_vectored_fields(self):\r\n return any(ftype.vector for ftype in self._by_number)", "def test_contains_returns_true_for_partial_word_in_multi_word_trie(multi_trie):\n assert multi_trie.contains(\"hell\") is True", "def vsone_feature_matching(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict={},\n flann1=None, flann2=None, verbose=None):\n import vtool as vt\n import pyflann\n from vtool import spatial_verification as sver\n #import vtool as vt\n sver_xy_thresh = cfgdict.get('sver_xy_thresh', .01)\n ratio_thresh = cfgdict.get('ratio_thresh', .625)\n refine_method = cfgdict.get('refine_method', 'homog')\n symmetric = cfgdict.get('symmetric', False)\n K = cfgdict.get('K', 1)\n Knorm = cfgdict.get('Knorm', 1)\n #ratio_thresh = .99\n # GET NEAREST NEIGHBORS\n checks = 800\n #pseudo_max_dist_sqrd = (np.sqrt(2) * 512) ** 2\n #pseudo_max_dist_sqrd = 2 * (512 ** 2)\n if verbose is None:\n verbose = True\n\n flann_params = {'algorithm': 'kdtree', 'trees': 8}\n if flann1 is None:\n flann1 = vt.flann_cache(vecs1, flann_params=flann_params, verbose=verbose)\n\n #print('symmetric = %r' % (symmetric,))\n if symmetric:\n if flann2 is None:\n flann2 = vt.flann_cache(vecs2, flann_params=flann_params, verbose=verbose)\n\n try:\n try:\n num_neighbors = K + Knorm\n fx2_to_fx1, fx2_to_dist = normalized_nearest_neighbors(flann1, vecs2, num_neighbors, checks)\n #fx2_to_fx1, _fx2_to_dist = flann1.nn_index(vecs2, num_neighbors=K, checks=checks)\n if symmetric:\n fx1_to_fx2, fx1_to_dist = 
normalized_nearest_neighbors(flann2, vecs1, K, checks)\n\n except pyflann.FLANNException:\n print('vecs1.shape = %r' % (vecs1.shape,))\n print('vecs2.shape = %r' % (vecs2.shape,))\n print('vecs1.dtype = %r' % (vecs1.dtype,))\n print('vecs2.dtype = %r' % (vecs2.dtype,))\n raise\n if symmetric:\n is_symmetric = flag_symmetric_matches(fx2_to_fx1, fx1_to_fx2)\n fx2_to_fx1 = fx2_to_fx1.compress(is_symmetric, axis=0)\n fx2_to_dist = fx2_to_dist.compress(is_symmetric, axis=0)\n\n assigntup = assign_unconstrained_matches(fx2_to_fx1, fx2_to_dist)\n\n fx2_match, fx1_match, fx1_norm, match_dist, norm_dist = assigntup\n fm_ORIG = np.vstack((fx1_match, fx2_match)).T\n fs_ORIG = 1 - np.divide(match_dist, norm_dist)\n # APPLY RATIO TEST\n fm_RAT, fs_RAT, fm_norm_RAT = ratio_test(fx2_match, fx1_match, fx1_norm,\n match_dist, norm_dist,\n ratio_thresh)\n\n # SPATIAL VERIFICATION FILTER\n #with ut.EmbedOnException():\n match_weights = np.ones(len(fm_RAT))\n svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_RAT, sver_xy_thresh,\n dlen_sqrd2, match_weights=match_weights,\n refine_method=refine_method)\n if svtup is not None:\n (homog_inliers, homog_errors, H_RAT) = svtup[0:3]\n else:\n H_RAT = np.eye(3)\n homog_inliers = []\n fm_RAT_SV = fm_RAT.take(homog_inliers, axis=0)\n fs_RAT_SV = fs_RAT.take(homog_inliers, axis=0)\n fm_norm_RAT_SV = fm_norm_RAT[homog_inliers]\n\n top_percent = .5\n top_idx = ut.take_percentile(fx2_to_dist.T[0].argsort(), top_percent)\n fm_TOP = fm_ORIG.take(top_idx, axis=0)\n fs_TOP = fx2_to_dist.T[0].take(top_idx)\n #match_weights = np.ones(len(fm_TOP))\n #match_weights = (np.exp(fs_TOP) / np.sqrt(np.pi * 2))\n match_weights = 1 - fs_TOP\n #match_weights = np.ones(len(fm_TOP))\n svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_TOP, sver_xy_thresh,\n dlen_sqrd2, match_weights=match_weights,\n refine_method=refine_method)\n if svtup is not None:\n (homog_inliers, homog_errors, H_TOP) = svtup[0:3]\n np.sqrt(homog_errors[0] / dlen_sqrd2)\n else:\n H_TOP = np.eye(3)\n homog_inliers = []\n fm_TOP_SV = fm_TOP.take(homog_inliers, axis=0)\n fs_TOP_SV = fs_TOP.take(homog_inliers, axis=0)\n\n matches = {\n 'ORIG' : MatchTup2(fm_ORIG, fs_ORIG),\n 'RAT' : MatchTup3(fm_RAT, fs_RAT, fm_norm_RAT),\n 'RAT+SV' : MatchTup3(fm_RAT_SV, fs_RAT_SV, fm_norm_RAT_SV),\n 'TOP' : MatchTup2(fm_TOP, fs_TOP),\n 'TOP+SV' : MatchTup2(fm_TOP_SV, fs_TOP_SV),\n }\n output_metdata = {\n 'H_RAT': H_RAT,\n 'H_TOP': H_TOP,\n }\n\n except MatchingError:\n fm_ERR = np.empty((0, 2), dtype=np.int32)\n fs_ERR = np.empty((0, 1), dtype=np.float32)\n H_ERR = np.eye(3)\n matches = {\n 'ORIG' : MatchTup2(fm_ERR, fs_ERR),\n 'RAT' : MatchTup3(fm_ERR, fs_ERR, fm_ERR),\n 'RAT+SV' : MatchTup3(fm_ERR, fs_ERR, fm_ERR),\n 'TOP' : MatchTup2(fm_ERR, fs_ERR),\n 'TOP+SV' : MatchTup2(fm_ERR, fs_ERR),\n }\n output_metdata = {\n 'H_RAT': H_ERR,\n 'H_TOP': H_ERR,\n }\n\n return matches, output_metdata", "def __check_features(f_list, stopwords):\n ok = True\n for f in f_list:\n if not(__check_feature(f,stopwords)):\n return False\n return True", "def _gen_matches(target_units, source_units, stoplist_set, features_size):\n for hits2positions in gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v) for k, v in hits2positions.items()\n if len(v) >= 2}\n for (t_ind, s_ind), positions in overhits2positions.items():\n yield (t_ind, s_ind, positions)", "def __contains__(self, sentence):\n return sentence in self._sentences", "def flag_features_in_associate_clause(self, docFeatList):\n 
for feat in docFeatList:\n tlink = feat.getTlink()\n if not tlink or tlink.getType()!='ASSOCIATE':\n continue\n \n sentNum = feat.getSentNum()\n sentence = self.sentences[sentNum]\n sent_start = self.sentence_startPos[sentNum]\n startClause = tlink.getTimexes()[1].getStartPos()\n endPos = sentence[startClause - sent_start:].find(',')\n if endPos < 0: #: No comma is found, skip to be safe\n continue\n else:\n endClause = startClause + endPos\n \n zone = (startClause, endClause, sentNum)\n if not zone in self.clauseZones:\n self.clauseZones.append(zone)\n \n if feat.getStartPos() > startClause and feat.getStartPos() < endClause:\n feat.setInClause(True)\n feat.setTlink(None)\n \n return docFeatList", "def seg_known(self, segment, normalize=True):\n if normalize:\n segment = FeatureTable.normalize(segment)\n return segment in self.seg_dict", "def __contains__(self, ngram):\n return ngram in self.root", "def partialSetMatchAnnotated(self, annotatedMention):\n aWords = annotatedMention.importantWords()\n dWords = self.importantWords()\n \n if dWords.intersection(aWords) == dWords:\n # this mention is a subset of the annotated mention\n if dWords == aWords:\n return True # exact match\n if len(annotatedMention.shortSets) > 0:\n # annotated mention has short sections, try to if one is included\n # in the detected mention\n for ss in annotatedMention.shortSets:\n if ss.intersection(dWords) == ss:\n # detected mention contains all of the words in a short section\n return True\n \n return False", "def _get_same_sentence_features(words, indexes):\n roots = [(i, w) for i, w in enumerate(words) if int(w.index) == 1]\n points = [0, 0, 0]\n for i, w in roots:\n for j in range(len(indexes)):\n if i <= indexes[j]:\n points[0] += 1\n feature_list = [\n points[0] == points[1],\n points[0] == points[2],\n points[1] == points[2],\n ]\n return feature_list", "def test_hindi(doc):\n hindi_dictionary = ['kai','hai','dhaan','dhan','jhona','pili','jankari','saaf','mela','narma','raja','brahma','jai','parbhani','sangli','jana']\n flag = any(hindi in doc for hindi in hindi_dictionary)\n return(flag)", "def __contains__(self, v):\n for i in self:\n if v in i:\n return True\n False", "def any_within_poly(self, poly):\n for pt in self:\n if poly.contains(pt):\n return True\n return False", "def match(self, sentence) -> bool:\r\n for word in self.word_list:\r\n if word.lower() in sentence.lower():\r\n return True\r\n return False", "def contains(self, searchstr: str):\n for x in self.sa:\n if searchstr in x:\n return True\n pass", "def pertenece(self,v):\n return v in self.vertices.keys()", "def __contains__(self, f) :\n if self.__disc is infinity :\n return True\n \n (s, l) = f\n\n (a, _, c) = apply_GL_to_form(self.__p1list[l], s)\n if not c % self.__level == 0 :\n return False\n\n return a + c < self.index()", "def __contains__(self, feature):\n return feature in self.features", "def extract_features(document):\n document_words = set(document)\n features = {}\n global word_features\t\n for word in word_features:\n features['contains(%s)' % word] = (word in document_words)\n return features", "def isIntersection(self, v):\n return (any(inter.v == v for inter in self.inter1) or\n any(inter.v == v for inter in self.inter2))", "def in_field(self, vec):\n return (abs(vec[0]) + abs(vec[1]) + abs(vec[2])) <= 2 * self.n", "def test_contains_returns_true_when_word_in_trie(full_trie):\n assert full_trie.contains(\"hey\") is True", "def can_recept(self, text, *args, **kwargs):\n for each_cur in self.flat_norm.keys():\n if 
each_cur.lower() in text.lower():\n return True\n\n else:\n return False", "def contained_in(self, smiles):\n return MolFromSmiles(smiles).HasSubstructMatch(self.rdmol)", "def __contains__(self, ngram):\n return ngram in self._ngrams", "def has_subroutines(otf: ttLib.TTFont) -> bool:\n table_tag = _sniff_cff_table_format(otf)\n top_dict = otf[table_tag].cff.topDictIndex[0]\n all_subrs = [top_dict.GlobalSubrs]\n if hasattr(top_dict, \"FDArray\"):\n all_subrs.extend(\n fd.Private.Subrs for fd in top_dict.FDArray if hasattr(fd.Private, \"Subrs\")\n )\n elif hasattr(top_dict.Private, \"Subrs\"):\n all_subrs.append(top_dict.Private.Subrs)\n return any(all_subrs)", "def conj(fs):\n def feature(s, i):\n return all(f(s, i) for f in fs)\n return feature", "def any(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.any(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.any(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.any(part.view(ndarray), *args, **kwargs))", "def _contains(self, df: pandas.DataFrame, mapped_triples: MappedTriples, invert: bool = False) -> numpy.ndarray:\n raise NotImplementedError", "def vectorize(self,text):\r\n \r\n lv_active = set()\r\n words = word_tokenize(text)\r\n for word in words:\r\n if word in self.tree:\r\n ancestors = self.tree.word_ancestors(word)\r\n lv_active.update(ancestors)\r\n \r\n return self.nl.isin(lv_active).values", "def _is_in_doc(t: int, d: List[List[str]]) -> bool:\n t = str(t)\n for s in d:\n if t in s:\n return True\n return False", "def _attentive_matching(self, h1, h2, cosine_matrix, w):\n # h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n h1 = self._time_distributed_multiply(h1, w)\n # attentive vector (batch_size, h1_timesteps, embedding_szie)\n attentive_vec = self._mean_attentive_vectors(h2, cosine_matrix)\n # attentive_vec * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n attentive_vec = self._time_distributed_multiply(attentive_vec, w)\n # matching vector, (batch_size, h1_timesteps, mp_dim)\n matching = self._cosine_similarity(h1, attentive_vec)\n return matching", "def test_distribution_with_many_clauses(self):\n spi_search = \"find a mele and brooks and holtkamp and o'connell\"\n inv_search = \"author:mele author:brooks author:holtkamp author:o'connell\"\n self._compare_searches(inv_search, spi_search)", "def contains(self, smiles):\n return self.rdmol.HasSubstructMatch(MolFromSmiles(str(smiles)))", "def test_match_in(self, subdocument):\n assert subdocument.match({\"hello\": {\"$in\": [\"there\", \"here\"]}})\n assert not subdocument.match({\"hello\": {\"$in\": [\"ici\", \"here\"]}})", "def route_is_contained_in_other_route(route,target):\n id_route = 0\n id_target = 0\n found = True\n while found and id_route < len(route) and id_target < len(target):\n found = False\n while not found and id_target < len(target):\n if route[id_route] == target[id_target]:\n found = True\n else:\n id_target += 1\n id_route += 1\n return found", "def matches(self, tgt_residence_dir: str) -> bool:", "def vsone_matching(metadata, cfgdict={}, verbose=None):\n # import vtool as vt\n #assert isinstance(metadata, ut.LazyDict), 'type(metadata)=%r' % (type(metadata),)\n\n annot1 = metadata['annot1']\n annot2 = metadata['annot2']\n\n ensure_metadata_feats(annot1, cfgdict=cfgdict)\n ensure_metadata_feats(annot2, cfgdict=cfgdict)\n\n if 'dlen_sqrd' not in annot2:\n def 
eval_dlen_sqrd(annot):\n rchip = annot['rchip']\n dlen_sqrd = rchip.shape[0] ** 2 + rchip.shape[1] ** 2\n return dlen_sqrd\n annot2.set_lazy_func('dlen_sqrd', lambda: eval_dlen_sqrd(annot2))\n\n # Exceute relevant dependencies\n kpts1 = annot1['kpts']\n vecs1 = annot1['vecs']\n kpts2 = annot2['kpts']\n vecs2 = annot2['vecs']\n dlen_sqrd2 = annot2['dlen_sqrd']\n flann1 = annot1.get('flann', None)\n flann2 = annot2.get('flann', None)\n\n matches, output_metdata = vsone_feature_matching(\n kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict=cfgdict,\n flann1=flann1, flann2=flann2, verbose=verbose)\n metadata.update(output_metdata)\n return matches, metadata", "def __contains__(self, val):\n return val in [i[0] for i in self.registered_intents]", "def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex(): \n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def interior_contains(self, Vobj):\n return False", "def interior_contains(self, Vobj):\n return False", "def __contains__(self, seq):\n return bool(libhts.faidx_has_seq(self._fai, seq))", "def contains(self, Vobj):\n return self.polyhedron()._is_zero( self.eval(Vobj) )", "def contains(self, Vobj):\n return self.polyhedron()._is_zero( self.eval(Vobj) )", "def __contains__(self, fragment):\n return fragment in self._items", "def __contains__(self, doc_label):\n return doc_label in self.docs", "def __contains__(self, key):\n return key in self.vertList", "def search(self):\n\n term = self.substitute()\n ##print (\"searching:\",term)\n ##print (\"in facts\",self.facts)\n ##input()\n bindings = deepcopy(self.bindings)\n found = False\n for fact in self.facts:\n found = self.unify(term,fact,bindings)\n if found:\n bound_vars = list(bindings.keys())\n n_bound_vars = len(bound_vars)\n for i in range(n_bound_vars):\n for j in range(i+1,n_bound_vars):\n if bindings[bound_vars[i]] == bindings[bound_vars[j]]:\n return False\n self.facts.remove(self.substitute_with_bindings(bindings)) #THINK ABOUT THIS\n break\n return found", "def __contains__(self, i):\n return i in self._ar", "def is_in(elt, seq):\n\treturn any(x is elt for x in seq)", "def contains ( self, pos ):\n \n poly = Polygon(array(self.edges).reshape(-1,2)[:,0],array(self.edges).reshape(-1,2)[:,1])\n dists = poly.is_inside(pos[0,:],pos[1,:]) \n if self.include_border:\n inds = dists >= -self.abs_tol\n else:\n inds = dists > 0\n \n \n # if none inside, take nearest\n if ~inds.any() and self.default_nearest:\n dr2 = array(self.edges).reshape(-1,2).mean(0)\n inds[argmin(dr2)] = True\n \n return inds", "def feat_overlap(f1, f2):\n f1start = int(f1[3])\n f1end = int(f1[4])\n f2start = int(f2[3])\n f2end = int(f2[4])\n\n if f1start <= f2end and f1end >= f2start:\n return True\n return False", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def is_in(elt, seq):\n return any(x is elt for x in seq)", "def any_term(self, row):\n return any(term in row for term in self.search_terms)", "def conflateable(seg1, seg2, segment_pairs):\n for segment_pair in segment_pairs:\n seg_set = 
set(segment_pair)\n if seg1 in seg_set and seg2 in seg_set:\n return True\n return False", "def is_vert(e) :\n f = e[0][0]\n for t in e :\n if f != t[0] :\n return False\n return True", "def _candidates(self, token):\n token_as_list = [token]\n token_1_edits = NorvigCorrector._one_edit_token_distances(token)\n token_2_edits = NorvigCorrector._two_edits_token_distances(token)\n return (\n self._known_in(token_as_list) or self._known_in(token_1_edits) or self._known_in(token_2_edits) or\n token_as_list)", "def test_contains_returns_false_when_word_mismatches(full_trie):\n assert full_trie.contains(\"hello\") is False", "def contains (self,phrase,chars):\r\n\r\n for x in chars:\r\n\r\n if x in phrase:\r\n return True\r\n return False", "def contains_vect(self, v: Tuple[float, float]) -> bool:\n assert len(v) == 2\n return bool(lib.cpBBContainsVect(self, v))", "def isin(hi):\n return finder.search(hi)", "def contains(self,other):\n retVal = False\n\n bounds = self.points\n if( isinstance(other,Feature) ):# A feature\n retVal = True\n for p in other.points: # this isn't completely correct - only tests if points lie in poly, not edges.\n p2 = (int(p[0]),int(p[1]))\n retVal = self._pointInsidePolygon(p2,bounds)\n if( not retVal ):\n break\n # a single point\n elif( (isinstance(other,tuple) and len(other)==2) or ( isinstance(other,np.ndarray) and other.shape[0]==2) ):\n retVal = self._pointInsidePolygon(other,bounds)\n\n elif( isinstance(other,tuple) and len(other)==3 ): # A circle\n #assume we are in x,y, r format\n retVal = True\n rr = other[2]*other[2]\n x = other[0]\n y = other[1]\n for p in bounds:\n test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))\n if( test < rr ):\n retVal = False\n break\n\n elif( isinstance(other,tuple) and len(other)==4 and ( isinstance(other[0],float) or isinstance(other[0],int))):\n retVal = ( self.maxX() <= other[0]+other[2] and\n self.minX() >= other[0] and\n self.maxY() <= other[1]+other[3] and\n self.minY() >= other[1] )\n elif(isinstance(other,list) and len(other) >= 4): # an arbitrary polygon\n #everything else ....\n retVal = True\n for p in other:\n test = self._pointInsidePolygon(p,bounds)\n if(not test):\n retVal = False\n break\n else:\n logger.warning(\"SimpleCV did not recognize the input type to features.contains. 
This method only takes another blob, an (x,y) tuple, or a ndarray type.\")\n return False\n\n return retVal", "def contains_any_phrase(sent, phrases):\n for p in phrases:\n if p in sent:\n return True\n return False", "def test_match_sub_in(self, subdocument):\n assert subdocument.match({\"and.the\": {\"$in\": [\"duck\", \"drake\"]}})\n assert not subdocument.match({\"and.the\": {\"$in\": [\"hyppo\", \"lion\"]}})", "def match(self, sentence) -> bool:\r\n pass", "def has_all(self, tag, indexes):\n\n return all(self.has(tag, index) for index in indexes)", "def any(self):\n for v in self.sects.values():\n if np.any(v):\n return True\n if self.is_full():\n return False\n else:\n return np.any(self.defval)", "def match(self, words):\n return words == self.words(len(words))", "def test_syntax_converter_expand_fulltext(self):\n spi_search = \"find ft The holographic RG is based on\"\n inv_search = \"fulltext:The and fulltext:holographic and fulltext:RG and fulltext:is and fulltext:based and fulltext:on\"\n self._compare_searches(inv_search, spi_search)", "def _isInside(self, v, select, progress):\n # Compute on non-masked sources :\n xyz = self.xyz\n N = xyz.shape[0]\n inside = np.ones((xyz.shape[0],), dtype=bool)\n v = v.reshape(v.shape[0] * 3, 3)\n\n # Loop over sources :\n progress.show()\n for k in range(N):\n # Get the euclidian distance :\n eucl = cdist(v, xyz[[k], :])\n # Get the closest vertex :\n eucl_argmin = eucl.argmin()\n # Get distance to zero :\n xyz_t0 = np.sqrt((xyz[k, :] ** 2).sum())\n v_t0 = np.sqrt((v[eucl_argmin, :] ** 2).sum())\n inside[k] = xyz_t0 <= v_t0\n progress.setValue(100 * k / N)\n self.data.mask = False\n self.data.mask = inside if select != 'inside' else np.invert(inside)\n # Finally update data sources and text :\n self.update()\n self.text_update()\n progress.hide()", "def contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_nonneg( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n else:\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def __check_feature(f,stopwords):\n if f == \"\" or f == None:\n return None\n if f == \"RT\":\n return False\n if f == \"via\":\n return False\n if len(re.findall(r\"(\\w)\", f)) < 1:\n return False\n if f == \"&amp\":\n return False\n if f in stopwords:\n return False\n if len(f) < 2:\n return False\n else:\n return True", "def has_amino_acids(self):\n for frag in self.iter_amino_acids():\n return True\n return False", "def test_feature_in_collection(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n feature = fc1.features[0]\n assert fc1.feature_in_collection(feature)\n\n feature = fc2.features[0]\n assert not fc1.feature_in_collection(feature)", "def ascii_within(directions_array, references_array):\n return numpy.all(_ascii_within(directions_array, references_array))", "def lexicon_features(tokens, feats):\n# feats['neg_words'] = 0;\n# feats['pos_words'] = 0;\n# tokens = list([token.lower() for token in tokens])\n# feats['neg_words'] , feats['pos_words'] = np.count_nonzero(np.in1d(tokens, list(neg_words))), np.count_nonzero(np.in1d(tokens, list(pos_words)))\n neg_count=0\n pos_count=0\n for i in tokens:\n if i.lower() in neg_words:\n neg_count+=1\n if i.lower() in pos_words:\n pos_count+=1\n feats[\"neg_words\"]=neg_count\n feats[\"pos_words\"]=pos_count", "def contains_feat(title):\n return bool(re.search(plugins.feat_tokens(), title, 
flags=re.IGNORECASE))", "def match(segment, trip, k = 3):\n for idx in range(k, len(trip) + 1):\n partial = trip[(idx - k):idx]\n if partial == segment:\n return True\n \n return False", "def test_syntax_converter_expand_search_patterns_multiple_conjoined(self):\n spi_search = \"find t bob sam and couch\"\n inv_search = \"title:bob and title:sam and title:couch\"\n self._compare_searches(inv_search, spi_search)", "def where_in(a, b):\n return torch.nonzero((a[..., None] == b).any(-1)).squeeze()", "def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n except AttributeError:\n pass\n\n if Vobj.is_line():\n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex():\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def test_inV_works(self):\r\n results = self.jon.inV()\r\n assert len(results) == 1\r\n assert self.physics in results\r\n\r\n results = self.physics.inV()\r\n assert len(results) == 1\r\n assert self.eric in results\r\n\r\n results = self.eric.inV()\r\n assert len(results) == 1\r\n assert self.theoretics in results\r\n\r\n results = self.theoretics.inV()\r\n assert len(results) == 1\r\n assert self.blake in results\r\n\r\n results = self.beekeeping.inV()\r\n assert len(results) == 1\r\n assert self.jon in results\r\n\r\n results = self.blake.inV()\r\n assert len(results) == 1\r\n assert self.beekeeping in results", "def test_contains(self):\n r = self.RNA(\"UCA\")\n assert \"U\" in r\n assert \"CA\" in r\n assert \"X\" not in r\n assert \"G\" not in r", "def matcher(features1, features2):\n #TODO: write a matching function\n #Performing the L2-Norm\n new_features1=[]\n new_features2=[]\n for itr in range(5):\n [rootOfSquare1,rootOfSquare2] = sumOfSquares(features1[itr],features2[itr])\n new_features1.append(np.array(features1[itr])/rootOfSquare1)\n new_features2.append(np.array(features2[itr])/rootOfSquare2)\n indices = []\n for itr in range(len(new_features1)):\n findMinDist=[]\n #findMaxCosineVal=[]\n for itr2 in range(len(new_features2)):\n f1 = new_features1[itr]\n f2 = new_features2[itr2]\n\n #For evaluating the cosine similarity\n # [rootOfSquare1,rootOfSquare2] = sumOfSquares(f1,f2)\n # numerator = np.array(f1)*np.array(f2)\n # numeratorSum = sum(numerator)\n # denominator = rootOfSquare1*rootOfSquare2\n # cosine = np.divide(numeratorSum,denominator)\n # findMaxCosineVal.append(cosine)\n\n #For evaluating the similarity based on euclidean distance\n Dist = np.array(f1) - np.array(f2)\n sum=0\n for i in Dist:\n sum=sum+math.pow(i,2)\n rootOfSum = math.sqrt(sum)\n findMinDist.append(rootOfSum)\n # print \"itr: \", itr, \" Matching scores: \", findMaxCosineVal\n # bestMatch = findMaxCosineVal.index(max(findMaxCosineVal))\n bestMatch = findMinDist.index(min(findMinDist))\n indices.append([itr,bestMatch])\n return indices", "def _segments_match(segments_to_match, arguments):\n\n segments_to_match = set(segments_to_match)\n for arg in arguments:\n for segment in list(segments_to_match):\n if segment in arg:\n segments_to_match.remove(segment)\n if not segments_to_match:\n return True\n return not segments_to_match" ]
[ "0.7694182", "0.6534538", "0.611552", "0.59962875", "0.56264204", "0.55972457", "0.5396527", "0.53882056", "0.5361637", "0.52219254", "0.5220756", "0.5129794", "0.5123007", "0.5102911", "0.5101226", "0.506148", "0.5042564", "0.5027478", "0.50159997", "0.5011232", "0.5005611", "0.4997795", "0.49870333", "0.49855343", "0.49701053", "0.49320298", "0.49248138", "0.49116057", "0.49068505", "0.4905949", "0.4902358", "0.48938245", "0.4888808", "0.4884681", "0.48799938", "0.48715854", "0.48711106", "0.48709103", "0.486768", "0.48657638", "0.485931", "0.48543334", "0.48347995", "0.4829053", "0.48272374", "0.4817651", "0.4813017", "0.48069203", "0.48047662", "0.48015615", "0.4800942", "0.4800246", "0.47997153", "0.47997153", "0.4796142", "0.47942504", "0.47942504", "0.47935343", "0.47904563", "0.47858906", "0.47821808", "0.47808695", "0.47670484", "0.47668836", "0.4761123", "0.47596538", "0.47596538", "0.475342", "0.4750249", "0.47476622", "0.47446352", "0.4740441", "0.47352275", "0.47308275", "0.4730703", "0.47246763", "0.47205797", "0.4720273", "0.47145426", "0.471447", "0.47143447", "0.47130987", "0.47047505", "0.4701759", "0.47009853", "0.4694422", "0.46926704", "0.46850955", "0.46845633", "0.46776077", "0.4676133", "0.46738443", "0.4671504", "0.4667492", "0.46578994", "0.46577033", "0.46561494", "0.46559197", "0.46543208", "0.46441692" ]
0.78391874
0
Return `True` if any segment in `inv` matches the features in `fts`.
def fts_match_any(self, fts, inv, normalize=True): return any([self.fts(s, normalize) >= fts for s in inv])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fts_match_all(self, fts, inv, normalize=True):\n return all([self.fts(s, normalize) >= fts for s in inv])", "def fts_intersection(self, segs, normalize=True):\n return reduce(lambda a, b: a & b,\n [self.fts(s, normalize) for s in self.filter_segs(segs, normalize)])", "def fts_contrast(self, fs, ft_name, inv, normalize=True):\n inv_segs = filter(lambda x: x >= fs, map(lambda seg: self.fts(seg, normalize), inv))\n for a in inv_segs:\n for b in inv_segs:\n if a != b:\n if a.differing_specs(b) == [ft_name]:\n return True\n return False", "def fts_count(self, fts, inv, normalize=True):\n return len(list(filter(lambda s: self.fts(s, normalize) >= fts, inv)))", "def match(self, sentence) -> bool:\r\n if (any(word[0] in sentence.lower() for word in self.word_list if word[1] == \"partial\") or any(\r\n word[0].lower() == sentence.lower() for word in self.word_list if word[1] == \"full\")) and not any(\r\n word[0] in sentence.lower() for word in self.word_list if word[1] == \"not\"):\r\n return True\r\n else:\r\n return False", "def match_features(phone_feats, other_feats):\n for feat in other_feats.keys():\n if phone_feats[feat] != other_feats[feat] and other_feats[feat] != UNDEF:\n return False\n return True", "def matches(self, feature):\n pass", "def hasIntersectedWith(self, f):\n try:\n return f in self.hasIntersected\n except AttributeError:\n return False", "def email_features(word_indices):\n\n # Total number of words in the dictionary\n n = 1899\n\n vector = np.arange(1, n + 1).reshape(-1, 1)\n\n return np.in1d(vector, word_indices)", "def all_segs_matching_fts(self, ft_mask):\n matching_segs = [ipa for (ipa, fts) in self.segments if fts >= ft_mask]\n return sorted(matching_segs, key=lambda x: len(x), reverse=True)", "def test_contains_returns_true_for_partial_word_in_multi_word_trie(multi_trie):\n assert multi_trie.contains(\"hell\") is True", "def __contains__(self, sentence):\n return sentence in self._sentences", "def test_hindi(doc):\n hindi_dictionary = ['kai','hai','dhaan','dhan','jhona','pili','jankari','saaf','mela','narma','raja','brahma','jai','parbhani','sangli','jana']\n flag = any(hindi in doc for hindi in hindi_dictionary)\n return(flag)", "def vsone_feature_matching(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict={},\n flann1=None, flann2=None, verbose=None):\n import vtool as vt\n import pyflann\n from vtool import spatial_verification as sver\n #import vtool as vt\n sver_xy_thresh = cfgdict.get('sver_xy_thresh', .01)\n ratio_thresh = cfgdict.get('ratio_thresh', .625)\n refine_method = cfgdict.get('refine_method', 'homog')\n symmetric = cfgdict.get('symmetric', False)\n K = cfgdict.get('K', 1)\n Knorm = cfgdict.get('Knorm', 1)\n #ratio_thresh = .99\n # GET NEAREST NEIGHBORS\n checks = 800\n #pseudo_max_dist_sqrd = (np.sqrt(2) * 512) ** 2\n #pseudo_max_dist_sqrd = 2 * (512 ** 2)\n if verbose is None:\n verbose = True\n\n flann_params = {'algorithm': 'kdtree', 'trees': 8}\n if flann1 is None:\n flann1 = vt.flann_cache(vecs1, flann_params=flann_params, verbose=verbose)\n\n #print('symmetric = %r' % (symmetric,))\n if symmetric:\n if flann2 is None:\n flann2 = vt.flann_cache(vecs2, flann_params=flann_params, verbose=verbose)\n\n try:\n try:\n num_neighbors = K + Knorm\n fx2_to_fx1, fx2_to_dist = normalized_nearest_neighbors(flann1, vecs2, num_neighbors, checks)\n #fx2_to_fx1, _fx2_to_dist = flann1.nn_index(vecs2, num_neighbors=K, checks=checks)\n if symmetric:\n fx1_to_fx2, fx1_to_dist = normalized_nearest_neighbors(flann2, vecs1, K, checks)\n\n except 
pyflann.FLANNException:\n print('vecs1.shape = %r' % (vecs1.shape,))\n print('vecs2.shape = %r' % (vecs2.shape,))\n print('vecs1.dtype = %r' % (vecs1.dtype,))\n print('vecs2.dtype = %r' % (vecs2.dtype,))\n raise\n if symmetric:\n is_symmetric = flag_symmetric_matches(fx2_to_fx1, fx1_to_fx2)\n fx2_to_fx1 = fx2_to_fx1.compress(is_symmetric, axis=0)\n fx2_to_dist = fx2_to_dist.compress(is_symmetric, axis=0)\n\n assigntup = assign_unconstrained_matches(fx2_to_fx1, fx2_to_dist)\n\n fx2_match, fx1_match, fx1_norm, match_dist, norm_dist = assigntup\n fm_ORIG = np.vstack((fx1_match, fx2_match)).T\n fs_ORIG = 1 - np.divide(match_dist, norm_dist)\n # APPLY RATIO TEST\n fm_RAT, fs_RAT, fm_norm_RAT = ratio_test(fx2_match, fx1_match, fx1_norm,\n match_dist, norm_dist,\n ratio_thresh)\n\n # SPATIAL VERIFICATION FILTER\n #with ut.EmbedOnException():\n match_weights = np.ones(len(fm_RAT))\n svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_RAT, sver_xy_thresh,\n dlen_sqrd2, match_weights=match_weights,\n refine_method=refine_method)\n if svtup is not None:\n (homog_inliers, homog_errors, H_RAT) = svtup[0:3]\n else:\n H_RAT = np.eye(3)\n homog_inliers = []\n fm_RAT_SV = fm_RAT.take(homog_inliers, axis=0)\n fs_RAT_SV = fs_RAT.take(homog_inliers, axis=0)\n fm_norm_RAT_SV = fm_norm_RAT[homog_inliers]\n\n top_percent = .5\n top_idx = ut.take_percentile(fx2_to_dist.T[0].argsort(), top_percent)\n fm_TOP = fm_ORIG.take(top_idx, axis=0)\n fs_TOP = fx2_to_dist.T[0].take(top_idx)\n #match_weights = np.ones(len(fm_TOP))\n #match_weights = (np.exp(fs_TOP) / np.sqrt(np.pi * 2))\n match_weights = 1 - fs_TOP\n #match_weights = np.ones(len(fm_TOP))\n svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_TOP, sver_xy_thresh,\n dlen_sqrd2, match_weights=match_weights,\n refine_method=refine_method)\n if svtup is not None:\n (homog_inliers, homog_errors, H_TOP) = svtup[0:3]\n np.sqrt(homog_errors[0] / dlen_sqrd2)\n else:\n H_TOP = np.eye(3)\n homog_inliers = []\n fm_TOP_SV = fm_TOP.take(homog_inliers, axis=0)\n fs_TOP_SV = fs_TOP.take(homog_inliers, axis=0)\n\n matches = {\n 'ORIG' : MatchTup2(fm_ORIG, fs_ORIG),\n 'RAT' : MatchTup3(fm_RAT, fs_RAT, fm_norm_RAT),\n 'RAT+SV' : MatchTup3(fm_RAT_SV, fs_RAT_SV, fm_norm_RAT_SV),\n 'TOP' : MatchTup2(fm_TOP, fs_TOP),\n 'TOP+SV' : MatchTup2(fm_TOP_SV, fs_TOP_SV),\n }\n output_metdata = {\n 'H_RAT': H_RAT,\n 'H_TOP': H_TOP,\n }\n\n except MatchingError:\n fm_ERR = np.empty((0, 2), dtype=np.int32)\n fs_ERR = np.empty((0, 1), dtype=np.float32)\n H_ERR = np.eye(3)\n matches = {\n 'ORIG' : MatchTup2(fm_ERR, fs_ERR),\n 'RAT' : MatchTup3(fm_ERR, fs_ERR, fm_ERR),\n 'RAT+SV' : MatchTup3(fm_ERR, fs_ERR, fm_ERR),\n 'TOP' : MatchTup2(fm_ERR, fs_ERR),\n 'TOP+SV' : MatchTup2(fm_ERR, fs_ERR),\n }\n output_metdata = {\n 'H_RAT': H_ERR,\n 'H_TOP': H_ERR,\n }\n\n return matches, output_metdata", "def has_vectored_fields(self):\r\n return any(ftype.vector for ftype in self._by_number)", "def __contains__(self, ngram):\n return ngram in self.root", "def _gen_matches(target_units, source_units, stoplist_set, features_size):\n for hits2positions in gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v) for k, v in hits2positions.items()\n if len(v) >= 2}\n for (t_ind, s_ind), positions in overhits2positions.items():\n yield (t_ind, s_ind, positions)", "def __contains__(self, v):\n for i in self:\n if v in i:\n return True\n False", "def __check_features(f_list, stopwords):\n ok = True\n for f in f_list:\n if 
not(__check_feature(f,stopwords)):\n return False\n return True", "def flag_features_in_associate_clause(self, docFeatList):\n for feat in docFeatList:\n tlink = feat.getTlink()\n if not tlink or tlink.getType()!='ASSOCIATE':\n continue\n \n sentNum = feat.getSentNum()\n sentence = self.sentences[sentNum]\n sent_start = self.sentence_startPos[sentNum]\n startClause = tlink.getTimexes()[1].getStartPos()\n endPos = sentence[startClause - sent_start:].find(',')\n if endPos < 0: #: No comma is found, skip to be safe\n continue\n else:\n endClause = startClause + endPos\n \n zone = (startClause, endClause, sentNum)\n if not zone in self.clauseZones:\n self.clauseZones.append(zone)\n \n if feat.getStartPos() > startClause and feat.getStartPos() < endClause:\n feat.setInClause(True)\n feat.setTlink(None)\n \n return docFeatList", "def has_match(trajs_0, trajs_1):\n for i in range(len(trajs_0)):\n for j in range(len(trajs_1)):\n R = (trajs_0[i].get_slice()[:,:2] == trajs_1[j].get_slice()[:,:2])\n if isinstance(R, bool):\n if R:\n return True \n elif R.all():\n return True \n else:\n pass \n return False", "def _get_same_sentence_features(words, indexes):\n roots = [(i, w) for i, w in enumerate(words) if int(w.index) == 1]\n points = [0, 0, 0]\n for i, w in roots:\n for j in range(len(indexes)):\n if i <= indexes[j]:\n points[0] += 1\n feature_list = [\n points[0] == points[1],\n points[0] == points[2],\n points[1] == points[2],\n ]\n return feature_list", "def seg_known(self, segment, normalize=True):\n if normalize:\n segment = FeatureTable.normalize(segment)\n return segment in self.seg_dict", "def contains(self, searchstr: str):\n for x in self.sa:\n if searchstr in x:\n return True\n pass", "def __contains__(self, feature):\n return feature in self.features", "def extract_features(document):\n document_words = set(document)\n features = {}\n global word_features\t\n for word in word_features:\n features['contains(%s)' % word] = (word in document_words)\n return features", "def any_within_poly(self, poly):\n for pt in self:\n if poly.contains(pt):\n return True\n return False", "def test_contains_returns_true_when_word_in_trie(full_trie):\n assert full_trie.contains(\"hey\") is True", "def partialSetMatchAnnotated(self, annotatedMention):\n aWords = annotatedMention.importantWords()\n dWords = self.importantWords()\n \n if dWords.intersection(aWords) == dWords:\n # this mention is a subset of the annotated mention\n if dWords == aWords:\n return True # exact match\n if len(annotatedMention.shortSets) > 0:\n # annotated mention has short sections, try to if one is included\n # in the detected mention\n for ss in annotatedMention.shortSets:\n if ss.intersection(dWords) == ss:\n # detected mention contains all of the words in a short section\n return True\n \n return False", "def match(self, sentence) -> bool:\r\n for word in self.word_list:\r\n if word.lower() in sentence.lower():\r\n return True\r\n return False", "def __contains__(self, val):\n return val in [i[0] for i in self.registered_intents]", "def vectorize(self,text):\r\n \r\n lv_active = set()\r\n words = word_tokenize(text)\r\n for word in words:\r\n if word in self.tree:\r\n ancestors = self.tree.word_ancestors(word)\r\n lv_active.update(ancestors)\r\n \r\n return self.nl.isin(lv_active).values", "def __contains__(self, ngram):\n return ngram in self._ngrams", "def pertenece(self,v):\n return v in self.vertices.keys()", "def can_recept(self, text, *args, **kwargs):\n for each_cur in self.flat_norm.keys():\n if each_cur.lower() in 
text.lower():\n return True\n\n else:\n return False", "def __contains__(self, f) :\n if self.__disc is infinity :\n return True\n \n (s, l) = f\n\n (a, _, c) = apply_GL_to_form(self.__p1list[l], s)\n if not c % self.__level == 0 :\n return False\n\n return a + c < self.index()", "def isin(hi):\n return finder.search(hi)", "def contains_feat(title):\n return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))", "def __contains__(self, doc_label):\n return doc_label in self.docs", "def any_term(self, row):\n return any(term in row for term in self.search_terms)", "def _contains(self, df: pandas.DataFrame, mapped_triples: MappedTriples, invert: bool = False) -> numpy.ndarray:\n raise NotImplementedError", "def conj(fs):\n def feature(s, i):\n return all(f(s, i) for f in fs)\n return feature", "def contains ( self, pos ):\n \n poly = Polygon(array(self.edges).reshape(-1,2)[:,0],array(self.edges).reshape(-1,2)[:,1])\n dists = poly.is_inside(pos[0,:],pos[1,:]) \n if self.include_border:\n inds = dists >= -self.abs_tol\n else:\n inds = dists > 0\n \n \n # if none inside, take nearest\n if ~inds.any() and self.default_nearest:\n dr2 = array(self.edges).reshape(-1,2).mean(0)\n inds[argmin(dr2)] = True\n \n return inds", "def __contains__(self, fragment):\n return fragment in self._items", "def __contains__(self, seq):\n return bool(libhts.faidx_has_seq(self._fai, seq))", "def isIntersection(self, v):\n return (any(inter.v == v for inter in self.inter1) or\n any(inter.v == v for inter in self.inter2))", "def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex(): \n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def search(self):\n\n term = self.substitute()\n ##print (\"searching:\",term)\n ##print (\"in facts\",self.facts)\n ##input()\n bindings = deepcopy(self.bindings)\n found = False\n for fact in self.facts:\n found = self.unify(term,fact,bindings)\n if found:\n bound_vars = list(bindings.keys())\n n_bound_vars = len(bound_vars)\n for i in range(n_bound_vars):\n for j in range(i+1,n_bound_vars):\n if bindings[bound_vars[i]] == bindings[bound_vars[j]]:\n return False\n self.facts.remove(self.substitute_with_bindings(bindings)) #THINK ABOUT THIS\n break\n return found", "def _attentive_matching(self, h1, h2, cosine_matrix, w):\n # h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n h1 = self._time_distributed_multiply(h1, w)\n # attentive vector (batch_size, h1_timesteps, embedding_szie)\n attentive_vec = self._mean_attentive_vectors(h2, cosine_matrix)\n # attentive_vec * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n attentive_vec = self._time_distributed_multiply(attentive_vec, w)\n # matching vector, (batch_size, h1_timesteps, mp_dim)\n matching = self._cosine_similarity(h1, attentive_vec)\n return matching", "def _is_in_doc(t: int, d: List[List[str]]) -> bool:\n t = str(t)\n for s in d:\n if t in s:\n return True\n return False", "def interior_contains(self, Vobj):\n return False", "def interior_contains(self, Vobj):\n return False", "def route_is_contained_in_other_route(route,target):\n id_route = 0\n id_target = 0\n found = True\n while found and id_route < len(route) and id_target < len(target):\n found = False\n while 
not found and id_target < len(target):\n if route[id_route] == target[id_target]:\n found = True\n else:\n id_target += 1\n id_route += 1\n return found", "def is_in(elt, seq):\n\treturn any(x is elt for x in seq)", "def __contains__(self, key):\n return key in self.vertList", "def in_field(self, vec):\n return (abs(vec[0]) + abs(vec[1]) + abs(vec[2])) <= 2 * self.n", "def lexicon_features(tokens, feats):\n# feats['neg_words'] = 0;\n# feats['pos_words'] = 0;\n# tokens = list([token.lower() for token in tokens])\n# feats['neg_words'] , feats['pos_words'] = np.count_nonzero(np.in1d(tokens, list(neg_words))), np.count_nonzero(np.in1d(tokens, list(pos_words)))\n neg_count=0\n pos_count=0\n for i in tokens:\n if i.lower() in neg_words:\n neg_count+=1\n if i.lower() in pos_words:\n pos_count+=1\n feats[\"neg_words\"]=neg_count\n feats[\"pos_words\"]=pos_count", "def test_contains_returns_false_when_word_mismatches(full_trie):\n assert full_trie.contains(\"hello\") is False", "def is_in(elt, seq):\n return any(x is elt for x in seq)", "def __contains__(self, i):\n return i in self._ar", "def contains_any_phrase(sent, phrases):\n for p in phrases:\n if p in sent:\n return True\n return False", "def where_in(a, b):\n return torch.nonzero((a[..., None] == b).any(-1)).squeeze()", "def test_match_in(self, subdocument):\n assert subdocument.match({\"hello\": {\"$in\": [\"there\", \"here\"]}})\n assert not subdocument.match({\"hello\": {\"$in\": [\"ici\", \"here\"]}})", "def has_subroutines(otf: ttLib.TTFont) -> bool:\n table_tag = _sniff_cff_table_format(otf)\n top_dict = otf[table_tag].cff.topDictIndex[0]\n all_subrs = [top_dict.GlobalSubrs]\n if hasattr(top_dict, \"FDArray\"):\n all_subrs.extend(\n fd.Private.Subrs for fd in top_dict.FDArray if hasattr(fd.Private, \"Subrs\")\n )\n elif hasattr(top_dict.Private, \"Subrs\"):\n all_subrs.append(top_dict.Private.Subrs)\n return any(all_subrs)", "def contains (self,phrase,chars):\r\n\r\n for x in chars:\r\n\r\n if x in phrase:\r\n return True\r\n return False", "def __check_feature(f,stopwords):\n if f == \"\" or f == None:\n return None\n if f == \"RT\":\n return False\n if f == \"via\":\n return False\n if len(re.findall(r\"(\\w)\", f)) < 1:\n return False\n if f == \"&amp\":\n return False\n if f in stopwords:\n return False\n if len(f) < 2:\n return False\n else:\n return True", "def contains(self, Vobj):\n return self.polyhedron()._is_zero( self.eval(Vobj) )", "def contains(self, Vobj):\n return self.polyhedron()._is_zero( self.eval(Vobj) )", "def test_distribution_with_many_clauses(self):\n spi_search = \"find a mele and brooks and holtkamp and o'connell\"\n inv_search = \"author:mele author:brooks author:holtkamp author:o'connell\"\n self._compare_searches(inv_search, spi_search)", "def contained_in(self, smiles):\n return MolFromSmiles(smiles).HasSubstructMatch(self.rdmol)", "def vsone_matching(metadata, cfgdict={}, verbose=None):\n # import vtool as vt\n #assert isinstance(metadata, ut.LazyDict), 'type(metadata)=%r' % (type(metadata),)\n\n annot1 = metadata['annot1']\n annot2 = metadata['annot2']\n\n ensure_metadata_feats(annot1, cfgdict=cfgdict)\n ensure_metadata_feats(annot2, cfgdict=cfgdict)\n\n if 'dlen_sqrd' not in annot2:\n def eval_dlen_sqrd(annot):\n rchip = annot['rchip']\n dlen_sqrd = rchip.shape[0] ** 2 + rchip.shape[1] ** 2\n return dlen_sqrd\n annot2.set_lazy_func('dlen_sqrd', lambda: eval_dlen_sqrd(annot2))\n\n # Exceute relevant dependencies\n kpts1 = annot1['kpts']\n vecs1 = annot1['vecs']\n kpts2 = annot2['kpts']\n vecs2 = 
annot2['vecs']\n dlen_sqrd2 = annot2['dlen_sqrd']\n flann1 = annot1.get('flann', None)\n flann2 = annot2.get('flann', None)\n\n matches, output_metdata = vsone_feature_matching(\n kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict=cfgdict,\n flann1=flann1, flann2=flann2, verbose=verbose)\n metadata.update(output_metdata)\n return matches, metadata", "def contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_nonneg( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n else:\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def __contains__(self, label: str) -> bool:\n return label in self.fuzzy_patterns or label in self.regex_patterns", "def any(self, *args, **kwargs):\n if self.fragmented:\n return (\n np.any(self[self._begin:].view(ndarray), *args, **kwargs) and\n np.any(self[:self._end].view(ndarray), *args, **kwargs)\n )\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n return (np.any(part.view(ndarray), *args, **kwargs))", "def matches(self, tgt_residence_dir: str) -> bool:", "def contains(self,other):\n retVal = False\n\n bounds = self.points\n if( isinstance(other,Feature) ):# A feature\n retVal = True\n for p in other.points: # this isn't completely correct - only tests if points lie in poly, not edges.\n p2 = (int(p[0]),int(p[1]))\n retVal = self._pointInsidePolygon(p2,bounds)\n if( not retVal ):\n break\n # a single point\n elif( (isinstance(other,tuple) and len(other)==2) or ( isinstance(other,np.ndarray) and other.shape[0]==2) ):\n retVal = self._pointInsidePolygon(other,bounds)\n\n elif( isinstance(other,tuple) and len(other)==3 ): # A circle\n #assume we are in x,y, r format\n retVal = True\n rr = other[2]*other[2]\n x = other[0]\n y = other[1]\n for p in bounds:\n test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))\n if( test < rr ):\n retVal = False\n break\n\n elif( isinstance(other,tuple) and len(other)==4 and ( isinstance(other[0],float) or isinstance(other[0],int))):\n retVal = ( self.maxX() <= other[0]+other[2] and\n self.minX() >= other[0] and\n self.maxY() <= other[1]+other[3] and\n self.minY() >= other[1] )\n elif(isinstance(other,list) and len(other) >= 4): # an arbitrary polygon\n #everything else ....\n retVal = True\n for p in other:\n test = self._pointInsidePolygon(p,bounds)\n if(not test):\n retVal = False\n break\n else:\n logger.warning(\"SimpleCV did not recognize the input type to features.contains. 
This method only takes another blob, an (x,y) tuple, or a ndarray type.\")\n return False\n\n return retVal", "def _isInside(self, v, select, progress):\n # Compute on non-masked sources :\n xyz = self.xyz\n N = xyz.shape[0]\n inside = np.ones((xyz.shape[0],), dtype=bool)\n v = v.reshape(v.shape[0] * 3, 3)\n\n # Loop over sources :\n progress.show()\n for k in range(N):\n # Get the euclidian distance :\n eucl = cdist(v, xyz[[k], :])\n # Get the closest vertex :\n eucl_argmin = eucl.argmin()\n # Get distance to zero :\n xyz_t0 = np.sqrt((xyz[k, :] ** 2).sum())\n v_t0 = np.sqrt((v[eucl_argmin, :] ** 2).sum())\n inside[k] = xyz_t0 <= v_t0\n progress.setValue(100 * k / N)\n self.data.mask = False\n self.data.mask = inside if select != 'inside' else np.invert(inside)\n # Finally update data sources and text :\n self.update()\n self.text_update()\n progress.hide()", "def contains(self, smiles):\n return self.rdmol.HasSubstructMatch(MolFromSmiles(str(smiles)))", "def is_vert(e) :\n f = e[0][0]\n for t in e :\n if f != t[0] :\n return False\n return True", "def test_feature_in_collection(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n feature = fc1.features[0]\n assert fc1.feature_in_collection(feature)\n\n feature = fc2.features[0]\n assert not fc1.feature_in_collection(feature)", "def passivep(tags):\n \n after_to_be = list(dropwhile(lambda tag: not tag.startswith(\"BE\"), tags))\n nongerund = lambda tag: tag.startswith(\"V\") and not tag.startswith(\"VBG\")\n\n filtered = filter(nongerund, after_to_be)\n out = any(filtered)\n\n return out", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def match(self, sentence) -> bool:\r\n pass", "def feat_overlap(f1, f2):\n f1start = int(f1[3])\n f1end = int(f1[4])\n f2start = int(f2[3])\n f2end = int(f2[4])\n\n if f1start <= f2end and f1end >= f2start:\n return True\n return False", "def test_contains(self):\n r = self.RNA(\"UCA\")\n assert \"U\" in r\n assert \"CA\" in r\n assert \"X\" not in r\n assert \"G\" not in r", "def is_phrase_in(self, phrase, text):\n return re.search(r\"\\b{}\\b\".format(phrase), text, re.IGNORECASE) is not None", "def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n except AttributeError:\n pass\n\n if Vobj.is_line():\n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex():\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def get_features(words, vectors):\n result = [vectors.loc[word].values for word in words if word in df_keys.values.reshape(-1)]\n if result:\n return np.stack(result)\n return None", "def test_syntax_converter_expand_fulltext(self):\n spi_search = \"find ft The holographic RG is based on\"\n inv_search = \"fulltext:The and fulltext:holographic and fulltext:RG and fulltext:is and fulltext:based and fulltext:on\"\n self._compare_searches(inv_search, spi_search)", "def contains_vect(self, v: Tuple[float, float]) -> bool:\n assert len(v) == 2\n return bool(lib.cpBBContainsVect(self, v))", "def _candidates(self, token):\n token_as_list = [token]\n token_1_edits = NorvigCorrector._one_edit_token_distances(token)\n token_2_edits = 
NorvigCorrector._two_edits_token_distances(token)\n return (\n self._known_in(token_as_list) or self._known_in(token_1_edits) or self._known_in(token_2_edits) or\n token_as_list)", "def contain_op(self, expr):\n return expr in self.table.inv", "def in_lattice(self, ref):\n assert ref[0] == self.start.sym, 'The first word is not null.'\n cur_node = set([self.start])\n for word in ref[1:]:\n next_node = set()\n for i in cur_node:\n for j in i.exits:\n if word == j.dest.sym:\n next_node.add(j.dest)\n if not next_node:\n return False\n else:\n cur_node = next_node\n if sum([i == self.end for i in cur_node]) == 0:\n return False\n return True", "def test_match_sub_in(self, subdocument):\n assert subdocument.match({\"and.the\": {\"$in\": [\"duck\", \"drake\"]}})\n assert not subdocument.match({\"and.the\": {\"$in\": [\"hyppo\", \"lion\"]}})", "def _is_included(self, graph, fp):\n return any(fp in includes for includes in itervalues(graph))", "def __contains__(self: TokenMatcher, label: str) -> bool:\n return label in self._patterns", "def test_syntax_converter_expand_search_patterns_multiple_conjoined(self):\n spi_search = \"find t bob sam and couch\"\n inv_search = \"title:bob and title:sam and title:couch\"\n self._compare_searches(inv_search, spi_search)", "def ascii_within(directions_array, references_array):\n return numpy.all(_ascii_within(directions_array, references_array))", "def filter_one_v_all(description):\n brain_parts = [\"forebrain\", \"midbrain\", \"hindbrain\"]\n for part in brain_parts:\n if part in description:\n return True\n return False" ]
[ "0.76956517", "0.6445244", "0.6092043", "0.603439", "0.5596667", "0.55699706", "0.5360697", "0.53144646", "0.52958703", "0.52827823", "0.51305246", "0.51228803", "0.50828075", "0.50755215", "0.50623965", "0.50345546", "0.5033701", "0.502957", "0.50193626", "0.5012903", "0.5011398", "0.50076133", "0.500338", "0.49948752", "0.49916336", "0.49841496", "0.4963538", "0.49338245", "0.49166107", "0.49059576", "0.48999304", "0.48872423", "0.4886792", "0.48858443", "0.48852855", "0.4884545", "0.48843092", "0.48705533", "0.48591092", "0.48584473", "0.48577267", "0.4857298", "0.48556685", "0.4854102", "0.48475546", "0.483934", "0.48349226", "0.48348883", "0.48325568", "0.48278317", "0.48247147", "0.48247147", "0.4815967", "0.4814713", "0.4802621", "0.48020968", "0.48008144", "0.48001596", "0.47952306", "0.4786081", "0.47806966", "0.47804672", "0.47791606", "0.47756207", "0.4775148", "0.47718588", "0.47675532", "0.47675532", "0.47554576", "0.47495192", "0.47468042", "0.47312516", "0.47286323", "0.4727317", "0.47268206", "0.47218287", "0.4721408", "0.47079787", "0.47062472", "0.47054464", "0.47014016", "0.4695576", "0.4695576", "0.46936455", "0.46904647", "0.46887854", "0.46870598", "0.46845186", "0.46844113", "0.46713", "0.46671525", "0.4658317", "0.46524823", "0.4652276", "0.4647769", "0.46347216", "0.46331102", "0.46306247", "0.46259284", "0.461763" ]
0.7692603
1
Return `True` if there is a segment in `inv` that contrasts in feature `ft_name`.
def fts_contrast(self, fs, ft_name, inv, normalize=True): inv_segs = filter(lambda x: x >= fs, map(lambda seg: self.fts(seg, normalize), inv)) for a in inv_segs: for b in inv_segs: if a != b: if a.differing_specs(b) == [ft_name]: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fts_match_any(self, fts, inv, normalize=True):\n return any([self.fts(s, normalize) >= fts for s in inv])", "def seg_known(self, segment, normalize=True):\n if normalize:\n segment = FeatureTable.normalize(segment)\n return segment in self.seg_dict", "def fts_match_all(self, fts, inv, normalize=True):\n return all([self.fts(s, normalize) >= fts for s in inv])", "def pertenece(self,v):\n return v in self.vertices.keys()", "def is_incident(self, Vobj):\n return self.polyhedron().incidence_matrix()[Vobj.index(), self.index()] == 1", "def is_incident(self, Vobj):\n return self.polyhedron().incidence_matrix()[Vobj.index(), self.index()] == 1", "def contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_nonneg( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n else:\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def contains_vertex(self, v_name: str) -> bool:\n for i in self.adj_list:\n if i == v_name:\n return True\n return False", "def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex(): \n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )\n except AttributeError:\n pass\n\n if Vobj.is_line():\n return self.polyhedron()._is_zero( self.eval(Vobj) )\n else:\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n except AttributeError:\n pass\n\n if Vobj.is_line():\n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex():\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def isVersor(self) -> bool:\n\n Vhat = self.gradeInvol()\n Vrev = ~self\n Vinv = Vrev/(self*Vrev)[0]\n\n gpres = grades_present(Vhat*Vinv, 0.000001)\n if len(gpres) == 1:\n if gpres[0] == 0:\n if np.sum(np.abs((Vhat*Vinv).value - (Vinv*Vhat).value)) < 0.0001:\n for e in basis_vectors(self.layout).values():\n gpres = grades_present(Vhat*e*Vrev, 0.000001)\n if not (len(gpres) == 1 and gpres[0] == 1):\n return False\n gpres = grades_present(self, 0.000001)\n if len(gpres) == 1:\n return False\n else:\n return True\n return False", "def __contains__(self, sentence):\n return sentence in self._sentences", "def hasIntersectedWith(self, f):\n try:\n return f in self.hasIntersected\n except AttributeError:\n return False", "def is_equivalence(self) -> bool:", "def isrefinement(self, t):\n if isinstance(t, basestring):\n return t in self.refined_types\n return self.isdependent(t)", "def is_calibration_tag_for_name(ins, exp, run, name='dark') :\n for attr in run_attributes(ins, exp, run) :\n if attr['class'] == 'Calibrations' and attr['name'] == name : return True\n return False", "def __continas__ (self, name):\n return name in self.containments", "def is_from_inertial(self, freq_threshold: float = 0.05) -> bool:\n self = cast(\"Flight\", 
self)\n\n if \"compute_track\" not in self.data.columns:\n self = self.cumulative_distance(compute_gs=False)\n\n freq = (\n self.diff(\"compute_track\")\n .compute_track_diff.round()\n .value_counts(normalize=True)\n )\n if 90 not in freq.index or -90 not in freq.index:\n return False\n return ( # type: ignore\n freq[90] > freq_threshold and freq[-90] > freq_threshold\n )", "def is_virtual(entry):\n\n if entry.get('text', '') == '':\n return 'No'\n\n # search for Invasion split cards\n regex = search('\\[This is half of the split card (.+)\\]', entry['text'])\n if regex is not None:\n return 'Yes: ' + regex.group(1)\n\n # search for Kamigawa flip cards\n regex = search('\\[Flips from (.+)\\]', entry['text'])\n if regex is not None:\n return 'Yes: ' + regex.group(1)\n\n # search for Innistrad shapeshifters\n regex = search('\\[(|.+)Back face. Transforms into (.+)\\.\\]', entry['text'])\n if regex is not None:\n return 'Yes: ' + regex.group(2)\n\n return 'No'", "def valid_inverse_functionality(self, graph: Graph, fact: Tuple[str, str, str]) -> bool:\n similar_relation_exists = (None, fact[1], fact[2]) in graph\n # increment the counter if a similar fact already exists (True -> +1, False -> +0)\n self.num_facts_violating_inverse_functionality += similar_relation_exists\n return not similar_relation_exists", "def in_lattice(self, ref):\n assert ref[0] == self.start.sym, 'The first word is not null.'\n cur_node = set([self.start])\n for word in ref[1:]:\n next_node = set()\n for i in cur_node:\n for j in i.exits:\n if word == j.dest.sym:\n next_node.add(j.dest)\n if not next_node:\n return False\n else:\n cur_node = next_node\n if sum([i == self.end for i in cur_node]) == 0:\n return False\n return True", "def __contains__(self, f) :\n if self.__disc is infinity :\n return True\n \n (s, l) = f\n\n (a, _, c) = apply_GL_to_form(self.__p1list[l], s)\n if not c % self.__level == 0 :\n return False\n\n return a + c < self.index()", "def match(self, sentence) -> bool:\r\n if (any(word[0] in sentence.lower() for word in self.word_list if word[1] == \"partial\") or any(\r\n word[0].lower() == sentence.lower() for word in self.word_list if word[1] == \"full\")) and not any(\r\n word[0] in sentence.lower() for word in self.word_list if word[1] == \"not\"):\r\n return True\r\n else:\r\n return False", "def is_inverted_dim(subj, morph_dim):\n left, right = morphs.subj.TRAINING[subj].lower().split(\"|\")\n les, gre = morph_dim\n assert (les in left) != (gre in left)\n assert (les in right) != (gre in right)\n return gre in left", "def is_transversal(self, gate_name: str) -> bool:\n return gate_name in self._transversal_gates", "def _isActive(self, inPars):\n\t\tfor f in self.inputKeys:\n\t\t\tif f.name not in inPars:\n\t\t\t\treturn False\n\t\treturn True", "def _contains_vowel(self, stem):\n for i in range(len(stem)):\n if not self._is_consonant(stem, i):\n return True\n return False", "def interior_contains(self, Vobj):\n return False", "def interior_contains(self, Vobj):\n return False", "def contains(self, Vobj):\n return self.polyhedron()._is_zero( self.eval(Vobj) )", "def contains(self, Vobj):\n return self.polyhedron()._is_zero( self.eval(Vobj) )", "def is_incident(self, Hobj):\n return self.polyhedron().incidence_matrix()[self.index(), Hobj.index()] == 1", "def is_incident(self, Hobj):\n return self.polyhedron().incidence_matrix()[self.index(), Hobj.index()] == 1", "def can_recept(self, text, *args, **kwargs):\n for each_cur in self.flat_norm.keys():\n if each_cur.lower() in text.lower():\n return 
True\n\n else:\n return False", "def __bool__(self):\n return _osgAnimation.mapVertexInfluence___bool__(self)", "def isEquivTo(self, details):\r\n return self.getWikiLanguageName() == details.getWikiLanguageName()", "def checkintercambio(self,pos):\n\t\tif not Cost().existe('intercambio'):\n\t\t\treturn False\n\t\tif pos < len(self.objective):\n\t\t\t\"\"\"Evaluo caso de intercambio\"\"\"\n\t\t\tb1 = self.verBase()\n\t\t\tb2= self.verSigBase()\n\t\t\to1 = self.objective[pos]\n\t\t\to2 = self.verSigObj(pos)\n\t\t\tif (not b2 is None and not o2 is None) and (b1 == o2) and (b2 == o1):\n\t\t\t\treturn True\n\t\treturn False", "def isSegmentFile(self, segment):\n return os.path.isfile(\"{wd}/{jn}-run/{seg}.rst7\".format( wd=self.workdir, jn=self.jobname, seg=segment.getNameString()))", "def contains(name):", "def has_invites(self):\r\n return self.invite_ct > 0", "def __isVerb__(self, word):\n self.verbs = ('go', 'stop', 'kill', 'eat')\n for verb in self.verbs:\n if verb == word:\n return ('verb', word), True\n return None, False", "def test_support_INVEX(self):\n self.assertEqual(self._parseFeature(\"INVEX\", \"Z\"), \"Z\")\n self.assertEqual(self._parseFeature(\"INVEX\"), \"I\")", "def is_vert(e) :\n f = e[0][0]\n for t in e :\n if f != t[0] :\n return False\n return True", "def in_results(compound):\n name = decode(compound)\n return (name in results_bank)", "def one_v_one(description):\n return \"forebrain\" in description", "def is_ftf(self):\n g = self.get_gene().get_seq()\n if 'd' != g[1]:\n return False\n if not len(g) >= 4:\n return False\n for x in range(2, len(g)):\n dec = 'c' if x % 2 == 0 else 'd'\n if dec != g[x]:\n return False\n return True", "def _isFIdx(self, featureName):\n return 1 if (featureName in self.featureNames) else 0", "def contain_op(self, expr):\n return expr in self.table.inv", "def containsEdge(self, e):\n return any(e.nvt in [self.vertices[i-2], self.vertices[i]] and self.vertices[i-1] == e.pvt for i in range(len(self.vertices)))", "def is_in_box_rt(self, rt):\n regions = self.boxes_rt.at(rt)\n if len(regions) > 0:\n return True\n else:\n return False", "def __contains__(self, seq):\n return bool(libhts.faidx_has_seq(self._fai, seq))", "def isin(hi):\n return getme.lower() in hi.lowercase", "def isOnInteriorSide(self, v):\n n = self.normalVect()\n return n.dotProduct(vector(self.vertices[0]) - vector(v)) > 0", "def uses_feature(self, fcname):\n used = False\n if any([fcname.upper() in y for y in [x.upper() for x in self._featureclasses]]):\n used = True\n return used", "def has_vectored_fields(self):\r\n return any(ftype.vector for ftype in self._by_number)", "def isDisambiguatedByNextVerb(self, word):\n\t\treturn 'verb' in disambig_const.DISAMBIGUATATION_TABLE.get(word, {});", "def _include_feature(self, name):\n return (self._feature_names is None or name in self._feature_names or\n name.startswith(self._neighbor_config.prefix))", "def __contains__(self, val):\n return val in [i[0] for i in self.registered_intents]", "def test_inV_works(self):\r\n results = self.jon.inV()\r\n assert len(results) == 1\r\n assert self.physics in results\r\n\r\n results = self.physics.inV()\r\n assert len(results) == 1\r\n assert self.eric in results\r\n\r\n results = self.eric.inV()\r\n assert len(results) == 1\r\n assert self.theoretics in results\r\n\r\n results = self.theoretics.inV()\r\n assert len(results) == 1\r\n assert self.blake in results\r\n\r\n results = self.beekeeping.inV()\r\n assert len(results) == 1\r\n assert self.jon in results\r\n\r\n results = 
self.blake.inV()\r\n assert len(results) == 1\r\n assert self.beekeeping in results", "def contains_edge(self, u: str, v: str) -> bool:\n if v in self.adj_list[u]:\n return True\n else:\n return False", "def __contains__(self, key):\n return key in self.vertList", "def has_ex_variant(funcname):\n if funcname[-2::] == \"Ex\":\n # Already an Ex..\n return False\n for func in soloud_codegen.soloud_func:\n if func[1] == (funcname + \"Ex\"):\n return True\n return False", "def is_in_adr_lexicon(text, adr_lexicon_dict):\n for item in adr_lexicon_dict:\n if item.lower() == text.lower():\n return True\n\n return False", "def __contains__(self, feature):\n return feature in self.features", "def match(self, proof: dict) -> bool:\n return proof.get(\"proofPurpose\") == self.term", "def isInvertible(self):\n return bool(self.isSquare() and self.determinant())", "def presence(label):\r\n\r\n return lambda x, y: 1.0 * ((label in x) == (label in y))", "def is_trained(self):\n return len(self.indicator_words) > 0", "def hasConstantForm(self, sentence):", "def has_keypoints_label(self, label):\n return label in self.schema", "def current_involvement(doing, eid):\n if eid in doing: return True\n for dn in active_target_of(doing, eid):\n return True\n return False", "def isinvertible(self):\n if np.all(np.abs(self.maroots) > 1):\n return True\n else:\n return False", "def token_has_vector(self, token):\n return self.svecs.vocab.unit2id(token.text) != UNK_ID", "def detect(self, fstring, fname=None):\n return True", "def contains(self, term):\n\t\tif term in self.textFile:\n\t\t\treturn True\n\t\t\n\t\treturn False", "def __check_feature(f,stopwords):\n if f == \"\" or f == None:\n return None\n if f == \"RT\":\n return False\n if f == \"via\":\n return False\n if len(re.findall(r\"(\\w)\", f)) < 1:\n return False\n if f == \"&amp\":\n return False\n if f in stopwords:\n return False\n if len(f) < 2:\n return False\n else:\n return True", "def isInverted(self):\n return self.__inverted", "def __contains__(self, doc_label):\n return doc_label in self.docs", "def __contains__(self, f) :\n if self.__disc is infinity :\n return True\n \n (s, l) = f\n\n (a, b, c) = apply_GL_to_form(self.__p1list[l], s)\n if not c % self.__level == 0 :\n return False\n \n disc = 4*a*c - b**2\n if disc == 0 :\n return gcd([a,b,c]) < self._indefinite_content_bound()\n else :\n return disc < self.__disc", "def is_snv(self):\n return len(self.REF) == 1 and all(a.type == \"SNV\" for a in self.ALT)", "def feature_one(ds, tup):\n # try:\n # if (nx.shortest_path_length(G, frm, to) == 1):\n # o1.write(\"trusted\\n\")\n # else:\n # o1.write(\"unverified\\n\")\n # except:\n # o1.write(\"unverified\\n\")\n\n return tup[0] in ds[tup[1]]", "def contains_vertex(self, vertex_name: n):\n return vertex_name in self._graph.keys()", "def filter_one_v_all(description):\n brain_parts = [\"forebrain\", \"midbrain\", \"hindbrain\"]\n for part in brain_parts:\n if part in description:\n return True\n return False", "def is_edge(self, v, w):\n return self.op_norm(v[0], w[0]) == (v[1] + w[1]) and (self.variant.is_bipartite() or v != w)", "def accepts(self, word: Iterable[str]) -> bool:\n if self._enfa is None:\n self._enfa = self.to_epsilon_nfa()\n return self._enfa.accepts(word)", "def is_independent(self, word):\n return not self.is_dependent(word)", "def isFissile(self):\n return self.name in self.fissile", "def is_V(self):\n return True", "def is_V(self):\n return True", "def is_FSAL(self):\n if np.all(self.A[-1,:]==self.b): return True\n else: return 
False", "def in_field(self, vec):\n return (abs(vec[0]) + abs(vec[1]) + abs(vec[2])) <= 2 * self.n", "def is_visible_segm(*args):\n return _ida_segment.is_visible_segm(*args)", "def __contains__(self, arg):\r\n\r\n return arg in self.grfx[0]", "def is_tft(self):\n g = self.get_gene().get_seq()\n if 'c' != g[1]:\n return False\n if not len(g) >= 4:\n return False\n for x in range(2, len(g)):\n dec = 'c' if x % 2 == 0 else 'd'\n if dec != g[x]:\n return False\n return True", "def isIntersection(self, v):\n return (any(inter.v == v for inter in self.inter1) or\n any(inter.v == v for inter in self.inter2))", "def apoOidInPerspective(apoOid, perspectiveName):\n print \"PartOfPerspectives - apoOidInPerspective\"\n print \"apoOid\"\n print apoOid\n print \"perspectiveName\"\n print perspectiveName\n\n inPerspective = False\n if apoOid in _byApoOid:\n if perspectiveName in _byApoOid[apoOid]:\n inPerspective = True\n \n print \"inPerspective\"\n print inPerspective\n \n return inPerspective", "def part_exists(requested_part:str, parts:list):\n return requested_part.lower() in parts", "def check_word(self, word):\n\n return self.graph.is_in(word)", "def _check_partner_invoice_addr(self,cr,uid,ids,context={}):\n partner_obj = self.browse(cr,uid,ids[0])\n if partner_obj.vat and partner_obj.vat[:2].upper() == 'VE' and not partner_obj.parent_id:\n res = partner_obj.type == 'invoice'\n if res:\n return True\n else:\n return False\n else:\n return True\n return True" ]
[ "0.5811271", "0.57281613", "0.5448158", "0.5358385", "0.53195685", "0.53195685", "0.52680486", "0.52578926", "0.52107763", "0.5175667", "0.5123594", "0.5117304", "0.5098134", "0.5091532", "0.50853395", "0.5074376", "0.50618905", "0.5061141", "0.50536674", "0.50494987", "0.50453997", "0.5034473", "0.5022513", "0.5004951", "0.496378", "0.49608934", "0.49582684", "0.4928828", "0.49229062", "0.49229062", "0.49172357", "0.49172357", "0.49146825", "0.49146825", "0.48992243", "0.48797077", "0.4879243", "0.4878836", "0.48739114", "0.48512033", "0.48448563", "0.4833643", "0.4823532", "0.48227584", "0.48172015", "0.48163608", "0.48045877", "0.48041221", "0.47831976", "0.47815648", "0.47786754", "0.4771812", "0.474806", "0.47466567", "0.47386578", "0.47356877", "0.47346756", "0.473025", "0.47194904", "0.47153798", "0.47105977", "0.47077945", "0.4699814", "0.46936604", "0.4689699", "0.46875548", "0.46845454", "0.46808597", "0.4663189", "0.4657501", "0.4656311", "0.4649225", "0.46401963", "0.46375564", "0.46340477", "0.46278402", "0.4627079", "0.46267238", "0.46125954", "0.4601145", "0.4597291", "0.45966405", "0.4595282", "0.45942122", "0.45941535", "0.45890945", "0.458228", "0.45761767", "0.45760697", "0.45760697", "0.45709938", "0.4570311", "0.4568508", "0.4568102", "0.45605457", "0.45555082", "0.4552878", "0.4551792", "0.45451117", "0.4540709" ]
0.67574793
0
Return the count of segments in an inventory matching a given feature mask.
def fts_count(self, fts, inv, normalize=True):
    return len(list(filter(lambda s: self.fts(s, normalize) >= fts, inv)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSegmentCount(self) -> int:\n ...", "def get_ingredient_count(cls, requestform):\n\n count = 0\n for r in requestform:\n if r[0:4] == 'item':\n count += 1\n return count", "def intersection_count(G=None, min_streets=2):\n spn = streets_per_node(G)\n node_ids = set(G.nodes)\n return sum(count >= min_streets and node in node_ids for node, count in spn.items())", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def _count_occupied_seats(grid: List[List[str]]) -> int:\n total = 0\n for row in grid:\n total += row.count('#')\n return total", "def _count_subset_neighbors(v, X):\n return len(set(v.neighbors).intersection(X))", "def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])", "def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count", "def sort_and_count_segments(self, starts, ends, points):\r\n \r\n # Cons: needs lot of memeory space\r\n lst = []\r\n for i in range(len(starts)): \r\n lst.append(range(starts[i], ends[i]+1))\r\n \r\n # store all the items in list\r\n lst_2 = []\r\n for sublist in lst:\r\n for item in sublist:\r\n lst_2.append(item)\r\n \r\n sorted_lst_2 = sorted(lst_2) # get sorted list\r\n \r\n count = [0] * len(points)\r\n \r\n # find item via binary search and count the occuranace of the item.\r\n for i in range(len(points)):\r\n if self.binary_search_for_count_segments(sorted_lst_2, points[i]) == points[i]:\r\n count[i] += sorted_lst_2.count(points[i])\r\n \r\n return count", "def count_region(\n reference_seq, # type: pyfaidx.Fasta\n region, # type: Tuple[str, int, int]\n pattern=None # type: Optional[str]\n): # type: (...) 
-> int\n\n chrom, start, end = region\n seq = reference_seq[chrom][int(start):int(end)]\n\n return _count_sequence(seq, regex=_build_regex(pattern))", "def calculate_number_of_segments(self):\n return sum(len(eg.transcript_file.segments) for eg in self.exemplars)", "def count_segments(markers) -> int:\n cnt = Counter()\n for row in markers:\n cnt.update(row)\n n_cnt = dict(takewhile(lambda x: x[1] >= 10, cnt.most_common()))\n del n_cnt[1]\n del n_cnt[-1]\n return len(n_cnt.keys())", "def count_segments(self, raw_only: bool = False) -> int:\n if self.segments:\n self_count = 0 if raw_only else 1\n return self_count + sum(\n seg.count_segments(raw_only=raw_only) for seg in self.segments\n )\n else:\n return 1", "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def count(sub_stng, stng):\n instance_count = 0\n start_index = 0\n while stng.find(sub_stng, start_index) != -1:\n instance_count += 1\n start_index = stng.find(sub_stng, start_index) + 1\n\n return instance_count", "def test_vector_feature_count(self):\n\n # Read and verify test data\n for vectorname in ['test_buildings.shp',\n 'tsunami_building_exposure.shp',\n 'Padang_WGS84.shp',\n 'OSM_building_polygons_20110905.shp',\n 'OSM_subset.shp']:\n\n filename = '%s/%s' % (TESTDATA, vectorname)\n layer = read_layer(filename)\n coords = layer.get_geometry()\n attributes = layer.get_data()\n\n # Check basic data integrity\n N = len(layer)\n assert len(coords) == N\n assert len(attributes) == N\n assert FEATURE_COUNTS[vectorname] == N", "def count(self,val):\n return sum(1 for e in self.frontierpq if e[0]==val)", "def street_segment_count(Gu):\n if nx.is_directed(Gu): # pragma: no cover\n msg = \"`Gu` must be undirected\"\n raise ValueError(msg)\n return len(Gu.edges)", "def get_number_of_measurement(self):\n used_fragments = set()\n counter = 0\n for fragment in self.observed_fragments:\n num_of_isotope = 0\n used_counter = 0\n for i in self.mdv[fragment]:\n num_of_isotope = num_of_isotope + 1\n if self.mdv[fragment][i]['use'] == 'use':\n\n counter = counter + 1\n used_counter = used_counter + 1\n if num_of_isotope == used_counter:\n used_fragments.add(fragment)\n return counter-len(used_fragments)", "def get_count_vector(self, result_vector, \n feature_type, target_label ):\n key = []\n for versus_labels in result_vector:\n tokened_versus_labels = re.findall(\"'(\\w+)'\", versus_labels)\n key.append(tuple(tokened_versus_labels))\n \n for t in self.total_histogram[feature_type]:\n if sorted(key) == sorted(eval(t)):\n key = t\n \n target_label = unicode(target_label)\n \n \n bin_indices = []\n for versus_labels in result_vector:\n bin_indices.append(self.get_bin(result_vector[versus_labels]))\n bin_indices = unicode(tuple(bin_indices))\n \n if target_label in self.total_histogram[feature_type][key]:\n if bin_indices in self.total_histogram[feature_type][key][target_label]:\n return self.total_histogram[feature_type][key][target_label][bin_indices] \n return 0", "def count_indications(self) -> int:\n return self._count_model(Indication)", "def compute_detection_counts(kinds, valid_mask, aoi_mask, scene_counts):\n scene_counts = np.maximum(scene_counts, 1)\n if len(kinds):\n pairs = (kinds == 'pair_trawlers')\n singles = (kinds == 'single_trawler')\n scales = (kinds == 'pair_trawlers') * 2 + (kinds == 'single_trawler')\n aoi_pts = round((scales * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n aoi_pairs = round((pairs * (valid_mask & 
aoi_mask) / scene_counts).sum(), 1) \n else:\n aoi_pts = aoi_pairs = 0\n return aoi_pts, aoi_pairs", "def create_scene_counts(df, scene_map):\n scene_counts = np.zeros(len(df), dtype=int)\n # \n dates = set([x[:8] for x in df.scene_id])\n assert len(dates) == 1 # Not necessary, but true for what we are doing\n detect_pts = np.array([(x.longitude, x.latitude) for x in df.itertuples()])\n if len(detect_pts):\n for scene_id in scene_map:\n if scene_id[:8] in dates:\n scene = scene_map[scene_id]\n scene_bounds = json.loads(scene.boundary)\n sbound_lons = [y['lon'] for y in scene_bounds]\n sbound_lats = [y['lat'] for y in scene_bounds]\n scene_poly = mplpath.Path(np.transpose([sbound_lons, sbound_lats]))\n scene_counts += scene_poly.contains_points(detect_pts)\n return scene_counts", "def countAtom (dico_count, PDB_parsed, debug = 0):\n count = 0\n \n for atom in PDB_parsed : \n residue = tool.transformAA(atom[\"resName\"])\n if debug : print residue\n \n if residue in dico_count : \n atom_Name = atom[\"name\"]\n if atom_Name in dico_count[residue] : \n count = count + 1\n return count", "def detect_num_feats_in_image(img, **kwargs):\n # We dont need to find vectors at all here\n kwargs['only_count'] = True\n #kwargs['only_count'] = False\n #Valid keyword arguments are: + str(HESAFF_PARAM_DICT.keys())\n hesaff_ptr = _new_image_hesaff(img, **kwargs)\n if __DEBUG__:\n print('[hes] detect')\n # Get num detected\n nKpts = HESAFF_CLIB.detect(hesaff_ptr)\n HESAFF_CLIB.free_hesaff(hesaff_ptr)\n return nKpts", "def Test_NumSegments(Daten):\n N_Leitungen = len(Daten.PipeSegments)\n\n return N_Leitungen", "def H_count(mol,idxs):\n mol_H=Chem.AddHs(mol)\n hcount=0\n for num, bonds in enumerate(mol_H.GetBonds()):\n if mol_H.GetBondWithIdx(num).GetBeginAtomIdx() == idxs:\n if mol_H.GetAtomWithIdx(mol_H.GetBondWithIdx(num).GetEndAtomIdx()).GetSymbol() == 'H':\n hcount += 1\n elif mol_H.GetBondWithIdx(num).GetEndAtomIdx() == idxs:\n if mol_H.GetAtomWithIdx(mol_H.GetBondWithIdx(num).GetBeginAtomIdx()).GetSymbol() == 'H':\n hcount += 1\n return hcount", "def num_complementary_regions(self):\n g = self._get_puncturefinder_graph()\n # return g.connected_components_number()\n return nx.number_connected_components(g)", "def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret", "def count_variants(filename, content=None):\n open_fn = gzip.open if is_gz_file(filename) else open\n count = 0\n with open_fn(filename, \"rt\") as ifile:\n for line in ifile:\n if not line.startswith(\"#\"):\n if content:\n if content in line:\n count += 1\n else:\n count += 1\n return count", "def chunk_count(vaid):\n out, err = _iquery(\"op_count(filter(list('chunk map'), aid=%s))\" % vaid)\n assert not err, \"chunk_count(%d): %s\" % (vaid, err)\n return int(out)", "def fruit_nb(x):\r\n return len([y for y in metamer(x) if Feature(y, 'fruit')])", "def get_regions_counts(fname, seglen, mincounts):\n counts = defaultdict(int)\n seglen=int(seglen)\n with open(fname) as fin:\n infile = csv.DictReader(fin, delimiter='\\t')\n for line in infile:\n if int(line['interactions']) < mincounts:\n continue\n t_reg = (\n line['RNA1 chromosome'],int(int(line['Start of RNA1 first read'])/seglen)*seglen,\n line['RNA1 strand'], \n line['RNA2 chromosome'],int(int(line['Start of RNA2 last read'])/seglen)*seglen,\n line['RNA2 strand'])\n\n counts[t_reg] = int(line['interactions'])\n return counts", "def flagser_contain(adjacency_matrix):\n N=adjacency_matrix.shape[0]\n 
row,col=convertCOO(adjacency_matrix,ret_data=False)\n return compute_cell_count(N, np.transpose(np.array( (row,col))))", "def heavy_count(mol,idxs):\n count = 0\n for num, bonds in enumerate(mol.GetBonds()):\n if mol.GetBondWithIdx(num).GetBeginAtomIdx() == idxs:\n if mol.GetAtomWithIdx(mol.GetBondWithIdx(num).GetEndAtomIdx()).GetSymbol() != 'H':\n count += 1\n elif mol.GetBondWithIdx(num).GetEndAtomIdx() == idxs:\n if mol.GetAtomWithIdx(mol.GetBondWithIdx(num).GetBeginAtomIdx()).GetSymbol() != 'H':\n count += 1\n return count", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def count(handle, extractor, sample_size, threshold, use_freq=False):\n barcodes = defaultdict(int)\n\n for i, record in enumerate(SeqIO.parse(handle, guess_file_format(handle))):\n if i > sample_size:\n break\n barcodes[extractor.get(record)] += 1\n\n if use_freq:\n return filter(lambda x: barcodes[x] >= threshold, barcodes)\n return sorted(barcodes, key=barcodes.get, reverse=True)[:threshold]", "def support_count(pattern, D):\n support_count = 0\n tmp_p = set(pattern)\n for transaction in D:\n if tmp_p <= set(transaction):\n support_count += 1\n return support_count", "def get_occupied_count(self, position):\n\n total_occupied = 0\n visible_seats = self.get_visible_seats(position)\n\n for seat in visible_seats:\n if self.seats[seat] == \"#\":\n total_occupied += 1\n\n return total_occupied", "def count_values_in_list(self,list_,value,start=0,end=None):\r\n return self.get_slice_from_list(list_,start,end).count(value)", "def count_labels(labels, num_classes):\n return np.array([\n np.bincount(segment_labels, minlength=num_classes) for _, segment_labels in labels\n ])", "def count_num_masked_tiles(subgrid):\n\n\tnum_masked_tiles = 0\n\tfor tile in subgrid:\n\t\tif (tile == MaskedTile.MASKED) or (tile == MaskedTile.FLAG):\n\t\t\tnum_masked_tiles += 1\n\n\treturn num_masked_tiles", "def __len__(self):\n return sum(f.count for f in self.filters)", "def count():", "def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass", "def get_count(feature, value):\r\n return CourseEnrollment.objects.filter(\r\n course_id=course_id,\r\n **get_filter(feature, value)\r\n ).count()", "def feature_index(self, feature: Text) -> int:\n count = 0\n for feature_name in self.vectorizer.get_feature_names():\n if(feature == feature_name):\n return count\n count += 1", "def countComponents26(cube):\n n,l = labelComponents26(cube);\n return n;", "def all_segs_matching_fts(self, ft_mask):\n matching_segs = [ipa for (ipa, fts) in self.segments if fts >= ft_mask]\n return sorted(matching_segs, key=lambda x: len(x), reverse=True)", "def test_count_reads_in_region_total(self):\n self.c.skipZeros = False\n self.c.stepSize = 200\n self.c.binLength = 200\n resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200)\n nt.assert_equal(resp, np.array([[2, 4.]]))", "def count_segments_naive(self, starts, ends, points):\r\n count = [0] * len(points)\r\n \r\n for i in range(len(points)):\r\n for j in range(len(starts)):\r\n if starts[j] <= points[i] <= ends[j]:\r\n count[i] += 1\r\n \r\n return count", "def get_count(self, tag: Text) -> int:\r\n sub_tags = tag.split(\"+\")\r\n return len([e for e in self.elements if all(t in e.tags for t in sub_tags)])", "def partition_Basic(segfile):\n scenelist = Recording.read_segs(segfile)\n segcount = 0\n for l in scenelist.values():\n segcount += len(l)\n return scenelist, segcount", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def 
k_ary_support_count(itemset, tagnamesdict):\n X = itemset[0]\n x_list = tagnamesdict[X]\n inter = set(x_list)\n\n for i in range(1, len(itemset)):\n Y = itemset[i]\n y_list = tagnamesdict[Y]\n inter = inter.intersection(y_list)\n\n support_count = len(inter)\n return support_count", "def num_instances_mgf(infile_name):\n\tinfile = open(infile_name)\n\tnum_instances = 0\n\tfor line in infile:\n\t\tif line.startswith(\"BEGIN IONS\"):\n\t\t\tnum_instances += 1\n\treturn(num_instances)", "def count(seq, predicate):\n count = 0\n for item in seq:\n if predicate(item):\n count += 1\n return count", "def count_mask(mask):\n count = int(mask.sum())\n if count == 0:\n return count, None, None, None, None\n\n # argmax for mask finds the first True value\n x_min = (mask.argmax(axis=0) != 0).argmax()\n x_max = mask.shape[1] - np.flip((mask.argmax(axis=0) != 0), axis=0).argmax() - 1\n w = (mask.shape[1] - np.flip((mask.argmax(axis=0) != 0), axis=0).argmax()\n - (mask.argmax(axis=0) != 0).argmax())\n h = (mask.shape[0] - np.flip((mask.argmax(axis=1) != 0), axis=0).argmax()\n - (mask.argmax(axis=1) != 0).argmax())\n return count, w, h, x_min, x_max", "def get_event_count(event_times, start, end):\n mask = (event_times > start) & (event_times <= end)\n return event_times[mask].size", "def count_segments(s):\n s = s.strip().split()\n return len(s)", "def counts(e, x):\n arr = np.asarray(arr)\n return len(np.where(arr == x)[0])", "def num_cusps_of_regions(self):\n G = self._get_puncturefinder_graph()\n # return [sum(G.subgraph(vertices=region).edge_labels())\n # for region in G.connected_components()]\n return [sum(edge[2]['weight']\n for edge in subgraph.edges(data=True))\n for subgraph in nx.connected_component_subgraphs(G)]", "def count_if(self, criteria):\n # set count to 0\n count = 0\n # iterate through nodes in deque\n for item in self:\n # if the node's data meets the criteria passed,\n if criteria(item):\n # increment count\n count += 1\n # return the count\n return count", "def count_neighbor_flags(self, x, y):\n\t\treturn sum(self.marks[n][m] == FLAG for (n, m) in self.get_valid_neighbors(x, y))", "def count(seq):\n\treturn sum(1 for x in seq)", "def count_neighbor_flags(self, i, j):\n return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])", "def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found", "def count(a, sub, start=0, end=None):\n return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end))", "def numSegments(self):\n\n return self.getHierView().numSegments()", "def part_2(ranges: 'RangeSet', total_ips_count: int = 1 << 32) -> int:\n\n allowed_count = total_ips_count - len(ranges)\n print(f\"part 2: there are total {allowed_count} allowed IPs\")\n return allowed_count", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def _count_adj_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n if row - 1 >= 0:\n if col - 1 >= 0:\n count += 1 if grid[row - 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row - 1][col + 1] == '#' else 0\n count 
+= 1 if grid[row - 1][col] == '#' else 0\n if row + 1 < len(grid):\n if col - 1 >= 0:\n count += 1 if grid[row + 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row + 1][col + 1] == '#' else 0\n count += 1 if grid[row + 1][col] == '#' else 0\n if col - 1 >= 0:\n count += 1 if grid[row][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row][col + 1] == '#' else 0\n return count", "def countOccurrences(lst, x):\n res = 0\n for i in lst:\n if i == x:\n res += 1\n return res", "def total_occurrences(word1, word2, flag):\n result = 0\n word1_length = len(word1)\n for i in range(word1_length):\n if word1[i] == flag:\n result += 1\n\n word2_length = len(word2)\n for i in range(word2_length):\n if word2[i] == flag:\n result += 1\n\n return result", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def count_hits_region(location, region):\n l=len(region)\n c=0\n for i in range(0,l-1):\n if hits_border(location,region[i],region[i+1])==True:\n c=c+1\n return c", "def trace_region_count(self):\n cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS\n data = ctypes.c_uint32(0)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace region count.')\n return data.value", "def test_over_mask_over_regions_segmented_and_whole_extractor(region, images_used):\n atlas = None\n dict_parameters = None\n reshape_kind = None\n\n if images_used == \"MRI\":\n atlas = mri_atlas.load_atlas_mri()\n dict_parameters = MRI_stack_NORAD.get_parameters()\n reshape_kind = \"C\"\n\n elif images_used == \"PET\":\n atlas = pet_atlas.load_atlas()\n dict_parameters = PET_stack_NORAD.get_parameters()\n reshape_kind = \"F\"\n\n whole_mask_flatten, mask_segmented_flatten = \\\n get_whole_region_mask_and_region_segmented_mask(\n atlas=atlas,\n dict_parameters=dict_parameters,\n region=region,\n reshape_kind=reshape_kind)\n\n print(\"Number voxels activaed in whole MRI: {0}\\n\"\n \"length whole image: {1} \\n\"\n \"Number voxles activaed in region segmented 3d: {2}\\n\"\n \"length region segmented {3}\".format(\n sum(whole_mask_flatten), len(whole_mask_flatten),\n sum(mask_segmented_flatten), len(mask_segmented_flatten)))", "def count(self, args):\n counter = 0\n lists = args.split()\n\n if lists[0] not in HBNBCommand.class_check:\n print(\"** class doesn't exist **\")\n return\n\n objects = storage.all()\n for key in objects:\n name = key.split('.')\n if name[0] == lists[0]:\n counter += 1\n print(counter)", "def count(self):\n return self.vcount", "def feature_count(self, f, cat):\n res = self.con.execute(\n 'select count from fc where feature=\"%s\" and category=\"%s\"'\n %(f, cat)).fetchone()\n \n if res == None:\n return 0\n else:\n return float(res[0])", "def find_function_candidates(remaining_segments):\n segment_counts = {}\n for segment in remaining_segments:\n for start in range(0, len(segment)):\n for end in range(start + 1, len(route_steps)):\n subsegment = tuple(segment[start:end + 1])\n if len(\",\".join(subsegment)) <= 20:\n segment_counts[subsegment] = segment_counts.get(subsegment, 0) + 1\n\n result = []\n for sequence, count in 
dict(segment_counts).items():\n if count > 1:\n result.append((list(sequence), count))\n result.sort(reverse=True, key=lambda seq: len(seq[0]) * seq[1])\n return result", "def count_occurence(self, idx: int) -> int:\n instruction = self.trace[idx]\n addr = instruction.ip\n cnt = 0\n step = 1 if idx > self.current_index else -1\n for i in range(self.current_index, idx, step):\n e = self.trace[i]\n if e.ip == addr:\n cnt += 1\n return cnt", "def patch_areas(patch_ids):\n\n return np.bincount(patch_ids.reshape((-1,)))[1:]", "def total_data(map_index, next_sse_index, ss_def, contacts_def):\n no_of_contacts = 0\n contacts_true = contacts_def.keys()\n start, end = ss_def[next_sse_index][3], ss_def[next_sse_index][4]\n for i in range(start, end + 1):\n if i in contacts_true:\n contacts = contacts_def[i]\n for contact in contacts:\n for index in map_index:\n tstart, tend = ss_def[index][3], ss_def[index][4]\n if contact in range(tstart, tend + 1):\n no_of_contacts += 1\n return no_of_contacts", "def count(self, contig=None, start=None, stop=None, region=None,\n until_eof=False, tid=None, read_callback='nofilter',\n reference=None, end=None):\n\n # pass the signature to fetch\n signature = locals()\n signature.pop('read_callback')\n signature.pop('self')\n roi_reads = self.fetch(**signature)\n # make `nofilter` the default filter unless told otherwise\n # read_callback = kwargs.get('read_callback', 'nofilter')\n\n # go through all the reads over a given region and count them\n count = 0\n for read in roi_reads:\n if filter_read(read, read_callback):\n count += 1\n return count", "def path_count(path):\n\n def x_is_contained_in_y(bin_num):\n x_and_y = bin_num & path == bin_num\n x_or_y = bin_num | path == path\n x_and_y_complement = bin_num & (-path - 1) == 0\n return x_and_y and x_or_y and x_and_y_complement\n\n return sum([x_is_contained_in_y(bin_num) for bin_num in pow2(path)])", "def get_num_hit(boxes_truth, boxes_pred, is_hit):\n out = 0\n for tbox in boxes_truth:\n for pbox in boxes_pred:\n if is_hit(tbox, pbox):\n out += 1\n return out", "def numAtoms(self, flag=None):\n\n return len(self._getSubset(flag)) if flag else self._n_atoms", "def overlap_count(haystack, needle):\n count = 0\n index = 0\n while True:\n try:\n i = haystack.index(needle, index)\n except ValueError:\n break\n count += 1\n index = i+1\n return count", "def count(self, volume):\n\n countResult = 0\n\n for x in range(volume.shape[0]):\n for y in range(volume.shape[1]):\n for z in range(volume.shape[2]):\n if self.isMember(volume[x,y,z]):\n countResult += 1\n\n return countResult", "def count(self):\n return sum([self.bits[x][y] for x in range(self.n_rows)\n for y in range(self.n_columns)])", "def num_linearly_ind_features(self, S, eps=1e-11):\n return len(S[S >= eps])", "def countBlocksWithFlags(self, blockTypeSpec, assemTypeSpec=None):\n assems = self.getAssemblies(typeSpec=assemTypeSpec)\n try:\n return max(sum(b.hasFlags(blockTypeSpec) for b in a) for a in assems)\n except ValueError:\n # In case assems is empty\n return 0", "def ship_count(self):\r\n return sum(f.ship_count for f in self)", "def _count_code(code_ser, code):\n return (code_ser == code).sum()", "def get_parts_count(self):\n\t\treturn call_sdk_function('PrlSrvCfgHdd_GetPartsCount', self.handle)", "def testSectionCount(self):\n\n self.sectionCount(3640)", "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0" ]
[ "0.60189056", "0.57778174", "0.5745941", "0.5679098", "0.562681", "0.5572302", "0.54622877", "0.5433682", "0.5416207", "0.5405498", "0.5370428", "0.5358139", "0.5354548", "0.5344947", "0.5300131", "0.5284212", "0.5281979", "0.527662", "0.525905", "0.5236418", "0.52358586", "0.5224278", "0.52191806", "0.5191498", "0.5187952", "0.51805085", "0.51687", "0.5166016", "0.5158889", "0.51479965", "0.5137742", "0.51289386", "0.5123505", "0.5118949", "0.51159656", "0.5106756", "0.5078641", "0.5071177", "0.50663555", "0.5062608", "0.50579596", "0.5040406", "0.5026596", "0.5022952", "0.50217545", "0.50187635", "0.5015077", "0.50117147", "0.5005507", "0.49977344", "0.4996591", "0.4995394", "0.49913204", "0.49779958", "0.4975999", "0.49620935", "0.4953921", "0.49372828", "0.49355116", "0.49303204", "0.49283367", "0.49170336", "0.49162364", "0.49130479", "0.49049503", "0.48990777", "0.4892409", "0.4882447", "0.48706448", "0.48617488", "0.48591965", "0.48578468", "0.48496702", "0.48495632", "0.48419297", "0.48384017", "0.4837738", "0.4833276", "0.48312747", "0.4831168", "0.48279262", "0.4823886", "0.4807536", "0.4805543", "0.48022547", "0.4799193", "0.47974744", "0.47956035", "0.47933936", "0.47910127", "0.47901806", "0.4789581", "0.47888717", "0.47885978", "0.478479", "0.47830704", "0.4776869", "0.47727188", "0.47657835", "0.47646534" ]
0.49445313
57
Implements fixed-width pattern matching. Matches just in case pattern is the same length (in segments) as the word and each of the segments in the pattern is a featural subset of the corresponding segment in the word. Matches return the corresponding list of feature sets; failed matches return None.
def match_pattern(self, pat, word, normalize=True):
    segs = self.word_fts(word, normalize)
    if len(pat) != len(segs):
        return None
    else:
        if all([s >= p for (s, p) in zip(segs, pat)]):
            return segs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_segs_matching_fts(self, ft_mask):\n matching_segs = [ipa for (ipa, fts) in self.segments if fts >= ft_mask]\n return sorted(matching_segs, key=lambda x: len(x), reverse=True)", "def better_matching(self, pattern, offsets=False, slice_len=5):\n first_occurencies, counts_at = self.setup_better_match(slice_len)\n pattern = list(pattern)\n top, bottom = 0, self.tl\n while top <= bottom:\n if pattern:\n current = pattern.pop()\n if counts_at(bottom+1)[current] - counts_at(top)[current] > 0:\n #if current in self.last_col[top:bottom+1]:\n top = first_occurencies[current] + counts_at(top)[current]\n bottom = first_occurencies[current] + counts_at(bottom + 1)[current] - 1\n else:\n if offsets:\n return []\n else:\n return 0\n else:\n if offsets:\n return self.offsets(top, bottom)\n else:\n return bottom - top + 1", "def match_features(desc1, desc2):\n bf = cv2.BFMatcher(cv2.NORM_HAMMING)\n matches = bf.knnMatch(desc1, desc2, k=2) # typo fixed\n\n # Apply ratio test\n good = []\n for m, n in matches:\n if m.distance < 0.9*n.distance:\n good.append([m])\n\n return good", "def all_matches(self, pattern):\n\n pat_len = len(pattern)\n if pat_len > self.text_len:\n raise ValueError(\"Pattern length is bigger than text\")\n\n bad_char_table = self._bad_char_table(pattern)\n L = self._good_suffix_table_one(pattern)\n H = self._good_suffix_table_two(pattern)\n indexes = []\n\n pat_end_ind = pat_len - 1\n prev_end = -1\n # prev_end - previous index of pattern end relative to text (for Galil's rule)\n # p - index of char in pattern\n # t - index of char in text\n while pat_end_ind < self.text_len:\n p = pat_len - 1\n t = pat_end_ind\n while p >= 0 and t > prev_end and pattern[p] == self.text[t]:\n p -= 1\n t -= 1\n if p == -1 or t == prev_end: # Matched or holds Galil's rule\n indexes.append(pat_end_ind - pat_len + 1)\n pat_end_ind += pat_len - H[1] if pat_len > 1 else 1\n else:\n char_shift = self._bad_char_shift(self.text[t], bad_char_table)\n if p+1 == pat_len:\n suffix_shift = 1\n elif L[p] == -1:\n suffix_shift = pat_len - H[p+1]\n else:\n suffix_shift = pat_len - L[p]\n shift = max(char_shift, suffix_shift)\n prev_end = pat_end_ind if shift >= p+1 else prev_end #update parameter for Galil's rule\n pat_end_ind += shift\n\n return indexes", "def _internal_match(self, pattern):\n compiled_re = re.compile(pattern)\n for word in self.words:\n if compiled_re.fullmatch(word) is not None:\n yield word", "def findfeatures(xarr, farr, sl, sf, ws, mdiff=20, wdiff=20, sigma=5, niter=5,\n sections=3):\n\n # detect lines in the input spectrum and identify the peaks and peak values\n xp, xf = find_points(xarr, farr, kernal_size=sigma, sections=sections)\n\n # return no solution if no peaks were found\n if len(xp) == 0:\n return None\n\n # find the best match to the lines\n wp = findmatch(xarr, farr, xp, xf, sl, sf, ws, xlimit=mdiff, wlimit=wdiff)\n\n try:\n for i in range(len(xp)):\n if wp[i] > -1:\n pass\n except Exception as e:\n message = 'Unable to match line lists because %s' % e\n raise SpecError(message)\n return xp, wp", "def add_substring_match_features(self,lst1,lst2,normalize_factor = 1.):\n for i in range(self.RE_LOW,self.RE_HI+1):\n for j in range(self.WILD_LOW,self.WILD_HI+1):\n self.features.append(Measures.longest_substring_with_wildcards(lst1, lst2, i, j) / normalize_factor)", "def match_features(kp1, kp2, des1, des2):\n FLANN_INDEX_LSH = 6\n index_params= dict(algorithm = FLANN_INDEX_LSH,\n table_number = 6, # 12\n key_size = 12, # 20\n multi_probe_level = 2) #2\n\n search_params = 
dict(checks=50) # or pass empty dictionary\n flann = cv2.FlannBasedMatcher(index_params,search_params)\n matches = flann.knnMatch(des1,des2,k=2)\n good = []\n for (m,n) in matches:\n if m.distance < 0.8*n.distance: ## Lowe's ratio imp for tuning\n good.append(m)\n\n if len(good) < 20:\n return []\n return good", "def get_fixed_length_feat(self, feat, num_segment, start_pos, end_pos):\n nfeats = feat[:,:].shape[0]\n if nfeats <= self.num_segments:\n stride = 1\n else:\n stride = nfeats * 1.0 / num_segment\n if self.split != \"train\":\n spos = 0\n else:\n random_end = -0.5 + stride\n if random_end == np.floor(random_end):\n random_end = random_end - 1.0\n spos = np.random.random_integers(0,random_end)\n s = np.round( np.arange(spos, nfeats-0.5, stride) ).astype(int)\n start_pos = float(nfeats-1.0) * start_pos\n end_pos = float(nfeats-1.0) * end_pos\n\n if not (nfeats < self.num_segments and len(s) == nfeats) \\\n and not (nfeats >= self.num_segments and len(s) == num_segment):\n s = s[:num_segment] # ignore last one\n assert (nfeats < self.num_segments and len(s) == nfeats) \\\n or (nfeats >= self.num_segments and len(s) == num_segment), \\\n \"{} != {} or {} != {}\".format(len(s), nfeats, len(s), num_segment)\n\n start_index, end_index = None, None\n for i in range(len(s)-1):\n if s[i] <= end_pos < s[i+1]:\n end_index = i\n if s[i] <= start_pos < s[i+1]:\n start_index = i\n\n if start_index is None:\n start_index = 0\n if end_index is None:\n end_index = num_segment-1\n\n cur_feat = feat[s, :]\n nfeats = min(nfeats, num_segment)\n out = np.zeros((num_segment, cur_feat.shape[1]))\n out [:nfeats,:] = cur_feat\n return out, nfeats, start_index, end_index", "def add_substring_match_features(self,lst1,lst2,normalize_factor = 1.):\n for j in range(self.WILD_LOW,self.WILD_HI+1):\n self.features.append(Measures.longest_substring_with_wildcards(lst1, lst2, j) / normalize_factor)", "def match(self, pattern, d=0, conserved_part=0, slice_len=5):\n first_occurencies, counts_at = self.setup_better_match(slice_len)\n pattern = list(pattern)\n top, bottom = 0, self.tl\n if d == 0:\n conserved_part = len(pattern)\n for i in range(conserved_part):\n if pattern:\n current = pattern.pop()\n if counts_at(bottom+1)[current] - counts_at(top)[current] > 0:\n #if current in self.last_col[top:bottom+1]:\n top = first_occurencies[current] + counts_at(top)[current]\n bottom = first_occurencies[current] + counts_at(bottom + 1)[current] - 1\n else:\n return []\n else:\n return self.offsets(top, bottom)\n candidates = [{ 'offset': i, 'errors': 0 } for i in xrange(top, min(bottom+1, self.tl-1))]\n while candidates and pattern:\n dellist = []\n current = pattern.pop()\n for ndx, candidate in enumerate(candidates):\n letter = self.last_col[candidate['offset']]\n if letter != current:\n candidate['errors'] += 1\n if candidate['errors'] > d or (letter == '$' and len(pattern)):\n dellist.append(ndx)\n continue\n candidate['offset'] = first_occurencies[letter] + counts_at(candidate['offset'])[letter]\n candidates = [c for (i,c) in enumerate(candidates) if i not in dellist]\n return self.offsets(starts=[c['offset'] for c in candidates])", "def partial_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + f'+{self.chars}c'\n self.text.tag_add('found', start, end)\n start = end", "def partial_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end 
= start + f'+{self.chars}c'\n self.text.tag_add('found', start, end)\n start = end", "def match_features(self):\n type_of_None = type(None)\n if type(self.featureDesA) != type_of_None and type(self.featureDesB) != type_of_None:\n matches = self.bfMatcher.match(self.featureDesA, self.featureDesB)\n self.match = sorted(matches, key=lambda x: x.distance)\n self.match = self.match[:50]\n else:\n self.match = []", "def exact_find_in_pattern(fixed_text, cur=0, patterns=PATTERNS):\n return [x for x in patterns if (cur + len(x['find']) <= len(fixed_text))\n and x['find'] == fixed_text[cur:(cur + len(x['find']))]]", "def match(self, pattern):\n result = Words()\n result.words = self._internal_match(pattern)\n return result", "def findall(pattern, string, overlapping=True, sensitive=True, regexp=False):\n if regexp:\n return SE.occurrences_re(pattern, string)\n if overlapping:\n return SE.occurrences(pattern, string, sensitive)\n else:\n return SE.full_words(pattern, string, sensitive)", "def __call__(self: TokenMatcher, doc: Doc) -> List[Tuple[str, int, int, None]]:\n mapped_patterns = defaultdict(list)\n matcher = Matcher(self.vocab)\n for label, patterns in self._patterns.items():\n for pattern in patterns:\n mapped_patterns[label].extend(\n _spacyfy(\n self._searcher.match(doc, pattern, **self.defaults),\n pattern,\n )\n )\n for label in mapped_patterns.keys():\n matcher.add(label, mapped_patterns[label])\n matches = matcher(doc)\n if matches:\n extended_matches = [\n (self.vocab.strings[match_id], start, end, None)\n for match_id, start, end in matches\n ]\n extended_matches.sort(key=lambda x: (x[1], -x[2] - x[1]))\n for i, (label, _start, _end, _details) in enumerate(extended_matches):\n on_match = self._callbacks.get(label)\n if on_match:\n on_match(self, doc, i, extended_matches)\n return extended_matches\n else:\n return []", "def match(self):\r\n results = []\r\n pattern = self.pattern\r\n text = self.text\r\n m = len(self.pattern)\r\n n = len(self.text)\r\n p = self._prefix\r\n k = 0\r\n for i in range(n):\r\n while k > 0 and text[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == text[i]:\r\n k = k+1\r\n if k == m:\r\n results.append(i-m+1)\r\n k = p[k-1]\r\n return results", "def __get_word_window_one_word_help(self, pattern, tokens, constraints):\n textsnippets = []\n textlength = len(tokens)\n for ind, token in enumerate(tokens):\n if check_pattern(pattern, token):\n if constraints is not None:\n self.__check_constraints(constraints, (ind, ind), ind, pattern, None, None, textsnippets, tokens)\n else:\n self.__get_word_window_help((ind, ind), textsnippets, textlength, tokens)\n return textsnippets", "def find_pattern(pattern, text):\r\n pat_text = pattern + '$' + text\r\n\r\n prefixes = [None] * len(pat_text)\r\n\r\n prefixes[0] = 0\r\n border = 0\r\n matches = []\r\n for idx, letter in enumerate(pat_text[1:], start=1):\r\n while border > 0 and letter != pat_text[border]:\r\n border = prefixes[border - 1]\r\n if letter == pat_text[border]:\r\n border = border + 1\r\n else:\r\n border = 0\r\n\r\n if border == len(pattern):\r\n matches.append(idx - len(pattern) - border)\r\n prefixes[idx] = border\r\n\r\n return matches", "def findMatches(sequence, patterns):\n#\n#\n# idGenerator = IdGenerator()\n# root = Edge('', None, idGenerator)\n# i = 0\n# sequence = sequence + '$'\n# print len(sequence)\n# for i in range(len(sequence)):\n# seq = sequence[i:]\n# edge = root\n# while len(seq) > 0:\n# edge = edge.addSequence(seq, i)\n# seq = seq[1:]\n# print i\n # root = 
buildTrie(generateSequences(sequence))\n matches = [[m.start() for m in re.finditer('(?=' + pattern + ')', sequence)] for pattern in patterns]\n return matches", "def lookup_stress_patterns_for_word(word: Text) -> Sequence[Sequence[Stress]]:\n return [\n word.pf.stress_pattern for word in EnglishUtils.all_possible_forms_for(word)\n ]", "def find_pattern(self, pattern: str) -> bool:\n\n if not pattern and self.is_word:\n return True\n\n node = self\n for i, char in enumerate(pattern):\n if char == \".\":\n res = []\n for each in node.children.values():\n res.append(each.find_pattern(pattern[i + 1:]))\n return any(res)\n\n if char in node.children:\n node = node.children[char]\n else:\n return False\n\n return node.is_word", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def find_frequent_patterns(transactions, support_threshold, possible_class_values):\n tree = FPTree(transactions, support_threshold, possible_class_values, None, None)\n # print(tree.to_string())\n return tree.mine_patterns(support_threshold)", "def spdfs(self, match):\n\n if self.flag == 1:\n print match\n # print \"Match : \", match\n\n if self.center == 0 and self.left == 0 and self.right == 0:\n return\n\n if self.center != 0:\n self.center.spdfs(match + self.center.ch)\n\n if self.right != 0:\n self.right.spdfs(match[:-1] + self.right.ch)\n\n if self.left != 0:\n self.left.spdfs(match[:-1]+self.left.ch)", "def match_rule_patterns(fixed_text, cur=0):\n pattern = exact_find_in_pattern(fixed_text, cur, RULE_PATTERNS)\n # if len(pattern) == 1:\n if len(pattern) > 0:\n return {\"matched\": True, \"found\": pattern[0]['find'],\n \"replaced\": pattern[0]['replace'], \"rules\": pattern[0]['rules']}\n else:\n return {\"matched\": False, \"found\": None,\n \"replaced\": fixed_text[cur], \"rules\": None}", "def getFeatureMatches(img1, img2):\n sift = xfeatures2d.SIFT_create()\n\n kp1, des1 = sift.detectAndCompute(img1, None)\n kp2, des2 = sift.detectAndCompute(img2, None)\n\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=50)\n\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(des1, des2, k=2)\n Left_Pts = list()\n Right_Pts = list()\n\n # Ratio criteria according to Lowe's paper\n for i, (m, n) in enumerate(matches):\n if m.distance < 0.5 * n.distance:\n Left_Pts.append(kp1[m.queryIdx].pt)\n Right_Pts.append(kp2[m.trainIdx].pt)\n\n left = np.array(Left_Pts)\n right = np.array(Right_Pts)\n features = (left, right)\n return features", "def matches(self, feature):\n pass", "def find_feature(self, pattern):\n idxs = []\n for idx, header in enumerate(self.headers):\n header = header.decode('utf-8')\n lp = len(pattern)\n\n # Find continuations\n if header == pattern:\n idxs.append(idx)\n elif header[:lp] == pattern and header[lp] in [str(i) for i in range(0, 10)]:\n idxs.append(idx)\n\n return idxs", "def fnmatch(self, pattern):\n return FNMatcher(pattern)(self)", "def find_matches(word, string_grid, separator='\\n'):\n word_len = len(word)\n if isinstance(string_grid, list):\n matrix = string_grid\n else:\n matrix = matrixify(string_grid, separator)\n row_length, column_length = len(matrix), len(matrix[0])\n base_matches = find_base_match(word[0], matrix)\n\n if column_length < word_len > row_length or not base_matches:\n return []\n 
elif word_len == 1:\n return base_matches\n\n return complete_match(word, matrix, base_matches, word_len, row_length, column_length)", "def find_match_BFS(self, array, pattern):\n\n # special case optimisation\n match = special_matches(array, pattern)\n if match.is_match:\n return match\n\n self.compile(pattern) # create the state firsts\n\n # simulate finite state machine\n idx = - 1\n stack = [0] # a stack of current states\n matches = [[]] # a stack of corresponding matches\n while idx < len(array) - 1 and stack:\n idx += 1\n K = len(stack) # note this this grows with splits\n for k in range(K):\n # advance each one at a time\n state_id = stack.pop(0)\n match = matches.pop(0)\n state = self.states[state_id]\n next_state = state.transition(array[idx])\n for s in next_state:\n if s and s.is_final:\n if array[idx+1:].count(BLACK) == 0:\n match_final = match + [s.symbol]\n match_final += [WHITE] * (len(array) - idx - 1)\n return Match(match_final, pattern=self.pattern)\n # else: its not added to the stack\n elif s:\n stack.append(s.id)\n matches.append(match + [s.symbol])\n\n return Match(pattern=self.pattern) # no match", "def find_pattern(pattern, text):\r\n result = []\r\n pattern_len = len(pattern)\r\n d_pattern_len = 2 * pattern_len\r\n if pattern_len > len(text):\r\n return []\r\n\r\n new_string = pattern + \"$\" + text\r\n prefix_function_res = prefix_function(new_string)\r\n return [x - d_pattern_len for (x, y) in filter(lambda x: x[1] == pattern_len, enumerate(prefix_function_res))]", "def SearchRe(context, pattern, arg=None):\n if not arg:\n arg = context.node\n arg = Conversions.StringValue(arg)\n matches = re.findall(pattern, arg)\n proc = context.processor\n matches_nodeset = []\n for groups in matches:\n proc.pushResult()\n proc.writers[-1].startElement('Match', EMPTY_NAMESPACE)\n if type(groups) != type(()):\n groups = (groups,)\n for group in groups:\n proc.writers[-1].startElement('Group', EMPTY_NAMESPACE)\n proc.writers[-1].text(group)\n proc.writers[-1].endElement('Group')\n proc.writers[-1].endElement('Match')\n frag = proc.popResult()\n context.rtfs.append(frag)\n matches_nodeset.append(frag.childNodes[0])\n return matches_nodeset", "def __get_word_window_more_words_help(self, split_pattern, tokens, constraints):\n textsnippets = []\n textlength = len(tokens)\n for ind, token in enumerate(tokens):\n p_index = 0\n end_index = ind\n while p_index < len(split_pattern):\n if check_pattern(split_pattern[p_index], tokens[end_index]):\n p_index += 1\n end_index += 1\n else:\n break\n if p_index == len(split_pattern):\n if constraints is not None:\n self.__check_constraints(constraints, (ind, end_index - 1), ind, split_pattern, None, None, textsnippets, tokens)\n else:\n self.__get_word_window_help((ind, end_index - 1), textsnippets, textlength, tokens)\n return textsnippets", "def kbest_matches(self, k=1, minlen=2, buffer=0):\n ki = 0\n while k is None or ki < k:\n idx = None\n lcm = None\n while idx is None:\n idx = np.unravel_index(np.argmax(self._wp, axis=None), self._wp.shape)\n if idx[0] == 0 or idx[1] == 0:\n return None\n r, c = idx\n lcm = LCMatch(self, r, c)\n for (x, y) in lcm.path:\n x += 1\n y += 1\n if len(self._wp.mask.shape) > 0 and self._wp.mask[x, y] is True: # True means invalid\n # print('found path contains masked, restart')\n lcm = None\n idx = None\n break\n else:\n self._wp[x, y] = ma.masked\n if len(lcm.path) < minlen:\n # print('found path too short, restart')\n lcm = None\n idx = None\n if buffer > 0 and lcm is not None:\n miny, maxy = 0, 
self._wp.shape[1] - 1\n minx, maxx = 0, self._wp.shape[0] - 1\n for (x, y) in lcm.path:\n xx = x + 1\n for yy in range(max(miny, y + 1 - buffer), min(maxy, y + 1 + buffer)):\n self._wp[xx, yy] = ma.masked\n yy = y + 1\n for xx in range(max(minx, x + 1 - buffer), min(maxx, x + 1 + buffer)):\n self._wp[xx, yy] = ma.masked\n if lcm is not None:\n ki += 1\n yield lcm", "def frequent_words_with_mismatches(text, k, d):\n\n patterns = []\n freq_map = {}\n n = len(text)\n for i in range(n - k + 1):\n pattern = text[i:i + k]\n pattern_rc = reverse_complement(pattern)\n neighborhood = neighbors(pattern, d) + neighbors(pattern_rc, d)\n for j in range(len(neighborhood)):\n neighbor = neighborhood[j]\n if neighbor not in freq_map.keys():\n freq_map[neighbor] = 1\n else:\n freq_map[neighbor] = freq_map[neighbor] + 1\n m = max_map(freq_map)\n for key in freq_map.keys():\n if freq_map[key] == m:\n patterns.append(key)\n return patterns", "def get_pattern(topic):\n variants = get_variants(topic)\n sub_patterns = [r'(.*\\b)%s\\b(.*)' % variant.lower() for variant in variants]\n return re.compile(r'|'.join(sub_patterns), flags=re.IGNORECASE)", "def find_matching_segments(self):\n hyp_matched_segs = [TIntervalGroup() for i in range(len(self.hyp))]\n for gid_ref, match_ref in enumerate(self.ref):\n bg_ref = match_ref.bbox_group\n max_gid, max_area = -1, 0\n for gid_hyp, bg_hyp in enumerate(self.hyp.get_bbox_groups()):\n rx, ry = bg_ref.page_range(), bg_hyp.page_range()\n if ry[0] > rx[1]:\n break\n area = (bg_ref & bg_hyp)\n if area > max_area:\n max_gid, max_area = gid_hyp, area\n if max_gid != -1:\n hyp_matched_segs[max_gid].extend(match_ref.tinterval_group.copy())\n print('%d -> %d' % (gid_ref, max_gid))\n for seg in hyp_matched_segs:\n seg.reduce()\n return hyp_matched_segs", "def ApproxStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tindex = []\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\t\t\tindex.append(i)\n\treturn index", "def patternMatching(self, x, prefix, i, pattern, queue):\n if x is None:\n return\n char = pattern[i]\n if char == '.' or char < x.key_char:\n self.patternMatching(x.left, prefix, i, pattern, queue)\n if char == '.' or char == x.key_char:\n if i == len(pattern) - 1 and x.value is not None:\n queue.enqueue(str(prefix) + str(x.key_char))\n if i < len(pattern) - 1:\n self.patternMatching(x.mid, str(prefix) + str(x.key_char), i + 1, pattern, queue)\n prefix = prefix[:-1]\n\n if char == '.' 
or char > x.key_char:\n self.patternMatching(x.right, prefix, i, pattern, queue)", "def get_word_window(self, pattern, tokens, constraints):\n split_pattern = pattern.split()\n if len(split_pattern) > 1:\n textsnippets = self.__get_word_window_more_words_help(split_pattern, tokens, constraints)\n else:\n textsnippets = self.__get_word_window_one_word_help(pattern, tokens, constraints)\n print(textsnippets)\n return textsnippets", "def match_mfovs_features(matcher_params, sec1_cache, sec2_cache, mfovs1, mfovs2):\n \n thread_local_store = ThreadLocalStorageLRU()\n if 'matcher' in thread_local_store.keys():\n matcher = thread_local_store['matcher']\n else:\n # Initialize the matcher, and store it in the local thread storage\n matcher = FeaturesMatcher(BlobDetector2D.create_matcher, **matcher_params)\n thread_local_store['matcher'] = matcher\n \n# matcher = getattr(threadLocal, 'matcher', None)\n# if matcher is None:\n# # Initialize the matcher, and store it in the local thread storage\n# matcher = FeaturesMatcher(BlobDetector2D.create_matcher, **matcher_params)\n# threadLocal.matcher = matcher\n\n def get_kps_descs(mfovs, sec_cache):\n mfovs = list(mfovs)\n if len(mfovs) == 1:\n mfovs_kps = np.array(sec_cache[\"pre_match_blobs\"][mfovs[0]][0])\n mfovs_descs = np.array(sec_cache[\"pre_match_blobs\"][mfovs[0]][1])\n else:\n mfovs_kps_arrays = []\n mfovs_descs_arrays = []\n for mfov in mfovs:\n kps_descs = sec_cache[\"pre_match_blobs\"][mfov]\n if len(kps_descs[0]) > 0:\n mfovs_kps_arrays.append(kps_descs[0])\n mfovs_descs_arrays.append(kps_descs[1])\n if len(mfovs_kps_arrays) == 0:\n mfovs_kps = np.array([])\n mfovs_descs = np.array([])\n elif len(mfovs_kps_arrays) == 1:\n mfovs_kps = mfovs_kps_arrays[0]\n mfovs_descs = mfovs_descs_arrays[0]\n else:\n mfovs_kps = np.vstack(mfovs_kps_arrays)\n mfovs_descs = np.vstack(mfovs_descs_arrays)\n return np.array(mfovs_kps), np.array(mfovs_descs)\n\n mfovs1_kps, mfovs1_descs = get_kps_descs(mfovs1, sec1_cache)\n mfovs2_kps, mfovs2_descs = get_kps_descs(mfovs2, sec2_cache)\n\n model, filtered_matches = matcher.match_and_filter(mfovs1_kps, mfovs1_descs, mfovs2_kps, mfovs2_descs)\n return mfovs1, model, filtered_matches", "def __naive_matching(pattern, text):\n matched_positions = []\n n = len(text)\n m = len(pattern)\n # Last possible pattern starting position in the text is n - m\n for pos in range((n - m) + 1):\n if pattern == text[pos:(pos+m)]:\n matched_positions.append(pos)\n return matched_positions", "def find_matches(words, min_match_ratio):\n couples = []\n with Pool(processes=mp.cpu_count()) as pool:\n results = pool.starmap(\n get_fuzz_ratio, itertools.combinations(words, 2))\n for result, word, paired_word in results:\n if result >= min_match_ratio:\n couples.append([word, paired_word])\n return couples", "def distance_between_pattern_and_strings(pattern, dna):\n\n k = len(pattern)\n distance = 0\n\n for text in dna:\n hamming_distance = 1000000\n for i in range(len(text) - k + 1):\n if hamming_distance > compute_hamming_distance(pattern, text[i:i + k]):\n hamming_distance = compute_hamming_distance(pattern, text[i:i + k])\n distance = distance + hamming_distance\n return distance", "def match_patterns(sdfg: SDFG,\n patterns: Union[Type[xf.PatternTransformation], List[Type[xf.PatternTransformation]]],\n node_match: Callable[[Any, Any], bool] = type_match,\n edge_match: Optional[Callable[[Any, Any], bool]] = None,\n permissive: bool = False,\n metadata: Optional[PatternMetadataType] = None,\n states: Optional[List[SDFGState]] = None,\n options: 
Optional[List[Dict[str, Any]]] = None):\n\n if isinstance(patterns, type):\n patterns = [patterns]\n if isinstance(options, dict):\n options = [options]\n\n # Collect transformation metadata\n if metadata is not None:\n # Transformation metadata can be evaluated once per apply loop\n interstate_transformations, singlestate_transformations = metadata\n else:\n # Otherwise, precompute all transformation data once\n (interstate_transformations, singlestate_transformations) = get_transformation_metadata(patterns, options)\n\n # Collect SDFG and nested SDFGs\n sdfgs = sdfg.all_sdfgs_recursive()\n\n # Try to find transformations on each SDFG\n for tsdfg in sdfgs:\n ###################################\n # Match inter-state transformations\n if len(interstate_transformations) > 0:\n # Collapse multigraph into directed graph in order to use VF2\n digraph = collapse_multigraph_to_nx(tsdfg)\n\n for xform, expr_idx, nxpattern, matcher, opts in interstate_transformations:\n for subgraph in matcher(digraph, nxpattern, node_match, edge_match):\n match = _try_to_match_transformation(tsdfg, digraph, subgraph, tsdfg, xform, expr_idx, nxpattern, -1,\n permissive, opts)\n if match is not None:\n yield match\n\n ####################################\n # Match single-state transformations\n if len(singlestate_transformations) == 0:\n continue\n for state_id, state in enumerate(tsdfg.nodes()):\n if states is not None and state not in states:\n continue\n\n # Collapse multigraph into directed graph in order to use VF2\n digraph = collapse_multigraph_to_nx(state)\n\n for xform, expr_idx, nxpattern, matcher, opts in singlestate_transformations:\n for subgraph in matcher(digraph, nxpattern, node_match, edge_match):\n match = _try_to_match_transformation(state, digraph, subgraph, tsdfg, xform, expr_idx, nxpattern,\n state_id, permissive, opts)\n if match is not None:\n yield match", "def DiscoverPatterns(parameters, graph):\n patternCount = 0\n # get initial one-edge patterns\n parentPatternList = GetInitialPatterns(graph, parameters.temporal)\n if DEBUGFLAG:\n print(\"Initial patterns (\" + str(len(parentPatternList)) + \"):\")\n for pattern in parentPatternList:\n pattern.print_pattern(' ')\n discoveredPatternList = []\n while ((patternCount < parameters.limit) and parentPatternList):\n print(str(parameters.limit - patternCount) + \" patterns left\")\n childPatternList = []\n # extend each pattern in parent list (***** todo: in parallel)\n while (parentPatternList):\n parentPattern = parentPatternList.pop(0)\n if ((len(parentPattern.instances) > 1) and (patternCount < parameters.limit)):\n patternCount += 1\n extendedPatternList = Pattern.ExtendPattern(parentPattern, parameters.temporal)\n while (extendedPatternList):\n extendedPattern = extendedPatternList.pop(0)\n if DEBUGFLAG:\n print(\"Extended Pattern:\")\n extendedPattern.print_pattern(' ')\n if (len(extendedPattern.definition.edges) <= parameters.maxSize):\n # evaluate each extension and add to child list\n extendedPattern.evaluate(graph)\n if ((not parameters.prune) or (extendedPattern.value >= parentPattern.value)):\n Pattern.PatternListInsert(extendedPattern, childPatternList, parameters.beamWidth, parameters.valueBased)\n # add parent pattern to final discovered list\n if (len(parentPattern.definition.edges) >= parameters.minSize):\n Pattern.PatternListInsert(parentPattern, discoveredPatternList, parameters.numBest, False) # valueBased = False\n parentPatternList = childPatternList\n # insert any remaining patterns in parent list on to discovered list\n 
while (parentPatternList):\n parentPattern = parentPatternList.pop(0)\n if (len(parentPattern.definition.edges) >= parameters.minSize):\n Pattern.PatternListInsert(parentPattern, discoveredPatternList, parameters.numBest, False) # valueBased = False\n return discoveredPatternList", "def findWordsInPattern (self, pattern, letters):\n\t\twords = []\n\t\tletters = ' ' + letters\n\t\twords = self.root.findWordsInPattern(pattern, letters, u'')\n\t\treturn words;", "def match_pattern_seq(self, pat, const, normalize=True):\n segs = [self.fts(s, normalize) for s in const]\n if len(pat) != len(segs):\n return False\n else:\n return all([s >= p for (s, p) in zip(segs, pat)])", "def word_filter(trie, pattern):\n\n def inner(trie, pattern, stack='', result=set()):\n\n if pattern == '':\n if trie.value is not None:\n result.add((stack, trie.value))\n\n return result\n\n if pattern[0] == '*':\n result |= inner(trie, pattern[1:], stack, result)\n\n for key, child in trie.children.items():\n\n if pattern[0] == key or pattern[0] == '?':\n result |= inner(child, pattern[1:], stack+key, result)\n elif pattern[0] == '*':\n result |= inner(child, pattern, stack+key, result)\n\n return result\n\n result = inner(trie, pattern)\n\n return list(result)", "def SearchRePy20(context, pattern, arg=None):\n if not arg:\n arg = context.node\n arg = Conversions.StringValue(arg)\n proc = context.processor\n matches_nodeset = []\n _re =re.compile(pattern)\n _match =_re.search(arg)\n while _match:\n proc.pushResult()\n proc.writers[-1].startElement('Match', EMPTY_NAMESPACE)\n _groups =_match.groups()\n # .groups() return empty tuple when the pattern did not do grouping\n if not _groups: _groups =tuple(_match.group())\n for group in _groups:\n proc.writers[-1].startElement('Group', EMPTY_NAMESPACE)\n # MatchObject groups return None if unmatched\n # unlike .findall() returning empty strings\n proc.writers[-1].text(group or '')\n proc.writers[-1].endElement('Group')\n proc.writers[-1].endElement('Match')\n frag = proc.popResult()\n context.rtfs.append(frag)\n matches_nodeset.append(frag.childNodes[0])\n _match =_re.search(arg, _match.end())\n return matches_nodeset", "def frequent_words(text, k):\n frequent_patterns = []\n count = {}\n for i in range(0, len(text)-k+1):\n pattern = text[i:i+k]\n count[i] = pattern_count(text, pattern)\n max_count = max(count.values()) if count else 0\n for i in range(0, len(text)-k+1):\n pattern = text[i:i+k]\n if count[i] == max_count and pattern not in frequent_patterns:\n frequent_patterns.append(text[i:i+k])\n return frequent_patterns", "def whole_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + ' wordend'\n # whole word includes a space before\n found = self.text.get(start + '-1c', end)\n if found == ' ' + self.term:\n self.text.tag_add('found', start, end)\n start = end", "def whole_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + ' wordend'\n # whole word includes a space before\n found = self.text.get(start + '-1c', end)\n if found == ' ' + self.term:\n self.text.tag_add('found', start, end)\n start = end", "def CountAppStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tif debug:\n\t\tprint len(text)-len(pattern)+1\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif 
HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\treturn count", "def gen_matches(self, subseq, startpos):\n \n raise TypeError, \"PatternBase is an abstract base class\"", "def subtree_matching(self, subtree):\n\t\t#TODO implement this in a faster way\n\t\ttext = self.preorder_traverse_to_list()\n\t\tpattern = subtree.preorder_traverse_to_list()\n\n\t\tprint text\n\t\tprint pattern\n\n\t\tmatches = []\n\t\tfor i in range(len(text)):\n\t\t\tif text[i:i+len(pattern)] == pattern:\n\t\t\t\tmatches.append(i)\n\t\treturn matches", "def get_gt_patterns_found(groundtruth, patterns):\n hits = [0 for g in groundtruth] # 1 if hit, 0 if miss (on gt)\n\n # For each ground_truth pattern, check if we found it with our algorithm\n for i, gt in enumerate(groundtruth):\n c1 = gt.vs[\"label\"]\n c1_edge = gt.es[\"label\"]\n\n for p in patterns:\n if len(p.es) == 0:\n continue\n c2 = p.vs[\"label\"]\n c2_edge = p.es[\"label\"]\n\n if len(c1) != len(c2) or len(c1_edge) != len(c2_edge):\n continue\n\n try:\n if gt.isomorphic_vf2(p, color1=c1, color2=c2,\n edge_color1=c1_edge, edge_color2=c2_edge):\n if(hits[i] >= 1):\n print(\"Warning: ground-truth pattern already found\")\n else:\n hits[i] = 1\n # print(\"hit:\",p)\n break\n except:\n print('Error')\n print(c1_edge)\n print(c2_edge)\n\n return (sum(hits), len(hits)) # hits, total", "def pattern_overlap(self, pattern):\n return np.sum((2.*pattern.flatten() - 1.)*self.spins)/self.nspins", "def match_segment(var, pattern, input, bindings, start=0):\n\n # If there are no words in pattern following var, we can just match var\n # to the remainder of the input.\n if not pattern:\n return match_variable(var, input, bindings)\n\n # Get the segment boundary word and look for the first occurrence in\n # the input starting from index start.\n word = pattern[0]\n try:\n pos = start + input[start:].index(word)\n except ValueError:\n # When the boundary word doesn't appear in the input, no match.\n return False\n\n # Match the located substring to the segment variable and recursively\n # pattern match using the resulting bindings.\n var_match = match_variable(var, input[:pos], dict(bindings))\n match = match_pattern(pattern, input[pos:], var_match)\n\n # If pattern matching fails with this substring, try a longer one.\n if not match:\n return match_segment(var, pattern, input, bindings, start + 1)\n \n return match", "def match_filter_patterns(self, content: str, guild_id: int) -> Optional[typing.Tuple[re.Match, str]]:\n for pattern, pattern_identifier in self._filter_cache[guild_id].items():\n if search := pattern.search(content):\n return search, pattern_identifier", "def frequent_words_t(text, k, t):\n frequent_patterns = []\n count = {}\n for i in range(0, len(text)-k+1):\n pattern = text[i:i+k]\n count[i] = pattern_count(text, pattern)\n if count[i] >= t and pattern not in frequent_patterns:\n frequent_patterns.append(text[i:i+k])\n return frequent_patterns", "def sparsify_by_offsets(text, pattern_offsets, width, verbose = True):\n import sparse_string\n\n # !@#$ Not sparsifying text for now!!!\n return text\n\n num_offsets = 0\n sparse_text = sparse_string.SparseString(text)\n for offset_list in pattern_offsets.values():\n num_offsets += len(offset_list)\n for offset in offset_list:\n sparse_text.add_interval(offset, offset + width)\n sparse_text.sparsify()\n if verbose:\n print ' sparsify_by_offsets(text=%d,offsets=%d) occupancy = %d%%' % (len(text), num_offsets, \n int(sparse_text.get_occupancy() *100.0))\n return sparse_text", "def 
highlight_pattern(self, pad, pattern,\n tag, start=\"1.0\", end=\"end\", regexp=False):\n start = pad.index(start)\n end = pad.index(end)\n pad.mark_set(\"matchStart\", start)\n pad.mark_set(\"matchEnd\", start)\n pad.mark_set(\"searchLimit\", end)\n\n count = GUI.IntVar()\n while True:\n index = pad.search(pattern, \"matchEnd\", \"searchLimit\", count=count,\n regexp=regexp)\n if index == \"\":\n break\n pad.mark_set(\"matchStart\", index)\n pad.mark_set(\"matchEnd\", \"%s+%sc\" % (index, count.get()))\n pad.tag_add(tag, \"matchStart\", \"matchEnd\")", "def solve(k, text, pattern):\n # print(k, text, pattern)\n base = pow(10, 9)\n M1 = base + 7\n M2 = base + 9\n X = 263\n len_p = len(pattern)\n len_t = len(text)\n pattern1, pattern2 = pre_compute_hashes(pattern, M1, M2, X)\n text1, text2 = pre_compute_hashes(text, M1, M2, X)\n res = []\n p_hash1, p_hash2 = get_hash_value(\n pattern1, M1, X, 0, len_p), get_hash_value(pattern2, M2, X, 0, len_p)\n for i in range(len_t-len_p+1): # all possible candidates\n subs_hash1 = get_hash_value(text1, M1, X, i, len_p)\n subs_hash2 = get_hash_value(text2, M2, X, i, len_p)\n is_valid = find_num_matches(pattern1, pattern2, text1, text2, M1, M2, X, k, len_p, i)\n if is_valid:\n res.append(i)\n return res", "def topic_pattern_match(pattern):\n client = AdminClient({\"bootstrap.servers\": \"PLAINTEXT://localhost:9092\"})\n topic_metadata = client.list_topics()\n topics = topic_metadata.topics\n filtered_topics = {key: value for key, value in topics.items() if contains_substring(key, pattern)}\n return len(filtered_topics) > 0", "def same_length(words, pattern):\r\n new_list = list()\r\n for i in words:\r\n if len(pattern) == len(i):\r\n new_list.append(i)\r\n return new_list", "def find_match_DFS(self, array, pattern):\n def simulate(state_id, match, idx):\n if idx >= len(array):\n return Match(pattern=self.pattern) # no match\n state = self.states[state_id]\n for s in state.transitions:\n if s.symbol & array[idx]:\n if s.is_final:\n if array[idx+1:].count(BLACK) == 0:\n match_final = match + [s.symbol]\n match_final += [WHITE] * (len(array) - idx - 1)\n return Match(match_final, pattern=self.pattern)\n # else: its not added to the stack\n else:\n ans = simulate(s.id, match + [s.symbol], idx+1)\n if ans.is_match:\n return ans\n return Match(pattern=self.pattern) # no match\n min_length = sum(pattern) + len(pattern) -1\n self.compile(pattern) # create the state first\n\n return simulate(0, [], 0) # start recursive call", "def pfm(pattern_sequence):\n if isinstance(pattern_sequence, Pattern):\n sequences = []\n for match_sequences in pattern_sequence.matchtable_pset.match_sequences.itervalues():\n for strand, sequence in match_sequences:\n if strand == 2:\n sequences.append(revcomp(sequence))\n else:\n sequences.append(sequence)\n else:\n sequences = pattern_sequence\n\n ncol = len(sequences[0])\n matrix = {\n 'a': [0] * ncol,\n 't': [0] * ncol,\n 'c': [0] * ncol,\n 'g': [0] * ncol,\n }\n total = [0] * ncol\n\n for s in sequences:\n for i, j in enumerate(s):\n matrix.get(j)[i] += 1\n total[i] += 1\n\n # Normalization\n for i in xrange(ncol):\n matrix.get('a')[i] = float(matrix.get('a')[i]) / total[i]\n matrix.get('t')[i] = float(matrix.get('t')[i]) / total[i]\n matrix.get('c')[i] = float(matrix.get('c')[i]) / total[i]\n matrix.get('g')[i] = float(matrix.get('g')[i]) / total[i]\n\n return matrix", "def word_fts(self, word, normalize=True):\n return [self.fts(ipa, False) for ipa in self.ipa_segs(word, normalize)]", "def search_by_pattern(self, tl):\n 
print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)", "def match_pattern(pattern, input, bindings=None):\n\n # Check to see if matching failed before we got here.\n if bindings is False:\n return False\n \n # When the pattern and the input are identical, we have a match, and\n # no more bindings need to be found.\n if pattern == input:\n return bindings\n\n bindings = bindings or {}\n\n # Match input and pattern according to their types.\n if is_segment(pattern):\n token = pattern[0] # segment variable is the first token\n var = token[2:] # segment variable is of the form ?*x\n return match_segment(var, pattern[1:], input, bindings)\n elif is_variable(pattern):\n var = pattern[1:] # single variables are of the form ?foo\n return match_variable(var, [input], bindings)\n elif contains_tokens(pattern) and contains_tokens(input):\n # Recurse:\n # try to match the first tokens of both pattern and input. The bindings\n # that result are used to match the remainder of both lists.\n return match_pattern(pattern[1:],\n input[1:],\n match_pattern(pattern[0], input[0], bindings))\n else:\n return False", "def findmatch(xarr, farr, xp, xf, sl, sf, ws, xlimit=10, wlimit=2):\n wp = xp * 0.0 - 1\n px = xp * 0.0\n\n # calculate it using only xp and sl\n if sf is None and not ws:\n print('Currently not available')\n\n # calculate it without any wavelength solution\n elif not ws:\n pass\n\n # calculate it without any flux information\n elif sf is None and ws:\n for i in xf.argsort()[::-1]:\n cx = mcentroid(xarr, farr, xc=xp[i], xdiff=4)\n if abs(cx - xp[i]) < xlimit:\n w = wavematch(ws(cx), wp, sl)\n wp[i] = w\n\n # calculate it using all of the information\n else:\n dcoef = ws.coef * 0.0\n dcoef[0] = 10\n dcoef[1] = dcoef[1] * 0.2\n ndstep = 20\n # this matches up the spectra but only varies the first\n # two coefficients by a small amount\n nws = spectramatch(\n xarr, farr, sl, sf, ws, dcoef, ndstep=ndstep, res=2, dres=0.1)\n for i in range(len(xf)): # xf.argsort()[::-1]:\n cx = mcentroid(xarr, farr, xc=xp[i], xdiff=4)\n if abs(cx - xp[i]) < xlimit:\n w = wavematch(nws(cx), wp, sl, wlimit=wlimit)\n wp[i] = w\n px[i] = matchprob(cx, w, xf[i], xp, xf, sl, nws, dw=0.8)\n # print cx, nws.value(cx), wp[i], px[i], xp[i], xf[i]\n return wp", "def faster_frequent_words(text, k):\n frequent_patterns = []\n freq_array = compute_freq(text, k)\n max_count = max(freq_array)\n for i in range(0, len(text)-k+1):\n if freq_array[i] == max_count:\n pattern = number_to_pattern(i, k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def get_patterns_also_in_gt(groundtruth, patterns):\n hits = [0 for p in patterns] # 1 if hit, 0 if miss\n\n # For each ground_truth pattern, check if we found it with our algorithm\n for i, p in enumerate(patterns):\n if len(p.es) == 0:\n continue\n c1 = p.vs[\"label\"]\n c1_edge = p.es[\"label\"]\n\n for gt in groundtruth:\n c2 = gt.vs[\"label\"]\n c2_edge = gt.es[\"label\"]\n\n if len(c1) != len(c2) or len(c1_edge) != len(c2_edge):\n continue\n\n if gt.isomorphic_vf2(p, color1=c1, color2=c2,\n edge_color1=c1_edge, edge_color2=c2_edge):\n if(hits[i] >= 1):\n print(\"Warning: ground-truth pattern already found\")\n else:\n hits[i] = 1\n break # consider multiple instances of same pattern?\n\n return (sum(hits), len(hits)) # hits,total", "def create_masks(img_path, frame_num):\n #import the images\n key_frame = cv2.imread(img_path + \"_\" + str(frame_num) + \".png\")\n beam_mask = filter_beam(key_frame)\n 
key_frame = cv2.cvtColor(cv2.bitwise_and(beam_mask,key_frame), cv2.COLOR_BGR2GRAY)\n cv2.imwrite(img_path + \"_\" + str(frame_num) + \"_beamed.png\",key_frame)\n key_frame = change_contrast(key_frame, 4.0)\n\n #key_mask = cv2.imread(img_path + \"_mask_\" + str(frame_num) + \".png\",0)\n #masked_key = cv2.bitwise_and(key_frame,key_mask)\n new_frame = cv2.imread(img_path + \"_\" + str(frame_num + 1) + \".png\")\n new_frame = cv2.cvtColor(cv2.bitwise_and(beam_mask,new_frame), cv2.COLOR_BGR2GRAY)\n new_frame = change_contrast(new_frame, 4.0)\n\n #trying with a couple methods here:\n #SIFT method\n sift = cv2.SIFT_create()\n keypoints_1, descriptors_1 = sift.detectAndCompute(key_frame,None)\n keypoints_2, descriptors_2 = sift.detectAndCompute(new_frame,None)\n bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)\n\n matches = bf.match(descriptors_1,descriptors_2)\n matches = sorted(matches, key = lambda x:x.distance)\n for x in keypoints_1:\n print(x.pt)\n\n img3 = cv2.drawMatches(key_frame, keypoints_1, new_frame, keypoints_2, matches, new_frame, flags=2)\n cv2.imshow(\"matched\",img3)\n cv2.waitKey(0)\n\n #use the SIFT paradigm but do it semi-manually\n\n #active contouring method", "def matchShapes(cnt1, cnt2):\n\tret = cv2.matchShapes(cnt1, cnt2, 1, 0.0)\n\treturn ret", "def findfeatures(self):\n self.set_wdiff()\n\n #xp, wp=st.findfeatures(self.xarr, self.farr, self.slines, self.sfluxes,\n # self.ws, mdiff=self.mdiff, wdiff=self.wdiff, sigma=self.sigma, niter=self.niter, sections=3)\n xp,wp=st.crosslinematch(self.xarr, self.farr, self.slines, self.sfluxes,\n self.ws, mdiff=self.mdiff, wdiff=20, sigma=self.sigma, niter=self.niter)\n for x, w in zip(xp, wp):\n if w not in self.wp and w>-1: \n self.xp.append(x)\n self.wp.append(w)\n self.plotFeatures()\n self.redraw_canvas()", "def search(filename, pattern, replace_whitespace_char=' '):\n with open(filename, 'rb') as f_in:\n pdf_pages = pdftotext.PDF(f_in)\n pdf_str = '\\n\\n'.join(pdf_pages)\n pdf_str = re.sub('\\s+', replace_whitespace_char, pdf_str)\n matches = re.findall(pattern, pdf_str)\n if not matches:\n return\n print(\"Found {len(matches):d} matche(s)\")\n for match in matches:\n print(f\" {match}\")", "def _partial_match(pattern, reference):\n tokens = reference.replace('/', ' / ').replace('@', ' @ ').replace('#', ' # ').split()\n\n def partial_sums(iterable):\n partial = ''\n for i in iterable:\n partial += i\n yield partial\n\n return any(map(pattern.match, list(partial_sums(tokens))))", "def patterns(self) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self.fuzzy_patterns.items():\n for pattern, kwargs in zip(patterns[\"patterns\"], patterns[\"kwargs\"]):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": pattern.text, \"type\": \"fuzzy\"}\n if kwargs:\n p[\"kwargs\"] = kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n for label, patterns in self.regex_patterns.items():\n for pattern, kwargs in zip(patterns[\"patterns\"], patterns[\"kwargs\"]):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": pattern, \"type\": \"regex\"}\n if kwargs:\n p[\"kwargs\"] = kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n return all_patterns", "def find_charity_sentences(subdoc, factory) -> List:\n\n calculate_distances_per_pattern(subdoc, factory, merge=True, pattern_prefix='x_charity_')\n\n slices = []\n vectors = filter_values_by_key_prefix(subdoc.distances_per_pattern_dict, 'x_charity_')\n vectors_i = []\n for v in 
vectors:\n if max(v) > 0.6:\n vector_i, _ = improve_attention_vector(subdoc.embeddings, v, relu_th=0.6, mix=0.9)\n vectors_i.append(vector_i)\n else:\n vectors_i.append(v)\n\n x = max_exclusive_pattern(vectors_i)\n x = relu(x, 0.8)\n subdoc.distances_per_pattern_dict['$at_x_charity_'] = x\n\n dups = {}\n for i in np.nonzero(x)[0]:\n bounds = get_sentence_bounds_at_index(i, subdoc.tokens)\n\n if bounds[0] not in dups:\n sl = slice(bounds[0], bounds[1])\n sum_ = sum(x[sl])\n confidence = 'x'\n # confidence = np.mean( np.nonzero(x[sl]) )\n nonzeros_count = len(np.nonzero(x[sl])[0])\n print('nonzeros_count=', nonzeros_count)\n confidence = 0\n\n if nonzeros_count > 0:\n confidence = sum_ / nonzeros_count\n print('confidence=', confidence)\n if confidence > 0.8:\n # GLOBALS__['renderer'].render_color_text(subdoc.tokens_cc[sl],\n # subdoc.distances_per_pattern_dict['$at_x_charity_'][sl], _range=(0, 1))\n print(i, sum_)\n\n slices.append((sl, confidence, sum_))\n\n dups[bounds[0]] = True\n\n return slices", "def get_ngram_features(data, subsequences, overlapping=False):\n features = pd.DataFrame(index=data.index)\n \n for subseq in subsequences:\n if overlapping:\n features[subseq] = data.sequence.apply(find_overlapping, args=(subseq, ))\n else:\n features[subseq] = data.sequence.str.count(subseq)\n \n \n return features", "def filter_patterns(self,threshold):\n if threshold is not None:\n pass #learn threshold\n return filter(lambda pattern: pattern.score > threshold, self.patterns)", "def test__parser__grammar_sequence_nested(seg_list, caplog):\n bs = StringParser(\"bar\", KeywordSegment)\n fs = StringParser(\"foo\", KeywordSegment)\n bas = StringParser(\"baar\", KeywordSegment)\n g = Sequence(Sequence(bs, fs), bas)\n ctx = ParseContext(dialect=None)\n with caplog.at_level(logging.DEBUG, logger=\"sqlfluff.parser\"):\n # Matching the start of the list shouldn't work\n logging.info(\"#### TEST 1\")\n assert not g.match(seg_list[:2], parse_context=ctx)\n # Matching the whole list should, and the result should be flat\n logging.info(\"#### TEST 2\")\n assert g.match(seg_list, parse_context=ctx).matched_segments == (\n KeywordSegment(\"bar\", seg_list[0].pos_marker),\n seg_list[1], # This will be the whitespace segment\n KeywordSegment(\"foo\", seg_list[2].pos_marker),\n KeywordSegment(\"baar\", seg_list[3].pos_marker)\n # NB: No whitespace at the end, this shouldn't be consumed.\n )", "def _preprocessing(self, pattern):\n\n pat_len = len(pattern)\n if pat_len == 1:\n return [1]\n\n Z = [0 for _ in pattern]\n Z[0] = pat_len\n Z[1] = self._matched_len(pattern, 0, 1)\n for i in range(2, 1 + Z[1]):\n Z[i] = Z[1] - i + 1\n # Defines boundaries for z-box\n left = 0\n right = 0\n for i in range(2 + Z[1], pat_len):\n if i <= right: # z-box contains i\n k = i - left\n b = Z[k]\n a = right - i + 1\n if b < a: # b ends within existing z-box\n Z[i] = b\n else: # b ends at or after the end of the z-box, we need to do an explicit match to the right of the z-box\n Z[i] = b + self._matched_len(pattern, a, right+1)\n left = i\n right = i + Z[i] - 1\n else: # z-box does not contain i\n Z[i] = self._matched_len(pattern, 0, i)\n if Z[i] > 0:\n left = i\n right = i + Z[i] - 1\n return Z", "def apply_subpattern_rules(flags, input_tensors, output_tensors, tensor_list, tensor_map):\n matched_subpattern = None\n for rule, target_pattern in OP_SUBPATTERN_RULES.items():\n if matched_subpattern is not None:\n break\n # One rule for multiple patterns\n if isinstance(target_pattern, tuple):\n for pattern in target_pattern:\n if 
rule(flags, pattern, SIMPLE_MAPPING, input_tensors,\n output_tensors, tensor_list, tensor_map):\n matched_subpattern = pattern\n break\n elif rule(flags, input_tensors, output_tensors,\n tensor_list, tensor_map) and isinstance(target_pattern, OpSubPatterns):\n # One rule for one pattern\n matched_subpattern = OP_SUBPATTERN_RULES[rule]\n break\n elif not isinstance(target_pattern, OpSubPatterns):\n raise ValueError(\"Wrong Subpattern rule dictionary format: \" +\n \"SubPattern expected but received \" + str(type(target_pattern)))\n return matched_subpattern", "def complete_match(word, matrix, base_matches, word_len, row_length, column_length):\n match_candidates = (complete_line(base, neighbor, word_len, row_length, column_length)\n for base in base_matches\n for neighbor in matched_neighbors(base, word[1], matrix, row_length,\n column_length))\n\n return [match for match in match_candidates if convert_to_word(match, matrix) == word]", "def find_occurrences(text, pattern, d=0):\n idx_of_last_pattern = len(text) - len(pattern)\n return [i for i in range(idx_of_last_pattern + 1) if hamming(text[i:i + len(pattern)], pattern) <= d]", "def fuzzy_match_simple(pattern, instring):\n p_idx, s_idx, p_len, s_len = 0, 0, len(pattern), len(instring)\n while (p_idx != p_len) and (s_idx != s_len):\n if pattern[p_idx].lower() == instring[s_idx].lower():\n p_idx += 1\n s_idx += 1\n return p_len != 0 and s_len != 0 and p_idx == p_len", "def _apply_pattern_rules(flags, input_tensors, output_tensors, tensor_list, tensor_map):\n matched_pattern = OpPatterns.OPAQUE_PATTERN\n for rule, target_pattern in OP_PATTERN_RULES.items():\n if matched_pattern != OpPatterns.OPAQUE_PATTERN:\n break\n # One rule for multiple patterns\n if isinstance(target_pattern, tuple):\n for pattern in target_pattern:\n if rule(flags, pattern, SIMPLE_MAPPING, input_tensors,\n output_tensors, tensor_list, tensor_map):\n matched_pattern = pattern\n break\n elif rule(flags, input_tensors, output_tensors,\n tensor_list, tensor_map) and isinstance(target_pattern, OpPatterns):\n # One rule for one pattern\n matched_pattern = OP_PATTERN_RULES[rule]\n break\n elif not isinstance(target_pattern, OpPatterns):\n raise ValueError(\"Wrong Subpattern rule dictionary format: \" +\n \"Pattern expected but received \" + str(type(target_pattern)))\n return matched_pattern", "def __finite_state_matching(text, transitions, pattern_length):\n occurence_positions = []\n n = len(text)\n # start state\n q = 0\n # Run the text through the FSM\n for i in range(n):\n q = transitions[(q, text[i])]\n # The accepting state is reached when the state number is equal\n # to the pattern length\n if q == pattern_length:\n occurence_positions.append(((i+1)-pattern_length))\n return occurence_positions", "def patterns(self: TokenMatcher) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self._patterns.items():\n for pattern in patterns:\n p = {\"label\": label, \"pattern\": pattern, \"type\": self.type}\n all_patterns.append(p)\n return all_patterns", "def find_clumps(text, k, len_win, t):\n\n patterns = []\n len_text = len(text)\n for i in range(len_text - len_win + 1):\n window = text[i:i + len_win]\n freq_map = frequency_table(window, k)\n for key in freq_map.keys():\n if freq_map[key] >= t and key not in patterns:\n patterns.append(key)\n return patterns", "def get_pattern(flags: dict, input_tensors: list,\n output_tensors: list, tensor_list: list, tensor_map: dict):\n # If nothing matches, default pattern would be opaque pattern\n matched_pattern = 
OpPatternRecognizer._apply_pattern_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n matched_subpattern = OpPatternRecognizer.apply_subpattern_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n matched_special_op = OpPatternRecognizer.apply_spec_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n return matched_pattern, matched_subpattern, matched_special_op", "def lookup_coarse_stress_patterns_for_word(\n word: Text,\n ) -> Sequence[Sequence[CoarseStress]]:\n return [\n word.pf.coarse_stress_pattern\n for word in EnglishUtils.all_possible_forms_for(word)\n ]" ]
[ "0.55103326", "0.5505358", "0.5503776", "0.54771847", "0.5460074", "0.53849596", "0.536037", "0.5330876", "0.5322131", "0.52336895", "0.5171854", "0.51623005", "0.51623005", "0.5151723", "0.51417255", "0.510438", "0.50986654", "0.5090402", "0.5082829", "0.50758904", "0.50735044", "0.50685304", "0.506139", "0.50572777", "0.50458705", "0.50458705", "0.5037755", "0.50267947", "0.50056195", "0.5002791", "0.498685", "0.49828196", "0.49473184", "0.49447364", "0.49427205", "0.4933814", "0.49109235", "0.4885397", "0.48841622", "0.48833132", "0.48804134", "0.48750392", "0.48742136", "0.48631823", "0.48508352", "0.48390612", "0.4821843", "0.48212603", "0.48092356", "0.4802434", "0.4800816", "0.47959602", "0.47808498", "0.47696066", "0.47612953", "0.47599944", "0.47564304", "0.47564304", "0.47423166", "0.47121865", "0.4709513", "0.47082523", "0.46996674", "0.46975613", "0.46959805", "0.46879166", "0.46806368", "0.4674837", "0.46747535", "0.4670238", "0.46513283", "0.46486014", "0.46270475", "0.4627022", "0.46256962", "0.46245912", "0.4620758", "0.46186483", "0.46163595", "0.46114105", "0.46065897", "0.46030757", "0.46015775", "0.45963976", "0.45950636", "0.45933637", "0.45897043", "0.45881656", "0.4584169", "0.458121", "0.45737553", "0.45607913", "0.45512307", "0.4544434", "0.45436653", "0.4542129", "0.45379525", "0.45313945", "0.45293456", "0.452885" ]
0.6005872
0
Implements limited pattern matching. Matches just in case pattern is the same length (in segments) as the constituent and each of the segments in the pattern is a featural subset of the corresponding segment in the word.
def match_pattern_seq(self, pat, const, normalize=True):\n segs = [self.fts(s, normalize) for s in const]\n if len(pat) != len(segs):\n return False\n else:\n return all([s >= p for (s, p) in zip(segs, pat)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_pattern(self, pat, word, normalize=True):\n segs = self.word_fts(word, normalize)\n if len(pat) != len(segs):\n return None\n else:\n if all([s >= p for (s, p) in zip(segs, pat)]):\n return segs", "def is_segment(pattern):\n return (type(pattern) is list\n and pattern\n and len(pattern[0]) > 2\n and pattern[0][0] == '?'\n and pattern[0][1] == '*'\n and pattern[0][2] in string.ascii_letters\n and ' ' not in pattern[0])", "def match_segment(var, pattern, input, bindings, start=0):\n\n # If there are no words in pattern following var, we can just match var\n # to the remainder of the input.\n if not pattern:\n return match_variable(var, input, bindings)\n\n # Get the segment boundary word and look for the first occurrence in\n # the input starting from index start.\n word = pattern[0]\n try:\n pos = start + input[start:].index(word)\n except ValueError:\n # When the boundary word doesn't appear in the input, no match.\n return False\n\n # Match the located substring to the segment variable and recursively\n # pattern match using the resulting bindings.\n var_match = match_variable(var, input[:pos], dict(bindings))\n match = match_pattern(pattern, input[pos:], var_match)\n\n # If pattern matching fails with this substring, try a longer one.\n if not match:\n return match_segment(var, pattern, input, bindings, start + 1)\n \n return match", "def advanced_search(self, pattern):\n pass", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def partial_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + f'+{self.chars}c'\n self.text.tag_add('found', start, end)\n start = end", "def partial_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + f'+{self.chars}c'\n self.text.tag_add('found', start, end)\n start = end", "def better_matching(self, pattern, offsets=False, slice_len=5):\n first_occurencies, counts_at = self.setup_better_match(slice_len)\n pattern = list(pattern)\n top, bottom = 0, self.tl\n while top <= bottom:\n if pattern:\n current = pattern.pop()\n if counts_at(bottom+1)[current] - counts_at(top)[current] > 0:\n #if current in self.last_col[top:bottom+1]:\n top = first_occurencies[current] + counts_at(top)[current]\n bottom = first_occurencies[current] + counts_at(bottom + 1)[current] - 1\n else:\n if offsets:\n return []\n else:\n return 0\n else:\n if offsets:\n return self.offsets(top, bottom)\n else:\n return bottom - top + 1", "def _internal_match(self, pattern):\n compiled_re = re.compile(pattern)\n for word in self.words:\n if compiled_re.fullmatch(word) is not None:\n yield word", "def match(self, pattern, d=0, conserved_part=0, slice_len=5):\n first_occurencies, counts_at = self.setup_better_match(slice_len)\n pattern = list(pattern)\n top, bottom = 0, self.tl\n if d == 0:\n conserved_part = len(pattern)\n for i in range(conserved_part):\n if pattern:\n current = pattern.pop()\n if counts_at(bottom+1)[current] - counts_at(top)[current] > 0:\n #if current in self.last_col[top:bottom+1]:\n top = first_occurencies[current] + counts_at(top)[current]\n bottom = first_occurencies[current] + counts_at(bottom + 1)[current] - 1\n else:\n return []\n else:\n return self.offsets(top, 
bottom)\n candidates = [{ 'offset': i, 'errors': 0 } for i in xrange(top, min(bottom+1, self.tl-1))]\n while candidates and pattern:\n dellist = []\n current = pattern.pop()\n for ndx, candidate in enumerate(candidates):\n letter = self.last_col[candidate['offset']]\n if letter != current:\n candidate['errors'] += 1\n if candidate['errors'] > d or (letter == '$' and len(pattern)):\n dellist.append(ndx)\n continue\n candidate['offset'] = first_occurencies[letter] + counts_at(candidate['offset'])[letter]\n candidates = [c for (i,c) in enumerate(candidates) if i not in dellist]\n return self.offsets(starts=[c['offset'] for c in candidates])", "def __naive_matching(pattern, text):\n matched_positions = []\n n = len(text)\n m = len(pattern)\n # Last possible pattern starting position in the text is n - m\n for pos in range((n - m) + 1):\n if pattern == text[pos:(pos+m)]:\n matched_positions.append(pos)\n return matched_positions", "def _partial_match(pattern, reference):\n tokens = reference.replace('/', ' / ').replace('@', ' @ ').replace('#', ' # ').split()\n\n def partial_sums(iterable):\n partial = ''\n for i in iterable:\n partial += i\n yield partial\n\n return any(map(pattern.match, list(partial_sums(tokens))))", "def all_segs_matching_fts(self, ft_mask):\n matching_segs = [ipa for (ipa, fts) in self.segments if fts >= ft_mask]\n return sorted(matching_segs, key=lambda x: len(x), reverse=True)", "def highlight_pattern(self, pad, pattern,\n tag, start=\"1.0\", end=\"end\", regexp=False):\n start = pad.index(start)\n end = pad.index(end)\n pad.mark_set(\"matchStart\", start)\n pad.mark_set(\"matchEnd\", start)\n pad.mark_set(\"searchLimit\", end)\n\n count = GUI.IntVar()\n while True:\n index = pad.search(pattern, \"matchEnd\", \"searchLimit\", count=count,\n regexp=regexp)\n if index == \"\":\n break\n pad.mark_set(\"matchStart\", index)\n pad.mark_set(\"matchEnd\", \"%s+%sc\" % (index, count.get()))\n pad.tag_add(tag, \"matchStart\", \"matchEnd\")", "def DiscoverPatterns(parameters, graph):\n patternCount = 0\n # get initial one-edge patterns\n parentPatternList = GetInitialPatterns(graph, parameters.temporal)\n if DEBUGFLAG:\n print(\"Initial patterns (\" + str(len(parentPatternList)) + \"):\")\n for pattern in parentPatternList:\n pattern.print_pattern(' ')\n discoveredPatternList = []\n while ((patternCount < parameters.limit) and parentPatternList):\n print(str(parameters.limit - patternCount) + \" patterns left\")\n childPatternList = []\n # extend each pattern in parent list (***** todo: in parallel)\n while (parentPatternList):\n parentPattern = parentPatternList.pop(0)\n if ((len(parentPattern.instances) > 1) and (patternCount < parameters.limit)):\n patternCount += 1\n extendedPatternList = Pattern.ExtendPattern(parentPattern, parameters.temporal)\n while (extendedPatternList):\n extendedPattern = extendedPatternList.pop(0)\n if DEBUGFLAG:\n print(\"Extended Pattern:\")\n extendedPattern.print_pattern(' ')\n if (len(extendedPattern.definition.edges) <= parameters.maxSize):\n # evaluate each extension and add to child list\n extendedPattern.evaluate(graph)\n if ((not parameters.prune) or (extendedPattern.value >= parentPattern.value)):\n Pattern.PatternListInsert(extendedPattern, childPatternList, parameters.beamWidth, parameters.valueBased)\n # add parent pattern to final discovered list\n if (len(parentPattern.definition.edges) >= parameters.minSize):\n Pattern.PatternListInsert(parentPattern, discoveredPatternList, parameters.numBest, False) # valueBased = False\n 
parentPatternList = childPatternList\n # insert any remaining patterns in parent list on to discovered list\n while (parentPatternList):\n parentPattern = parentPatternList.pop(0)\n if (len(parentPattern.definition.edges) >= parameters.minSize):\n Pattern.PatternListInsert(parentPattern, discoveredPatternList, parameters.numBest, False) # valueBased = False\n return discoveredPatternList", "def interval_pattern(s1, s2, pattern : str):\n string = tools.get_interval_string(s1,s2)\n return re.fullmatch(pattern,string)", "def findall(pattern, string, overlapping=True, sensitive=True, regexp=False):\n if regexp:\n return SE.occurrences_re(pattern, string)\n if overlapping:\n return SE.occurrences(pattern, string, sensitive)\n else:\n return SE.full_words(pattern, string, sensitive)", "def all_matches(self, pattern):\n\n pat_len = len(pattern)\n if pat_len > self.text_len:\n raise ValueError(\"Pattern length is bigger than text\")\n\n bad_char_table = self._bad_char_table(pattern)\n L = self._good_suffix_table_one(pattern)\n H = self._good_suffix_table_two(pattern)\n indexes = []\n\n pat_end_ind = pat_len - 1\n prev_end = -1\n # prev_end - previous index of pattern end relative to text (for Galil's rule)\n # p - index of char in pattern\n # t - index of char in text\n while pat_end_ind < self.text_len:\n p = pat_len - 1\n t = pat_end_ind\n while p >= 0 and t > prev_end and pattern[p] == self.text[t]:\n p -= 1\n t -= 1\n if p == -1 or t == prev_end: # Matched or holds Galil's rule\n indexes.append(pat_end_ind - pat_len + 1)\n pat_end_ind += pat_len - H[1] if pat_len > 1 else 1\n else:\n char_shift = self._bad_char_shift(self.text[t], bad_char_table)\n if p+1 == pat_len:\n suffix_shift = 1\n elif L[p] == -1:\n suffix_shift = pat_len - H[p+1]\n else:\n suffix_shift = pat_len - L[p]\n shift = max(char_shift, suffix_shift)\n prev_end = pat_end_ind if shift >= p+1 else prev_end #update parameter for Galil's rule\n pat_end_ind += shift\n\n return indexes", "def find_pattern(pattern, text):\r\n result = []\r\n pattern_len = len(pattern)\r\n d_pattern_len = 2 * pattern_len\r\n if pattern_len > len(text):\r\n return []\r\n\r\n new_string = pattern + \"$\" + text\r\n prefix_function_res = prefix_function(new_string)\r\n return [x - d_pattern_len for (x, y) in filter(lambda x: x[1] == pattern_len, enumerate(prefix_function_res))]", "def pattern_overlap(self, pattern):\n return np.sum((2.*pattern.flatten() - 1.)*self.spins)/self.nspins", "def gen_matches(self, subseq, startpos):\n \n raise TypeError, \"PatternBase is an abstract base class\"", "def get_pattern(topic):\n variants = get_variants(topic)\n sub_patterns = [r'(.*\\b)%s\\b(.*)' % variant.lower() for variant in variants]\n return re.compile(r'|'.join(sub_patterns), flags=re.IGNORECASE)", "def has_substring(pattern, text):\n M = len(pattern)\n N = len(text)\n\n # create the LPS\n lps = [0] * M\n j = 0\n\n compute_lsp(pattern, M, lps)\n\n i = 0\n final_index = 0\n\n while (N - i) >= (M - j):\n if pattern[j] == text[i]:\n i += 1\n j += 1\n if j == M:\n # on Last index\n final_index = i - j\n j = lps[j - 1]\n\n elif i < N and pattern[j] != text[i]:\n\n if j != 0:\n j = lps[j - 1]\n else:\n i += 1\n\n return final_index", "def add_substring_match_features(self,lst1,lst2,normalize_factor = 1.):\n for i in range(self.RE_LOW,self.RE_HI+1):\n for j in range(self.WILD_LOW,self.WILD_HI+1):\n self.features.append(Measures.longest_substring_with_wildcards(lst1, lst2, i, j) / normalize_factor)", "def select_strong_subjective_patterns(self):\n self.ss_patterns = 
{}\n for pattern in self.learned_patterns.keys():\n freq = self.learned_patterns[pattern]['freq']\n prob = self.learned_patterns[pattern]['prob']\n if freq >= self.t1_threshold and prob >= self.t2_threshold: \n self.ss_patterns[pattern] = self.learned_patterns[pattern]\n # delete some patterns with low frequency and probability for efficiency\n elif freq > 5 and freq < ((self.t1_threshold*3) / 4):\n \tdel(self.learned_patterns[pattern])\n \n sorted_ss = sorted(self.ss_patterns.iteritems(),key=lambda x: x[1]['prob'], reverse=True)\n self.sorted_ss_patterns = sorted_ss \n for (s,v) in sorted_ss:\n title = (Tcolors.OKGREEN+s+Tcolors.ENDC+\" \").ljust(70,'-') \n pbs = (str)(v['freq'])+\"/\" + Tcolors.CYAN + (str)(v['prob']) + Tcolors.ENDC\n if self.debug: print title + \"------------> \" + pbs\n if self.debug: print\n if len(sorted_ss) > self.pl_threshold:\n \tself.t1_threshold += 1", "def __check_constraints(self, constraints, token_pos, sent_num, pattern, sent, sentences, textsnippets, tokens):\n pos = 0\n more_words_flag = False\n if token_pos[0] == token_pos[1]:\n pos = token_pos[0]\n else:\n more_words_flag = True\n\n for add_info in constraints:\n # find pattern that matches target word\n index = add_info[2]\n found_constraint_flag = True\n if more_words_flag:\n constraint = add_info[0].split()\n i = 0\n while found_constraint_flag and i < len(pattern) and i < len(constraint):\n if check_pattern(pattern[i], constraint[i]):\n pass\n else:\n found_constraint_flag = False\n break\n i += 1\n\n if found_constraint_flag or check_pattern(pattern, add_info[0]):\n # set token_pos depending if index is positive or negative\n if more_words_flag and index > 0:\n pos = token_pos[1]\n elif more_words_flag and index < 0:\n pos = token_pos[0]\n\n if self.__sentence_mode:\n if (0 <= pos + index < len(tokens)) and check_pattern(add_info[1], tokens[pos + index]):\n self.__get_sentence_window_help(pos, sent_num, sentences, textsnippets)\n else:\n while index != 0:\n if index > 0:\n index -= 1\n else:\n index += 1\n if (0 < pos + index < len(tokens)) and check_pattern(add_info[1], tokens[pos + index]):\n self.__get_sentence_window_help(pos, sent_num, sentences, textsnippets)\n break\n else:\n if (0 <= pos + index < len(tokens)) and check_pattern(add_info[1], tokens[pos + index]):\n self.__get_word_window_help(token_pos, textsnippets, len(tokens), tokens)\n else:\n while index != 0:\n if index > 0:\n index -= 1\n else:\n index += 1\n if (0 < pos + index < len(tokens)) and check_pattern(add_info[1], tokens[pos + index]):\n self.__get_word_window_help(token_pos, textsnippets, sent, tokens)\n break", "def exact_find_in_pattern(fixed_text, cur=0, patterns=PATTERNS):\n return [x for x in patterns if (cur + len(x['find']) <= len(fixed_text))\n and x['find'] == fixed_text[cur:(cur + len(x['find']))]]", "def find_occurrences(text, pattern, d=0):\n idx_of_last_pattern = len(text) - len(pattern)\n return [i for i in range(idx_of_last_pattern + 1) if hamming(text[i:i + len(pattern)], pattern) <= d]", "def indapproxpattern(pattern, string, nummismatch):\n\n indarr = []\n# substringarr = []\n numchars = len(pattern)\n\n for i in xrange(0, len(string) - numchars + 1):\n \n substring = patterncount.subtext(string, i, numchars)\n \n if hammingdist(pattern, substring) <= nummismatch:\n \n indarr.append(i)\n# substringarr.append(substring)\n \n return indarr", "def __get_word_window_more_words_help(self, split_pattern, tokens, constraints):\n textsnippets = []\n textlength = len(tokens)\n for ind, token in enumerate(tokens):\n 
p_index = 0\n end_index = ind\n while p_index < len(split_pattern):\n if check_pattern(split_pattern[p_index], tokens[end_index]):\n p_index += 1\n end_index += 1\n else:\n break\n if p_index == len(split_pattern):\n if constraints is not None:\n self.__check_constraints(constraints, (ind, end_index - 1), ind, split_pattern, None, None, textsnippets, tokens)\n else:\n self.__get_word_window_help((ind, end_index - 1), textsnippets, textlength, tokens)\n return textsnippets", "def search(self, pattern):\n raise NotImplementedError()", "def word_and_pattern (word,pattern):\r\n for i in range(len(pattern)):\r\n if pattern[i]!= '_' and pattern.count(pattern[i]) != word.count(pattern[i]):\r\n return False\r\n return True", "def check(word):\n for i in range(1, target_length):\n if word[0:i] in fragments and word[i:] in fragments:\n print(\"%s + %s => %s\" % (word[0:i], word[i:], word))", "def find_pattern(self, pattern: str) -> bool:\n\n if not pattern and self.is_word:\n return True\n\n node = self\n for i, char in enumerate(pattern):\n if char == \".\":\n res = []\n for each in node.children.values():\n res.append(each.find_pattern(pattern[i + 1:]))\n return any(res)\n\n if char in node.children:\n node = node.children[char]\n else:\n return False\n\n return node.is_word", "def filter_patterns(self,threshold):\n if threshold is not None:\n pass #learn threshold\n return filter(lambda pattern: pattern.score > threshold, self.patterns)", "def frequent_words_with_mismatches(text, k, d):\n\n patterns = []\n freq_map = {}\n n = len(text)\n for i in range(n - k + 1):\n pattern = text[i:i + k]\n pattern_rc = reverse_complement(pattern)\n neighborhood = neighbors(pattern, d) + neighbors(pattern_rc, d)\n for j in range(len(neighborhood)):\n neighbor = neighborhood[j]\n if neighbor not in freq_map.keys():\n freq_map[neighbor] = 1\n else:\n freq_map[neighbor] = freq_map[neighbor] + 1\n m = max_map(freq_map)\n for key in freq_map.keys():\n if freq_map[key] == m:\n patterns.append(key)\n return patterns", "def lookup_stress_patterns_for_word(word: Text) -> Sequence[Sequence[Stress]]:\n return [\n word.pf.stress_pattern for word in EnglishUtils.all_possible_forms_for(word)\n ]", "def ApproxStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tindex = []\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\t\t\tindex.append(i)\n\treturn index", "def add_substring_match_features(self,lst1,lst2,normalize_factor = 1.):\n for j in range(self.WILD_LOW,self.WILD_HI+1):\n self.features.append(Measures.longest_substring_with_wildcards(lst1, lst2, j) / normalize_factor)", "def word_pattern():\n return Pattern._nonkey_words() + (Pattern._var() + Pattern._unkey_words()).many() + Pattern._var().possibly()", "def whole_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + ' wordend'\n # whole word includes a space before\n found = self.text.get(start + '-1c', end)\n if found == ' ' + self.term:\n self.text.tag_add('found', start, end)\n start = end", "def whole_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + ' wordend'\n # whole word includes a space before\n found = self.text.get(start + '-1c', end)\n if found == ' ' + self.term:\n 
self.text.tag_add('found', start, end)\n start = end", "def find(pattern, string, start=0, overlapping=True, sensitive=True, regexp=False):\n if regexp:\n return SE.next_occurrence_re(pattern, string, start)\n if not overlapping: # whole words\n return SE.next_word(pattern, string, start, sensitive)\n else:\n return SE.next_occurrence(pattern, string, start, sensitive)", "def find_substring(pattern, target):\n # Eliminate trivial cases.\n n = len(target)\n m = len(pattern)\n if (not n or not m or m > n):\n return False\n #\n # Search by comparing hashes.\n pattern_hash = hash(pattern)\n for string_start in range(n - m + 1):\n string_end = string_start + m\n if pattern_hash == hash(target[string_start:string_end]):\n return True\n return False", "def search_string(self, pattern):\n if not pattern:\n return True\n\n letter = pattern[0]\n if letter not in self.edges:\n return False\n\n edge = self.edges[letter]\n\n pattern_chunk = pattern[:edge.length]\n edge_chunk = self.string[edge.edge_start:edge.edge_end][:len(pattern)]\n\n if pattern_chunk == edge_chunk:\n if len(pattern) >= edge.length:\n return edge.search_string(pattern[edge.length:])\n return True\n return False", "def __get_word_window_one_word_help(self, pattern, tokens, constraints):\n textsnippets = []\n textlength = len(tokens)\n for ind, token in enumerate(tokens):\n if check_pattern(pattern, token):\n if constraints is not None:\n self.__check_constraints(constraints, (ind, ind), ind, pattern, None, None, textsnippets, tokens)\n else:\n self.__get_word_window_help((ind, ind), textsnippets, textlength, tokens)\n return textsnippets", "def frequent_words(text, k):\n frequent_patterns = []\n count = {}\n for i in range(0, len(text)-k+1):\n pattern = text[i:i+k]\n count[i] = pattern_count(text, pattern)\n max_count = max(count.values()) if count else 0\n for i in range(0, len(text)-k+1):\n pattern = text[i:i+k]\n if count[i] == max_count and pattern not in frequent_patterns:\n frequent_patterns.append(text[i:i+k])\n return frequent_patterns", "def match(self):\r\n results = []\r\n pattern = self.pattern\r\n text = self.text\r\n m = len(self.pattern)\r\n n = len(self.text)\r\n p = self._prefix\r\n k = 0\r\n for i in range(n):\r\n while k > 0 and text[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == text[i]:\r\n k = k+1\r\n if k == m:\r\n results.append(i-m+1)\r\n k = p[k-1]\r\n return results", "def kbest_matches(self, k=1, minlen=2, buffer=0):\n ki = 0\n while k is None or ki < k:\n idx = None\n lcm = None\n while idx is None:\n idx = np.unravel_index(np.argmax(self._wp, axis=None), self._wp.shape)\n if idx[0] == 0 or idx[1] == 0:\n return None\n r, c = idx\n lcm = LCMatch(self, r, c)\n for (x, y) in lcm.path:\n x += 1\n y += 1\n if len(self._wp.mask.shape) > 0 and self._wp.mask[x, y] is True: # True means invalid\n # print('found path contains masked, restart')\n lcm = None\n idx = None\n break\n else:\n self._wp[x, y] = ma.masked\n if len(lcm.path) < minlen:\n # print('found path too short, restart')\n lcm = None\n idx = None\n if buffer > 0 and lcm is not None:\n miny, maxy = 0, self._wp.shape[1] - 1\n minx, maxx = 0, self._wp.shape[0] - 1\n for (x, y) in lcm.path:\n xx = x + 1\n for yy in range(max(miny, y + 1 - buffer), min(maxy, y + 1 + buffer)):\n self._wp[xx, yy] = ma.masked\n yy = y + 1\n for xx in range(max(minx, x + 1 - buffer), min(maxx, x + 1 + buffer)):\n self._wp[xx, yy] = ma.masked\n if lcm is not None:\n ki += 1\n yield lcm", "def partialSetMatchAnnotated(self, annotatedMention):\n aWords = 
annotatedMention.importantWords()\n dWords = self.importantWords()\n \n if dWords.intersection(aWords) == dWords:\n # this mention is a subset of the annotated mention\n if dWords == aWords:\n return True # exact match\n if len(annotatedMention.shortSets) > 0:\n # annotated mention has short sections, try to if one is included\n # in the detected mention\n for ss in annotatedMention.shortSets:\n if ss.intersection(dWords) == ss:\n # detected mention contains all of the words in a short section\n return True\n \n return False", "def topic_pattern_match(pattern):\n client = AdminClient({\"bootstrap.servers\": \"PLAINTEXT://localhost:9092\"})\n topic_metadata = client.list_topics()\n topics = topic_metadata.topics\n filtered_topics = {key: value for key, value in topics.items() if contains_substring(key, pattern)}\n return len(filtered_topics) > 0", "def word_filter(trie, pattern):\n\n def inner(trie, pattern, stack='', result=set()):\n\n if pattern == '':\n if trie.value is not None:\n result.add((stack, trie.value))\n\n return result\n\n if pattern[0] == '*':\n result |= inner(trie, pattern[1:], stack, result)\n\n for key, child in trie.children.items():\n\n if pattern[0] == key or pattern[0] == '?':\n result |= inner(child, pattern[1:], stack+key, result)\n elif pattern[0] == '*':\n result |= inner(child, pattern, stack+key, result)\n\n return result\n\n result = inner(trie, pattern)\n\n return list(result)", "def frequent_words_t(text, k, t):\n frequent_patterns = []\n count = {}\n for i in range(0, len(text)-k+1):\n pattern = text[i:i+k]\n count[i] = pattern_count(text, pattern)\n if count[i] >= t and pattern not in frequent_patterns:\n frequent_patterns.append(text[i:i+k])\n return frequent_patterns", "def search (text, pattern):\n\tfor i in xrange(len(text)-len(pattern)+1):\n\t\tfound = True\n\t\tfor j in xrange(len(pattern)):\n\t\t\tif text[i+j] != pattern[j]:\n\t\t\t\tfound = False\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tprint 'Pattern found at index:', i\n\treturn", "def get_fixed_length_feat(self, feat, num_segment, start_pos, end_pos):\n nfeats = feat[:,:].shape[0]\n if nfeats <= self.num_segments:\n stride = 1\n else:\n stride = nfeats * 1.0 / num_segment\n if self.split != \"train\":\n spos = 0\n else:\n random_end = -0.5 + stride\n if random_end == np.floor(random_end):\n random_end = random_end - 1.0\n spos = np.random.random_integers(0,random_end)\n s = np.round( np.arange(spos, nfeats-0.5, stride) ).astype(int)\n start_pos = float(nfeats-1.0) * start_pos\n end_pos = float(nfeats-1.0) * end_pos\n\n if not (nfeats < self.num_segments and len(s) == nfeats) \\\n and not (nfeats >= self.num_segments and len(s) == num_segment):\n s = s[:num_segment] # ignore last one\n assert (nfeats < self.num_segments and len(s) == nfeats) \\\n or (nfeats >= self.num_segments and len(s) == num_segment), \\\n \"{} != {} or {} != {}\".format(len(s), nfeats, len(s), num_segment)\n\n start_index, end_index = None, None\n for i in range(len(s)-1):\n if s[i] <= end_pos < s[i+1]:\n end_index = i\n if s[i] <= start_pos < s[i+1]:\n start_index = i\n\n if start_index is None:\n start_index = 0\n if end_index is None:\n end_index = num_segment-1\n\n cur_feat = feat[s, :]\n nfeats = min(nfeats, num_segment)\n out = np.zeros((num_segment, cur_feat.shape[1]))\n out [:nfeats,:] = cur_feat\n return out, nfeats, start_index, end_index", "def better_clumps_finding(text, k, t, L):\n frequent_patterns = []\n clumps = [0 for i in range(0, 4**k)]\n first_subtext = text[:L]\n freq_array = compute_freq(first_subtext, k)\n 
for index, freq in enumerate(freq_array):\n if freq >= t:\n clumps[index] = 1\n for i in range(1, len(text) - L + 1):\n old_kmer = text[i - 1:i - 1 + k]\n old_kmer_number = pattern_to_number(old_kmer)\n freq_array[old_kmer_number] -= 1\n new_kmer = text[i + L:i + L + k]\n new_kmer_number = pattern_to_number(new_kmer)\n freq_array[new_kmer_number] += 1\n if freq_array[new_kmer_number] >= t:\n clumps[new_kmer_number] = 1\n for index, clump in enumerate(clumps):\n if clump == 1:\n pattern = number_to_pattern(index, k)\n frequent_patterns.append(pattern) \n return frequent_patterns", "def section_decision(indexer, sentence, offset_list, end_matches):\n if indexer == 0:\n # First section:\n mask_str = sentence[:offset_list[indexer]]\n\n elif indexer == (len(end_matches) - 1):\n # The last section:\n mask_str = sentence[offset_list[indexer - 1]:]\n\n else:\n # Any middle sections:\n mask_str = sentence[offset_list[indexer - 1]:offset_list[indexer]]\n\n return mask_str", "def find_pattern(pattern, text):\r\n pat_text = pattern + '$' + text\r\n\r\n prefixes = [None] * len(pat_text)\r\n\r\n prefixes[0] = 0\r\n border = 0\r\n matches = []\r\n for idx, letter in enumerate(pat_text[1:], start=1):\r\n while border > 0 and letter != pat_text[border]:\r\n border = prefixes[border - 1]\r\n if letter == pat_text[border]:\r\n border = border + 1\r\n else:\r\n border = 0\r\n\r\n if border == len(pattern):\r\n matches.append(idx - len(pattern) - border)\r\n prefixes[idx] = border\r\n\r\n return matches", "def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count", "def matches_rule(word):\n return re.search(pattern, word)", "def __init__(self, pattern):\r\n self.pattern = pattern", "def calculate(signal_patterns, digits) -> int:\n\n def update_candidates():\n # Remove all known answers from all candidates\n for i in range(len(candidates)):\n for v in answers:\n try:\n candidates[i].remove(v)\n except:\n pass\n\n helper = Helper()\n answers = [None] * 10\n\n # Set a list of candidate answers for each unknown digit\n # Start with all 10 signal_patterns and eliminate all the ones whose length doesn't match the digit\n candidates = [list(filter(lambda x: len(x) == helper.num_of_segs(i), signal_patterns)) for i in range(10)]\n update_candidates()\n\n for _ in range(100):\n unknowns = [k for k, v in enumerate(answers) if v is None]\n knowns = [k for k, v in enumerate(answers) if v is not None]\n if len(unknowns) == 0:\n break\n # For every digit we still need to solve, compare it to the digits we already know the answers to\n # See if the unknown digit is a \"subset\" of the known digit(s), and if it is, then the segments should also be a subset.\n # Vice versa if the unknown digit is a \"superset\" of the known digit(s), then similarly the segments should be as well.\n for unknown_n in unknowns:\n for known_n in knowns:\n if helper.x_in_y(known_n, unknown_n):\n # Then we expect segments in answers[known_n] to be a subset of segments in unknown_n\n # Remove candidates from unknown_n where that isn't the case\n candidates[unknown_n] = list(filter(lambda el: helper.seg_x_in_y(answers[known_n], el), candidates[unknown_n]))\n if helper.x_in_y(unknown_n, known_n):\n # Then we expect candidates in unknown_n to be a subset of segments in answers[known_n]\n # Remove candidates from unknown_n where that isn't the case\n candidates[unknown_n] = 
list(filter(lambda el: helper.seg_x_in_y(el, answers[known_n]), candidates[unknown_n]))\n\n # If candidates for unknown_n is only one element, set the answer. If zero, then raise exception.\n if len(candidates[unknown_n]) == 0:\n raise Exception(f\"There are no candidate answers for {unknown_n}\")\n elif len(candidates[unknown_n]) == 1:\n answers[unknown_n] = candidates[unknown_n][0]\n update_candidates()\n\n answers = [\"\".join(sorted(x)) for x in answers]\n digits = [\"\".join(sorted(x)) for x in digits]\n\n return int(answers.index(digits[0])) * 1000 + \\\n int(answers.index(digits[1])) * 100 + \\\n int(answers.index(digits[2])) * 10 + \\\n int(answers.index(digits[3]))", "def fuzzy_match_simple(pattern, instring):\n p_idx, s_idx, p_len, s_len = 0, 0, len(pattern), len(instring)\n while (p_idx != p_len) and (s_idx != s_len):\n if pattern[p_idx].lower() == instring[s_idx].lower():\n p_idx += 1\n s_idx += 1\n return p_len != 0 and s_len != 0 and p_idx == p_len", "def iter_band_search(self, pattern):\n raise NotImplementedError()", "def test_syntax_converter_expand_search_patterns_alone(self):\n spi_search = \"find t bob sam\"\n inv_search = \"title:bob and title:sam\"\n self._compare_searches(inv_search, spi_search)", "def lookup_coarse_stress_patterns_for_word(\n word: Text,\n ) -> Sequence[Sequence[CoarseStress]]:\n return [\n word.pf.coarse_stress_pattern\n for word in EnglishUtils.all_possible_forms_for(word)\n ]", "def find_matching_segments(self):\n hyp_matched_segs = [TIntervalGroup() for i in range(len(self.hyp))]\n for gid_ref, match_ref in enumerate(self.ref):\n bg_ref = match_ref.bbox_group\n max_gid, max_area = -1, 0\n for gid_hyp, bg_hyp in enumerate(self.hyp.get_bbox_groups()):\n rx, ry = bg_ref.page_range(), bg_hyp.page_range()\n if ry[0] > rx[1]:\n break\n area = (bg_ref & bg_hyp)\n if area > max_area:\n max_gid, max_area = gid_hyp, area\n if max_gid != -1:\n hyp_matched_segs[max_gid].extend(match_ref.tinterval_group.copy())\n print('%d -> %d' % (gid_ref, max_gid))\n for seg in hyp_matched_segs:\n seg.reduce()\n return hyp_matched_segs", "def __get_sentence_window_more_words(self, split_pattern, sentences, constraints):\n textsnippets = []\n for ind, sent in enumerate(sentences):\n tokens = self.tokenizer.tokenize(sent)\n p_index = 0\n begin_index = ind\n end_index = ind\n while p_index < len(split_pattern):\n if (end_index < len(tokens)) and check_pattern(split_pattern[p_index], tokens[end_index]):\n p_index += 1\n end_index += 1\n else:\n break\n if p_index == len(split_pattern):\n # search for constraints in sentence\n if constraints is not None:\n self.__check_constraints(constraints, (begin_index, end_index), ind, split_pattern, sent, sentences,\n textsnippets, tokens)\n else:\n # TODO end_index nicht genau genug für word_window\n self.__get_sentence_window_help(end_index, ind, sentences, textsnippets)\n return textsnippets", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def subStringMatchExact(target,key,length):\r\n index = find(target,key)\r\n if index < 0:\r\n return ()\r\n else:\r\n matches = subStringMatchExact(target[index+len(key):len(target)],key,length)\r\n index += (length - len(target))\r\n matches += (index,)\r\n print matches\r\n return matches", "def Like(text, pattern):\n return fnmatch.fnmatch(text, pattern)", "def plaintext_simple_search(pattern, plaintext_data, concordancing=False, **kwargs):\n import re\n result 
= []\n if isinstance(pattern, STRINGTYPE):\n pattern = [pattern]\n for p in pattern:\n if concordancing:\n pat = r'(.{0,140})\\b(' + re.escape(p) + r')\\b(.{0,140})'\n pat = compiler(pat)\n if pat == 'Bad query':\n return 'Bad query'\n matches = re.findall(pat, plaintext_data)\n if concordancing:\n matches = [list(m) for m in matches]\n for i in matches:\n result.append(i)\n else: \n for m in range(len(matches)):\n result.append(p)\n return result", "def match(self, pattern):\n result = Words()\n result.words = self._internal_match(pattern)\n return result", "def isMatch(self, s: str, p: str) -> bool:\n def is_match(self, text, pattern):\n if not pattern:\n return not text\n\n first_match = bool(text) and pattern[0] in {text[0], '.'}\n\n if len(pattern) >= 2 and pattern[1] == '*':\n return (self.isMatch(text, pattern[2:]) or\n first_match and self.isMatch(text[1:], pattern))\n else:\n return first_match and self.isMatch(text[1:], pattern[1:])\n\n def isMatch(self, text, pattern):\n memo = {}\n\n def dp(i, j):\n if (i, j) not in memo:\n if j == len(pattern):\n ans = i == len(text)\n else:\n first_match = i < len(text) and pattern[j] in {text[i], '.'}\n if j + 1 < len(pattern) and pattern[j + 1] == '*':\n ans = dp(i, j + 2) or first_match and dp(i + 1, j)\n else:\n ans = first_match and dp(i + 1, j + 1)\n\n memo[i, j] = ans\n return memo[i, j]\n\n return dp(0, 0)", "def str_search_two(pattern, text):\n N, M = len(text), len(pattern)\n i, j = 0, 0 \n while i < N and j < M:\n if text[i] == pattern[j]:\n j += 1\n else:\n i -= j\n j = 0 \n i += 1\n if j == M:\n return i - M\n else:\n return N", "def find_clumps(text, k, len_win, t):\n\n patterns = []\n len_text = len(text)\n for i in range(len_text - len_win + 1):\n window = text[i:i + len_win]\n freq_map = frequency_table(window, k)\n for key in freq_map.keys():\n if freq_map[key] >= t and key not in patterns:\n patterns.append(key)\n return patterns", "def mine_patterns(self, threshold):\n if self.tree_has_single_path(self.root):\n return self.generate_pattern_list()\n else:\n return self.zip_patterns(self.mine_sub_trees(threshold))", "def test_general_subset_invalid_space():\n pass", "def _MatchPatternLines(self, in_stream, re_pattern, num_lines=None):\n num_read = 0\n while True:\n line = in_stream.readline()\n if not line:\n return None\n num_read += 1\n m = re_pattern.match(line)\n if m is not None:\n return m\n if num_lines is not None and num_read >= num_lines:\n return None", "def relate_pattern(self, other, pattern): # -> bool:\n ...", "def _whole_word_mask(input_tokens, mlm_probability=0.15):\n\n cand_indexes = []\n for (i, token) in enumerate(input_tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n\n if len(cand_indexes) >= 1 and token.startswith(\"##\"):\n cand_indexes[-1].append(i)\n\n else:\n cand_indexes.append([i])\n\n random.shuffle(cand_indexes)\n num_to_predict = max(1, int(len(input_tokens) * mlm_probability))\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n masked_lms.append(index)\n\n assert len(covered_indexes) == len(masked_lms)\n 
mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]\n return mask_labels", "def __get_sentence_window_one_word(self, pattern, sentences, constraints):\n textsnippets = []\n for ind, sent in enumerate(sentences):\n tokens = self.tokenizer.tokenize(sent)\n for i, token in enumerate(tokens):\n if check_pattern(pattern, token):\n if constraints is not None:\n self.__check_constraints(constraints, (i, i), ind, pattern, sent, sentences, textsnippets, tokens)\n else:\n self.__get_sentence_window_help(i, ind, sentences, textsnippets)\n return textsnippets", "def programmer_subsequence(s):\n\n # Base case. Remove all instances where programmer repeats\n s = s.replace('programmerprogrammer', '')\n\n template = {\n 'p': 0,\n 'r': 0,\n 'o': 0,\n 'g': 0,\n 'a': 0,\n 'm': 0,\n 'e': 0\n }\n\n matches = [] # Tuple of start and ends\n i= 0\n P_LENGTH = 9 # Length of the word 'programmer'\n\n k = P_LENGTH # Temp\n while i + k < len(s):\n # Make substring\n sub_s = s[i:i + k]\n\n copy_t = deepcopy(template)\n # Check to see if all chars are contained within string\n for j in range(len(sub_s)):\n if sub_s[j] in ['r', 'm']:\n copy_t[sub_s[j]] += 1 if copy_t[sub_s[j]] < 2 else copy_t[sub_s[j]]\n elif sub_s[j] in copy_t:\n copy_t[sub_s[j]] = 1 \n\n # If we have all values, then inceremnt count and check the next P_LENGTH string\n if sum(copy_t.values()) == P_LENGTH:\n matches.append((i, k))\n i += P_LENGTH\n j = P_LENGTH # Reset J\n else:\n k += 1\n\n return len(matches)", "def solve(k, text, pattern):\n # print(k, text, pattern)\n base = pow(10, 9)\n M1 = base + 7\n M2 = base + 9\n X = 263\n len_p = len(pattern)\n len_t = len(text)\n pattern1, pattern2 = pre_compute_hashes(pattern, M1, M2, X)\n text1, text2 = pre_compute_hashes(text, M1, M2, X)\n res = []\n p_hash1, p_hash2 = get_hash_value(\n pattern1, M1, X, 0, len_p), get_hash_value(pattern2, M2, X, 0, len_p)\n for i in range(len_t-len_p+1): # all possible candidates\n subs_hash1 = get_hash_value(text1, M1, X, i, len_p)\n subs_hash2 = get_hash_value(text2, M2, X, i, len_p)\n is_valid = find_num_matches(pattern1, pattern2, text1, text2, M1, M2, X, k, len_p, i)\n if is_valid:\n res.append(i)\n return res", "def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)", "def distance_between_pattern_and_strings(pattern, dna):\n\n k = len(pattern)\n distance = 0\n\n for text in dna:\n hamming_distance = 1000000\n for i in range(len(text) - k + 1):\n if hamming_distance > compute_hamming_distance(pattern, text[i:i + k]):\n hamming_distance = compute_hamming_distance(pattern, text[i:i + k])\n distance = distance + hamming_distance\n return distance", "def get_word_window(self, pattern, tokens, constraints):\n split_pattern = pattern.split()\n if len(split_pattern) > 1:\n textsnippets = self.__get_word_window_more_words_help(split_pattern, tokens, constraints)\n else:\n textsnippets = self.__get_word_window_one_word_help(pattern, tokens, constraints)\n print(textsnippets)\n return textsnippets", "def set_split(self):\n #Regular expressions; try 1 first, then 2, etc.\n rex1 = re.compile('F?LD')\n rex2 = re.compile('[LF]?LQ')\n \n #For regular expression, check if there is a match that is >10 AA from the end\n if re.search(rex1, self.sequence) and len(re.split(rex1, self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex1.finditer(self.sequence)][-1]\n# end += 16 #TODO why +15/16?\n elif re.search(rex2, self.sequence) and 
len(re.split(rex2,self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex2.finditer(self.sequence)][-1]\n# end += 15\n else:\n self.split_index = -1\n self.core = self.sequence\n self.leader = ''\n return\n self.split_index = end\n self.leader = self.sequence[:end]\n self.core = self.sequence[end:]", "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def matches(self, feature):\n pass", "def same_length(words, pattern):\r\n new_list = list()\r\n for i in words:\r\n if len(pattern) == len(i):\r\n new_list.append(i)\r\n return new_list", "def plaintext_regex_search(pattern, plaintext_data, concordancing=False, **kwargs):\n import re\n if concordancing:\n pattern = r'(.{,140})\\b(' + pattern + r')\\b(.{,140})'\n compiled_pattern = compiler(pattern)\n if compiled_pattern == 'Bad query':\n return 'Bad query'\n matches = re.findall(compiled_pattern, plaintext_data)\n if concordancing:\n matches = [list(m) for m in matches]\n if not concordancing:\n for index, i in enumerate(matches):\n if isinstance(i, tuple):\n matches[index] = i[0]\n if countmode:\n return len(matches)\n else:\n return matches", "def _matchCPattern(cPattern, node):\n if isinstance(cPattern, str):\n return _matchFeatureConstraints(dPattern=cPattern, node=node)\n # Match Root\n if _matchFeatureConstraints(dPattern=cPattern[0], node=node):\n if _matchCPatternChildren(cPattern[1], node.leftChild) and\\\n _matchCPatternChildren(cPattern[2], node.rightChild):\n return True\n return False", "def _preprocessing(self, pattern):\n\n pat_len = len(pattern)\n if pat_len == 1:\n return [1]\n\n Z = [0 for _ in pattern]\n Z[0] = pat_len\n Z[1] = self._matched_len(pattern, 0, 1)\n for i in range(2, 1 + Z[1]):\n Z[i] = Z[1] - i + 1\n # Defines boundaries for z-box\n left = 0\n right = 0\n for i in range(2 + Z[1], pat_len):\n if i <= right: # z-box contains i\n k = i - left\n b = Z[k]\n a = right - i + 1\n if b < a: # b ends within existing z-box\n Z[i] = b\n else: # b ends at or after the end of the z-box, we need to do an explicit match to the right of the z-box\n Z[i] = b + self._matched_len(pattern, a, right+1)\n left = i\n right = i + Z[i] - 1\n else: # z-box does not contain i\n Z[i] = self._matched_len(pattern, 0, i)\n if Z[i] > 0:\n left = i\n right = i + Z[i] - 1\n return Z", "def __checkForPattern(self):\n if self._keyCode in self._patterns:\n assert(self.notify.debug(\"Pattern Match: \" + self._keyCode))\n messenger.send(KeyCodes.PATTERN_MATCH_EVENT, [self._keyCode])\n self.reset()\n \n # If the key code is longer than the longest pattern possible,\n # Then reset! 
\n elif self._keyCodeCount == self._patternLimit or len(self.getPossibleMatchesList()) == 0:\n assert(self.notify.debug(\"No pattern match!\"))\n messenger.send(KeyCodes.PATTERN_NO_MATCH_EVENT)\n self.reset()", "def CountAppStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tif debug:\n\t\tprint len(text)-len(pattern)+1\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\treturn count", "def faster_frequent_words(text, k):\n frequent_patterns = []\n freq_array = compute_freq(text, k)\n max_count = max(freq_array)\n for i in range(0, len(text)-k+1):\n if freq_array[i] == max_count:\n pattern = number_to_pattern(i, k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def _line_fits_pattern(self, logline):\n for (fieldname, pattern) in self._excludepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return False\n if len(self._includepatterns) == 0:\n return True # no includepatterns means 'accept everything'\n for (fieldname, pattern) in self._includepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return True\n return False", "def label_segments(segs, truths, detected):\n for seg in segs:\n for truth in truths:\n if time_overlap(seg, truth): \n seg[\"label\"] = truth[\"label\"]\n for det in detected:\n if time_overlap(seg, det):\n if det[\"label\"] == truth[\"label\"]:\n seg[\"match\"] = True\n else:\n seg[\"match\"] = False\n return segs" ]
[ "0.6140303", "0.5622849", "0.56000507", "0.5588581", "0.5576052", "0.5576052", "0.5569588", "0.5569588", "0.55573744", "0.55385894", "0.55361515", "0.5330484", "0.5278464", "0.5258368", "0.5252977", "0.5252737", "0.5247074", "0.5231641", "0.52287656", "0.52200884", "0.5199797", "0.51691073", "0.5160198", "0.51392853", "0.50973576", "0.50874853", "0.5085479", "0.5082638", "0.5074397", "0.50696343", "0.505011", "0.50392354", "0.5037699", "0.50359243", "0.5020668", "0.50045294", "0.50024307", "0.49993363", "0.49953207", "0.49761665", "0.49586985", "0.49546978", "0.49546978", "0.49521202", "0.49510098", "0.49464446", "0.4945528", "0.49394125", "0.49390838", "0.49349347", "0.4931354", "0.49255282", "0.49211022", "0.4916137", "0.49096733", "0.49001464", "0.4893102", "0.48685923", "0.48554242", "0.48410934", "0.48400626", "0.48362783", "0.48225072", "0.48066807", "0.4789205", "0.47880977", "0.47854665", "0.47835502", "0.47816968", "0.4779934", "0.47774628", "0.47716588", "0.4770025", "0.47690707", "0.47674337", "0.4767166", "0.47665864", "0.476309", "0.47629097", "0.47602078", "0.47534347", "0.47500587", "0.47435924", "0.47397026", "0.47267407", "0.47216776", "0.47214404", "0.47207534", "0.4718362", "0.47127363", "0.47115844", "0.4709837", "0.47069195", "0.47056296", "0.4697802", "0.46943116", "0.4693063", "0.46929067", "0.4690559", "0.46881634" ]
0.54910386
11
Return segments matching a feature mask, a dict of features
def all_segs_matching_fts(self, ft_mask): matching_segs = [ipa for (ipa, fts) in self.segments if fts >= ft_mask] return sorted(matching_segs, key=lambda x: len(x), reverse=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_seg_features(string):\n seg_feature = []\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def filter_segmap(segimage, id_keep, output, blur_kernel=\"\", threshold=0.1):\n seg = pyfits.getdata(segimage)\n mask = np.zeros(seg.shape, 'int')\n # Loop through all IDs... is there a better way??\n for x in id_keep:\n mask = np.where(seg==x, 1, mask)\n seg_masked = np.where(mask==1, 1, 0)\n if os.path.exists(output):\n os.system('rm %s' % output)\n # Now convolve with a blurring kernel if desired\n if len(blur_kernel):\n mask = blur_mask(mask, blur_kernel, threshold=threshold)\n # k = pyfits.getdata(blur_kernel)\n # mask = hconvolve.hconvolve(mask, )\n pyfits.append(output, data=seg_masked, header=pyfits.getheader(segimage))\n return mask", "def get_features_from_segment_raw(seg_raw_df, feature_func_dict):\n # parse input\n if type(feature_func_dict) == str: # it's a json filename\n import json\n feature_func_str = open(feature_func_dict).read()\n feature_func_dict = json.loads(feature_func_str)\n print \"===========start computing features=================\"\n print \"===========feature function dictionary==============\"\n print feature_func_dict\n grouped = seg_raw_df.groupby(s_info.segment_col)\n # parse feature function dictionary\n result = {}\n for feature_name in feature_func_dict:\n print \"==========compute \" + feature_name + \"================\"\n feature = feature_func_dict[feature_name]\n if len(feature['paras']) == 0: # no parameter need to be set, easiest case\n # find out the function\n func_name = feature['handler']\n if hasattr(np, func_name):\n func = getattr(np, func_name)\n elif hasattr(sp_stats, func_name):\n func = getattr(sp_stats, func_name)\n elif hasattr(s_feature, func_name):\n func = getattr(s_feature, func_name)\n else:\n func = func_name\n # prepare columns\n temp = grouped[feature['apply']].aggregate(func)\n result[feature_name] = temp\n else: # has parameters, will compute column one by one\n paras = feature['paras']\n print paras\n # find out the function\n func_name = feature['handler']\n if hasattr(s_feature, func_name):\n func = getattr(s_feature, func_name)\n elif hasattr(np, func_name):\n func = getattr(np, func_name)\n else:\n print func_name + \" can't be found, ignore this feature\"\n continue\n # iterate over columns\n temp = {}\n c = 0\n for col in feature['apply']:\n if paras.has_key('with'): # need another column\n paras['another'] = grouped[paras['with'][c]].copy(True)\n temp[col] = grouped[col].aggregate(func, paras)\n c += 1\n # construct DataFrame\n result[feature_name] = pd.DataFrame(temp)\n print \"Inf values: %s\" % np.any(np.isinf(result[feature_name]))\n print \"NaN values: %s\" % np.any(np.isnan(result[feature_name]))\n feature_raw_df = pd.concat(result, axis=1)\n # feature_raw_df = feature_raw_df.reset_index(drop=True)\n return feature_raw_df", "def fts_intersection(self, segs, normalize=True):\n return reduce(lambda a, b: a & b,\n [self.fts(s, normalize) for s in self.filter_segs(segs, normalize)])", "def _get_segments(img, mean_scale=1000, num_samples=16, return_enough_segments=False):\n # randomly choose the segmentation scale\n scale = np.random.uniform(0.5*mean_scale, 1.5*mean_scale)\n # run heuristic segmentation\n segments = skimage.segmentation.felzenszwalb(img, scale=scale,\n min_size=int(scale))\n # sample a set of segmentations to use; bias toward larger ones\n max_segment = 
segments.max()\n indices = np.arange(max_segment+1)\n seg_count = np.array([(segments == i).sum()+1 for i in indices])\n p = seg_count/seg_count.sum()\n # try this for error correction?\n if num_samples <= max_segment:\n sampled_indices = np.random.choice(indices, p=p, size=num_samples,\n replace=False)\n else:\n warnings.warn(\"not enough unique segments; sampling WITH replacement\")\n sampled_indices = np.random.choice(indices, size=num_samples, replace=True)\n # build normalized segment occupancy masks for each segment we choose\n seg_tensor = np.stack([(segments == i)/seg_count[i] for i in sampled_indices],\n -1).astype(np.float32)\n\n if return_enough_segments:\n enough_segs = num_samples <= max_segment\n return seg_tensor, enough_segs\n return seg_tensor", "def get_seg_features(string):\n seg_feature = []\n\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def extract_features(self, clip):\n #sr, clip_array = wav_read(io.BytesIO(clip.data))\n sr = 16000\n # clip_decoded = base64.decodestring(clip.data)\n # clip_array = np.frombuffer(clip_decoded, dtype=np.float16)\n clip_array = np.array(clip.data)\n if clip_array.ndim > 1:\n clip_array = clip_array[:, 0]\n segments = frame_breaker.get_frames(clip_array, sample_rate=sr)\n segments_encoded = [self.np2base64(s, sr) for s in segments]\n segment_features = [\n [f.feature_value for f in self.extract_feats_for_segment(s).features]\n for s in segments_encoded\n ]\n return segment_features", "def get_regions_mask(self, input):", "def features_from_labels(audio_file, segments):\n segments_features = []\n #for each segment\n for segment in segments:\n features = features_from_label(audio_file, segment)\n #and append it to the list\n segments_features.append(features)\n return segments_features", "def label_segments(segs, truths, detected):\n for seg in segs:\n for truth in truths:\n if time_overlap(seg, truth): \n seg[\"label\"] = truth[\"label\"]\n for det in detected:\n if time_overlap(seg, det):\n if det[\"label\"] == truth[\"label\"]:\n seg[\"match\"] = True\n else:\n seg[\"match\"] = False\n return segs", "def get_segment_colour_map(self, features):\n\n hashList = {'1' : 'Grey',\n '2':'Red',\n '3':'Green',\n '4':'greenyellow',\n '5':'Pink',\n '6':'Orange',\n '7':'goldenrod',\n '8':'indianred',\n '9':'peachpuff',\n '10':'deepskyblue',\n '11':'firebrick',\n '12':'orchid',\n '13': 'moccasin',\n '14':'slateblue',\n '15':'turquoise',\n '16':'tomato',\n '17':'darkmagenta',\n '18':'olivedrab'}\n return hashList", "def get_features(feature_list, these_feature):\n features = {}\n def feat_filter(feature, this):\n try:\n mapper = lambda x, feat: filter(lambda y: feat in y, x.split(\" \"))[0]\n val = mapper(this, feature)\n if '+' in val:\n return TRUE\n return FALSE\n except:\n return UNDEF\n for feat in feature_list:\n features[feat] = feat_filter(feat, these_feature)\n return features", "def select_features(features_list, data):\n\n '''Initalize arrays'''\n f_index = []\n f_dic = {}\n t_index = []\n t_dic = {}\n\n '''\n double loop over both coloum headings and config\n -> very ugly, if time improve\n '''\n for x in range(0, len(data[1].columns.names)):\n for y in range(0, len(features_list)):\n\n if data[1].columns.names[x] == features_list[y]\\\n and features_list[y][:1] != '!' 
\\\n and features_list[y][:1] != '#':\n f_index = np.append(f_index, x)\n f_dic.update({features_list[y]: x})\n\n if features_list[y][:1] == '!' \\\n and features_list[y][1:] == data[1].columns.names[x]:\n t_index = np.append(t_index, x)\n t_dic.update({features_list[y][1:]: x})\n\n return f_index.astype(int), f_dic, t_index.astype(int), t_dic", "def filter_segs(self, segs, normalize=True):\n return list(filter(lambda seg: self.seg_known(seg, normalize), segs))", "def parse_mask(mask):\n mask_dict = dict()\n for i, val in enumerate(mask):\n mask_dict[i] = val\n\n return mask_dict", "def find_intersection_mask(self, seg_tags, inter_mask='MASK_INTER'):\n wcs = self.images['MUSE_WHITE'].wcs\n yc, xc = wcs.sky2pix((self.DEC, self.RA), unit=u.deg)[0]\n maps = {}\n for tag in seg_tags:\n if tag[0:4] == 'SEG_':\n maps[tag[4:]] = self.images[tag].data.data\n else:\n maps[tag] = self.images[tag].data.data\n\n r = findCentralDetection(maps, yc, xc, tolerance=3)\n self.images[inter_mask] = Image(\n wcs=wcs, dtype=np.uint8, copy=False,\n data=intersection(list(r['seg'].values()))\n )", "def get_contour_features(mask,selectcell=\"centered\"):\r\n \r\n #binarize image (everything above 0 becomes 1)\r\n mask = np.clip(mask,a_min=0,a_max=1)\r\n\r\n #for contours, dont use RETR_TREE, but RETR_EXTERNAL as we are not interested in internal objects\r\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n contours = list(contours)\r\n \r\n #in case there is no contour found, add a dummy contour\r\n if len(contours)==0:\r\n contours = [np.array([[[0, 0]],[[0, 1]],[[1, 1]],[[1, 0]]])] #generate a dummy contour\r\n\r\n #Sort contours, longest first\r\n contours.sort(key=len,reverse=True)\r\n contours = [c for c in contours if len(c)>4] #proper contour should have at least 5 points\r\n hulls = [cv2.convexHull(contour,returnPoints=True) for contour in contours]\r\n\r\n mu_origs = [cv2.moments(contour) for contour in contours]\r\n mu_hulls = [cv2.moments(hull) for hull in hulls]\r\n\r\n area_origs = [mu_orig[\"m00\"] for mu_orig in mu_origs]\r\n area_hulls = [mu_hull[\"m00\"] for mu_hull in mu_hulls]\r\n\r\n #drop events where area is zero\r\n hulls = [hulls[i] for i in range(len(hulls)) if area_origs[i]>0] \r\n contours = [contours[i] for i in range(len(contours)) if area_origs[i]>0]\r\n mu_origs = [mu_origs[i] for i in range(len(mu_origs)) if area_origs[i]>0]\r\n mu_hulls = [mu_hulls[i] for i in range(len(mu_hulls)) if area_origs[i]>0]\r\n area_hulls = [area_hulls[i] for i in range(len(area_hulls)) if area_origs[i]>0]\r\n area_origs = [area_origs[i] for i in range(len(area_origs)) if area_origs[i]>0]\r\n \r\n \r\n pos_x = [int(mu_orig['m10']/mu_orig['m00']) for mu_orig in mu_origs]\r\n pos_y = [int(mu_orig['m01']/mu_orig['m00']) for mu_orig in mu_origs]\r\n\r\n \r\n if selectcell == \"smooth\":\r\n #compute the area ratio (roughness of contour)\r\n area_ratio = np.array(area_hulls)/np.array(area_origs)\r\n #get the contour with minimum roughness (smooth contour)\r\n sorter = np.argsort(area_ratio) #smallest first\r\n\r\n if selectcell == \"centered\":\r\n #select contour that is closest to the center of the image. 
\r\n #In iPAC, cells are usually in the center.\r\n mid_x,mid_y = mask.shape[0]/2,mask.shape[1]/2 #middle of the image\r\n BB = [cv2.boundingRect(c) for c in contours] #get a bounding box around the object\r\n distances = [np.sqrt((mid_x-bb[0])**2 + (mid_y-bb[1])**2) for bb in BB]\r\n sorter = np.argsort(distances) #smallest first\r\n \r\n #sort values with respect to chosen metric (area_ratio or distance)\r\n contours = [contours[s] for s in sorter]\r\n hulls = [hulls[s] for s in sorter]\r\n pos_x = [pos_x[s] for s in sorter]\r\n pos_y = [pos_y[s] for s in sorter]\r\n mu_origs = [mu_origs[s] for s in sorter]\r\n area_origs = [area_origs[s] for s in sorter]\r\n area_hulls = [area_hulls[s] for s in sorter]\r\n \r\n # draw mask of the chosen contour\r\n mask = np.zeros_like(mask)\r\n cv2.drawContours(mask,contours,0,1,cv2.FILLED)# produce a contour that is filled inside\r\n\r\n hull = hulls[0]#[0:n_contours]\r\n pos_x = pos_x[0]\r\n pos_y = pos_y[0] \r\n mu_orig = mu_origs[0]#[0:n_contours]\r\n area_orig = area_origs[0]#[0:n_contours]\r\n area_hull = area_hulls[0]#[0:n_contours]\r\n \r\n if area_orig>0:\r\n area_ratio = area_hull/area_orig\r\n else:\r\n area_ratio = np.nan\r\n\r\n arc = cv2.arcLength(hull, True) \r\n circularity = 2.0 * np.sqrt(np.pi * mu_orig[\"m00\"]) / arc\r\n\r\n\r\n dic = {\"mask\":mask,\"pos_x\":pos_x,\"pos_y\":pos_y,\"area_orig\":area_orig,\"area_hull\":area_hull,\\\r\n \"area_ratio\":area_ratio,\"circularity\":circularity}\r\n return dic", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def _series_merging_map(self, map_list, feature_option=\"sift\"):\n print(\" --- Start ---\")\n # Transform state into 3 specified values\n for i in range(len(map_list)):\n map_list[i] = cv2.cvtColor(map_list[i], cv2.COLOR_RGB2GRAY)\n map_list[i] = MF._transform_state(map_list[i])\n \n\n map_ref = map_list[0]\n for i in range(len(map_list)-1):\n map_align = map_list[i+1]\n\n \n if feature_option == \"orb\":\n orb = cv2.ORB_create()\n key_points_1, descriptor_1 = orb.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = orb.detectAndCompute(map_align, None)\n \n elif feature_option == \"surf\":\n surf = cv2.xfeatures2d.SURF_create(400)\n key_points_1, descriptor_1 = surf.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = surf.detectAndCompute(map_align, None)\n else:\n siftDetector = cv2.xfeatures2d.SIFT_create()\n key_points_1, descriptor_1 = siftDetector.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = siftDetector.detectAndCompute(map_align, None)\n\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(descriptor_1, descriptor_2, k=2)\n\n good = []\n for m, n in matches:\n if m.distance < 0.75*n.distance:\n good.append(m)\n \n pts_1, pts_2 = [], []\n for i in good:\n query_idx = i.queryIdx\n train_idx = i.trainIdx\n\n pts_1.append([\n key_points_1[query_idx].pt[0],\n key_points_1[query_idx].pt[1],\n ])\n pts_2.append([\n key_points_2[train_idx].pt[0],\n key_points_2[train_idx].pt[1],\n ])\n \n pts1 = np.array(pts_1)\n pts2 = np.array(pts_2)\n\n # relation, value, _ = RMM._ransac_find_rotation_translation(pts_set_1=pts2, pts_set_2=pts1, sigma=0.5, max_iter=5000)\n # print(\"- Inlier Percent: %f\"%value)\n # # Because the coordinates between the maps and the SIFT features are different:\n # # SIFT Features: Right: +x, Down: +y\n # # Maps: Down: +x, Right: +y\n # # Hence the dx and dy should be changed.\n # dx = relation[1]\n # dy = relation[0]\n # dyaw = relation[2]\n # 
print(\"- (x, y, t): (%f, %f, %f)\"%(dx,dy,dyaw))\n\n # # index, agr, dis = RMM._similarity_index(x=[dy, dx, dyaw], map1=map_ref, map2=map_align)\n # # print(\"Similarity Index: %f\\nAgree Number: %f\\nDisargee Number: %f\"%(index, agr, dis))\n # index, agr, dis, _ = RMM._similarity_index_2(x=[dx, dy, dyaw], map1=map_ref, map2=map_align)\n # print(\"- Similarity Index: %f\\n- Agree Number: %f\\n- Disargee Number: %f\"%(index, agr, dis))\n \n # map_merged = MF._merging_map(dx=dx, dy=dy, dtheta=dyaw, map1=map_ref, map2=map_align)\n # map_ref = map_merged.astype(np.uint8)\n # map_ref = MF._modify_map_size(merged_map=map_ref)\n\n relation, value, _ = RANSAC_Map_Merging()._ransac_find_all(pts_set_1=pts2, pts_set_2=pts1, sigma=5, max_iter=2000)\n dx = relation[1]\n dy = relation[0]\n dyaw = relation[2]\n dr = relation[3]\n print(\"- Inlier Percent: %f\"%value)\n print(\"- (dx, dy, dyaw, dr) = %f, %f, %f, %f\"%(dx,dy,dyaw, dr))\n map_merged = MAP_Function()._merging_map_ratio(dx=dx, dy=dy, dtheta=dyaw, dr=dr, map1=map_ref, map2=map_align)\n map_ref = map_merged.astype(np.uint8)\n map_ref = MF._modify_map_size(merged_map=map_ref)\n\n # return map_ref, (dx, dy, dyaw)\n return map_ref, (dx, dy, dyaw, dr)", "def extract_features(document):\n document_words = set(document)\n features = {}\n global word_features\t\n for word in word_features:\n features['contains(%s)' % word] = (word in document_words)\n return features", "def extract_data_for_mask_loss_from_matches(\n proposals_targets: Iterable[Instances], estimated_segm: torch.Tensor\n) -> DataForMaskLoss:\n data = DataForMaskLoss()\n masks_gt = []\n offset = 0\n assert estimated_segm.shape[2] == estimated_segm.shape[3], (\n f\"Expected estimated segmentation to have a square shape, \"\n f\"but the actual shape is {estimated_segm.shape[2:]}\"\n )\n mask_size = estimated_segm.shape[2]\n num_proposals = sum(inst.proposal_boxes.tensor.size(0) for inst in proposals_targets)\n num_estimated = estimated_segm.shape[0]\n assert (\n num_proposals == num_estimated\n ), \"The number of proposals {} must be equal to the number of estimates {}\".format(\n num_proposals, num_estimated\n )\n\n for proposals_targets_per_image in proposals_targets:\n n_i = proposals_targets_per_image.proposal_boxes.tensor.size(0)\n if not n_i:\n continue\n gt_masks_per_image = proposals_targets_per_image.gt_masks.crop_and_resize(\n proposals_targets_per_image.proposal_boxes.tensor, mask_size\n ).to(device=estimated_segm.device)\n masks_gt.append(gt_masks_per_image)\n offset += n_i\n if masks_gt:\n data.masks_est = estimated_segm\n data.masks_gt = torch.cat(masks_gt, dim=0)\n return data", "def encode_segmap(self, mask):\n for voidc in self.void_labels:\n mask[mask == voidc] = self.ignore_index\n for validc in self.valid_labels:\n mask[mask == validc] = self.class_map[validc]\n # remove extra idxs from updated dataset\n mask[mask > 33] = self.ignore_index\n return mask", "def Get_Label_Features(mask_in, feature_dict, convert_length = 0.2204315, eps_factor = 0.025, area_thresh = 2): \n\n nfeatures = len(feature_dict)\n Image_Features = pd.DataFrame({'Type':[],'Feature_Area':[], 'x':[], 'y':[]})\n Contours_List = []\n\n # Expand mask into one-hot mask if input is flat\n if len(mask_in.shape)==2:\n mask_in = Expand_Mask(mask_in, feature_dict = feature_dict)\n \n # Loop through mask layers (i.e., feature types) and calculate contours \n for i in range(nfeatures):\n Contours_List.append(list()) \n for ii in feature_dict.keys():\n nii = int(ii)\n mask = mask_in[:,:,nii] \n mask = 
255*mask.round().astype('uint8')\n mask = np.stack((mask,mask, mask),-1)\n mask = cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY);\n ret, thresh = cv2.threshold(mask, 127.5, 255, cv2.THRESH_BINARY)\n contours,hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n for cnt in contours:\n # arcLength args: Contours, flag of whether curve is closed or not\n epsilon = eps_factor*cv2.arcLength(cnt,True)\n # approxPolyDP args: Contours, epsilon for wiggliness, closed shape or not\n approx = cv2.approxPolyDP(cnt,epsilon,True)\n # Extract area and convert to square meters \n area = convert_length**2 * cv2.contourArea(approx)\n if area > area_thresh: ## Filter small features / noise\n ## Compute centroid from moments\n M = cv2.moments(cnt)\n cx = int(M['m10']/(1e-5 + M['m00']))*convert_length\n cy = int(M['m01']/(1e-5 + M['m00']))*convert_length\n Image_Features = Image_Features.append({'Type':feature_dict[ii], 'Feature_Area':area, \n 'x':cx, 'y':cy}, ignore_index = True)\n Contours_List[nii].append(cnt)\n return Contours_List, Image_Features.copy()", "def matching_function_segment(self, idx):\n start = self.matching_function_startpoint(idx)\n end = self.matching_function_endpoint(idx)\n return [start, end]", "def _idx_pair_to_mask(beg_indices, end_indices, inputs, tgt_len, num_predict):\n non_func_mask = tf.not_equal(inputs, FLAGS.eos_id)\n all_indices = tf.where(\n non_func_mask,\n tf.range(tgt_len, dtype=tf.int64),\n tf.constant(-1, shape=[tgt_len], dtype=tf.int64))\n candidate_matrix = tf.cast(\n tf.logical_and(\n all_indices[None, :] >= beg_indices[:, None],\n all_indices[None, :] < end_indices[:, None]),\n tf.float32)\n cumsum_matrix = tf.reshape(\n tf.cumsum(tf.reshape(candidate_matrix, [-1])),\n [-1, tgt_len])\n masked_matrix = (tf.cast(cumsum_matrix <= num_predict, tf.float32)\n * candidate_matrix)\n target_mask = tf.reduce_sum(masked_matrix, axis=0)\n is_masked = tf.cast(target_mask, tf.bool)\n\n segment_range = tf.cast(tf.range(1, tf.shape(candidate_matrix)[0] + 1),\n dtype=candidate_matrix.dtype)\n segment_matrix = segment_range[:, None] * candidate_matrix\n segment_ids = tf.reduce_sum(segment_matrix * masked_matrix, axis=0)\n segment_ids = tf.cast(segment_ids, dtype=inputs.dtype)\n\n pos_mat = tf.cumsum(candidate_matrix, axis=1, exclusive=True)\n pos_seq = tf.reduce_sum(pos_mat * masked_matrix, axis=0)\n\n return is_masked, segment_ids, pos_seq", "def find_feature(self, pattern):\n idxs = []\n for idx, header in enumerate(self.headers):\n header = header.decode('utf-8')\n lp = len(pattern)\n\n # Find continuations\n if header == pattern:\n idxs.append(idx)\n elif header[:lp] == pattern and header[lp] in [str(i) for i in range(0, 10)]:\n idxs.append(idx)\n\n return idxs", "def encode_segmap(self, mask):\n mask = mask.astype(int)\n label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)\n for ii, label in enumerate(self.get_pascal_labels()):\n label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii\n label_mask = label_mask.astype(int)\n return label_mask", "def ipa_segs(self, word, normalize=True):\n if normalize:\n word = FeatureTable.normalize(word)\n return self._segs(word, include_invalid=False, normalize=normalize)", "def pairwise_algorithm(segments):\n \n segment_pairs = [(x, y) for x in segments for y in segments if x[\"name\"] < y[\"name\"]]\n \n # key is a segment name, value is a set of those features that are contrastive\n # for that segment\n contrastive_features = defaultdict(set)\n \n for x, y in segment_pairs:\n assert x.keys() == 
y.keys()\n contrastive_feature = None\n for k, v in x.items():\n if k != \"name\" and v != y[k]:\n if contrastive_feature is None:\n contrastive_feature = k\n else:\n contrastive_feature = None\n break\n if contrastive_feature:\n contrastive_features[x[\"name\"]].add(contrastive_feature)\n contrastive_features[y[\"name\"]].add(contrastive_feature)\n \n return contrastive_features", "def select_regions(binary,f,min=0,nbest=100000):\n if binary.max() == 1:\n labels,_ = label(binary)\n else:\n labels = binary.astype(uint8)\n objects = find_objects(labels)\n scores = [f(o) for o in objects]\n best = argsort(scores)\n keep = zeros(len(objects)+1,'i')\n if nbest > 0:\n for i in best[-nbest:]:\n if scores[i]<=min: continue\n keep[i+1] = 1\n # print scores,best[-nbest:],keep\n # print sorted(list(set(labels.ravel())))\n # print sorted(list(set(keep[labels].ravel())))\n return keep[labels]", "def map_vert_to_class(self, mask_seg):\n mask_seg[mask_seg==self.vertebra_range[0]] = 1\n mask_seg[mask_seg==self.vertebra_range[1]] = 2\n mask_seg[mask_seg==self.vertebra_range[2]] = 3\n mask_seg[mask_seg==self.vertebra_range[3]] = 4\n mask_seg[mask_seg==self.vertebra_range[4]] = 5\n vert_values=np.arange(1,6)\n found_vert = np.in1d(mask_seg, vert_values)\n found_vert = np.reshape(found_vert, mask_seg.shape)\n mask_seg[found_vert==False] = 0\n return mask_seg", "def get_spatial_features(idx, topk, database_dict):\n\n features = database_dict['features_database'][idx]\n segments = database_dict['segments_database'][idx]\n\n if len(features) < 7:\n return []\n\n seg_tdists, seg_tdist_ids = get_segment_MTD(segments, topk)\n pooled_softmax_features = np.zeros((len(features), np.shape(features)[1]))\n\n # For each segment, pool features from related segments\n for c in range(len(segments)):\n dist = seg_tdists[c]\n ind = seg_tdist_ids[c]\n exp_dists = np.exp(-0.1*dist)\n exp_dists /= np.sum(exp_dists)\n\n for nn_idx in range(min(topk, len(ind))):\n f_vec = features[ind[nn_idx]]\n pooled_softmax_features[c] += exp_dists[nn_idx]*f_vec\n\n return pooled_softmax_features", "def slicing(features, seeds_features, seeds_label, label_map, adjacency,\n sigma=1., resize_shape=(480, 854)):\n label_map_flatten = np.reshape(label_map, [-1])\n num_seeds = np.max(label_map)+1\n # Label_map_one_hot [num_pixels, num_seeds_current]\n label_map_one_hot = np.zeros((label_map_flatten.shape[0], num_seeds), dtype=np.int16)\n label_map_one_hot[np.arange(label_map_flatten.shape[0]), label_map_flatten] = 1\n # weight_idx: [num_pixels, num_seeds_cur_prev_following_frame]\n # Only neighbouring seeds have weights > 0\n weight_idx = np.matmul(label_map_one_hot, adjacency)\n feature_dim = features.shape[2]\n\n # This implementation is not very efficient\n # It computes pairwise distance between all pixels and all seeds (from 3 frames)\n # dist: [num_pixels, num_seeds_cur_prev_following_frame]\n dist = euclidean_distances(np.reshape(features, [-1, feature_dim]), seeds_features)\n weight = np.exp(-dist*dist/sigma/sigma)\n weight *= weight_idx\n fg_votes = np.max(weight*np.expand_dims(seeds_label==1, 0), axis=1)\n bg_votes = np.max(weight*np.expand_dims(seeds_label==0, 0), axis=1)\n height = features.shape[0]\n width = features.shape[1]\n fg_votes = fg_votes.reshape((height, width))+1e-8\n bg_votes = bg_votes.reshape((height, width))+1e-8\n fg_votes = cv2.resize(fg_votes, (resize_shape[1], resize_shape[0]),\n interpolation=cv2.INTER_LINEAR)\n bg_votes = cv2.resize(bg_votes, (resize_shape[1], resize_shape[0]),\n interpolation=cv2.INTER_LINEAR)\n\n 
prob = np.stack([bg_votes, fg_votes], axis=2)\n dist_vis = utils.get_heatmap(np.concatenate([fg_votes, bg_votes], axis=0))\n prob = prob/np.sum(prob, axis=2, keepdims=True)\n\n return prob, dist_vis", "def mask(self):\n mask = np.zeros((self.height, self.width))\n pts = [\n np.array(anno).reshape(-1, 2).round().astype(int)\n for anno in self.segmentation\n ]\n mask = cv2.fillPoly(mask, pts, 1)\n return mask", "def get_segments(input_path):\n with open(input_path, 'r') as segments_file:\n segments = []\n for line in segments_file:\n words = line.split('\\t')\n sg_dict = {}\n sg_dict['start'] = float(words[0].replace(',', '.'))\n sg_dict['end'] = float(words[1].replace(',', '.'))\n sg_dict['class'] = words[2][:-1]\n segments.append(sg_dict)\n return segments", "def postprocess_segments(self):\n # make segs a list of mask arrays, it's easier to store\n # as there is a hdf5 equivalent\n for iseg, seg in enumerate(self.segs):\n mask = np.zeros(self.X.shape[0], dtype=bool)\n mask[seg] = True\n self.segs[iseg] = mask\n # convert to arrays\n self.segs = np.array(self.segs)\n self.segs_tips = np.array(self.segs_tips)", "def _extract_sample(self, features, masks, imin, imax, shapev, needslabels=False, one_hot=True):\n\n # prepare containers\n tempdata = np.zeros([len(features)] + self.w, dtype=np.float32)\n featuredata = [f.squeeze() for f in features]\n templabels = []\n\n # accumulate mean and std for normalization\n if self.whiten and not self.whiten_subvolumes:\n numvoxs = [\n np.prod([s if g is None else g for g, s in zip(self.presize_for_normalization, f.squeeze().shape)]) for\n f in featuredata]\n means = [np.sum(f) * 1.0 / n for f, n in zip(featuredata, numvoxs)]\n stddevs = [np.sqrt(np.abs(np.mean((featuredata[i] - means[i]) ** 2))) for i in range(len(featuredata))]\n\n if np.sum(self.deform) + np.sum(self.rotation) + np.sum(self.scaling) + np.sum(\n self.shift) == 0 and not self.interpolate_always: # No deformation/scaling/rotation\n # infer the valid part of subvolume in both source and target\n ranges = np.zeros((len(imin), 2), dtype=np.int32)\n ranges[:, 1] = 1\n ranges[:len(self.w), 1] = self.w\n imin = np.int32(imin)\n imax = np.int32(imax)\n for i in range(len(imin)):\n if imin[i] < 0:\n ranges[i, 0] -= imin[i]\n imin[i] -= imin[i]\n if imax[i] >= shapev[i]:\n ranges[i, 1] -= ((imax[i] - shapev[i]))\n imax[i] -= ((imax[i] - shapev[i]))\n # now index accordingly:\n targetindex = tuple([slice(None)] + [slice(np.int32(r[0]), np.int32(r[1])) for r in ranges])\n sourcesindex = tuple([slice(np.int32(mi), np.int32(ma)) for mi, ma in zip(imin, imax)])\n tempdata[targetindex] = np.asarray([f[sourcesindex] for f in featuredata])\n\n if len(masks):\n templabels = np.zeros(self.w, dtype=np.uint8)\n templabels[targetindex[1:]] = np.asarray([f.squeeze()[sourcesindex] for f in masks])\n if one_hot and not self.regression:\n templabels = self._one_hot_vectorize(templabels, self.nclasses, zero_out_label=self.zero_out_label)\n\n\n else: # we need to interpolate\n coords = np.float64(np.mgrid[[slice(np.int32(imi), np.int32(ima)) for imi, ima in zip(imin, imax)]])\n # coords = np.mgrid[imin[0]:imax[0],imin[1]:imax[1],imin[2]:imax[2]]\n coords = self.transformAffine(coords)\n if np.sum(self.deform):\n # create deformationfield:\n deform = self._get_deform_field_dm\n\n self.deformfield = deform()\n coords += self.deformfield\n\n # and set accordingly:\n if len(masks):\n if one_hot and not self.regression:\n if len(masks) > 1:\n logging.getLogger('data').error(\n 'cant have more than one mask with one_hot 
encoding in griddatacollection')\n if self.softlabels:\n mask = self._one_hot_vectorize(np.int32(masks[0]), self.nclasses,\n zero_out_label=self.zero_out_label)\n templabels = [map_coordinates(mask[..., c].squeeze(), coords, order=1, cval=np.float32(c == 0))\n for c in range(self.nclasses)]\n templabels = np.concatenate([np.expand_dims(l, -1) for l in templabels], axis=-1)\n else:\n templabels = map_coordinates(masks[0].squeeze(), coords, order=0)\n templabels = self._one_hot_vectorize(templabels, self.nclasses,\n zero_out_label=self.zero_out_label)\n\n if needslabels:\n if np.sum(np.asarray(templabels[..., self.minlabel:])) == 0:\n return [], []\n\n else:\n # logging.getLogger('data').warning(\n # 'maybe you want to revise this section before using! when do we not need a onehot?')\n templabels = np.asarray(\n [map_coordinates(f.squeeze(), coords, order=1 if self.softlabels else 0) for f in masks])\n templabels = templabels.transpose([i for i in range(1, len(templabels.shape))] + [0])\n if needslabels:\n if np.sum(templabels >= self.minlabel) == 0:\n return [], []\n tempdata = [map_coordinates(np.float32(f).squeeze(), coords, mode=self.padding_rule,\n order=self.interpolation_order) for f in features]\n tempdata = [x.reshape((self.w + [1])) for x in tempdata] # FIXME: maybe we can just use expand_dims?\n if self.whiten:\n if self.whiten_subvolumes:\n raise Exception('not supported anymore')\n # for i in range(len(tempdata)):\n # tempdata[i] = tempdata[i] - np.mean(tempdata[i])\n # tempdata[i] /= np.sqrt(np.mean(tempdata[i] ** 2)) + 1e-20\n elif self.half_gaussian_clip:\n raise Exception('not supported anymore')\n # tempdata = [np.clip((x - means[i]) / (5 * stddevs[i]) - 1, -0.99999, 0.99999) for i, x in\n # enumerate(tempdata)]\n else:\n tempdata = [(x - means[i]) / stddevs[i] for i, x in enumerate(tempdata)]\n if self.vary_mean > 0 or self.vary_stddev > 0:\n tempdata = [x * ((self.deformrandomstate.rand() - 0.5) * self.vary_stddev + 1) + (\n self.deformrandomstate.rand() - 0.5) * self.vary_mean for x in tempdata]\n tempdata = np.concatenate(tempdata, -1)\n\n if np.sum(self.mirror):\n fr = []\n orig = []\n for i in self.mirror:\n fr.append(slice(None, None, np.int32(1 - self.deformrandomstate.randint(2) * i * 2)))\n orig.append(slice(None))\n fr.append(slice(None)) # features / labels\n orig.append(slice(None))\n tempdata[orig] = tempdata[fr]\n templabels[orig] = templabels[fr]\n if self.gaussiannoise > 0:\n tempdata *= (1 + (self.deformrandomstate.rand(*tempdata.shape) - 0.5) * self.gaussiannoise)\n return tempdata, templabels", "def create_mask_list(self, seg_img, K):\n all_ids = np.unique(seg_img)\n chosen_ids = np.random.choice(all_ids, K)\n\n return [(seg_img == ID).astype(np.float32) for ID in chosen_ids]", "def seg_to_mask(seg, width=1.0, height=1.0):\n if type(seg) == list:\n rles = mask_utils.frPyObjects(seg, height, width)\n rle = mask_utils.merge(rles)\n elif type(seg['counts']) == list:\n rle = mask_utils.frPyObjects(seg, height, width)\n else:\n rle = seg\n return mask_utils.decode(rle)", "def get_seg_masks(self, mask_pred, det_bboxes, det_labels,\n ori_shape, scale_factor, rescale):\n if isinstance(mask_pred, torch.Tensor):\n mask_pred = mask_pred.sigmoid().cpu().numpy()\n assert isinstance(mask_pred, np.ndarray)\n # when enabling mixed precision training, mask_pred may be float16\n # numpy array\n mask_pred = mask_pred.astype(np.float32)\n\n cls_segms = [[] for _ in range(80)]\n bboxes = det_bboxes.cpu().numpy()[:, :4]\n labels = det_labels.cpu().numpy() + 1\n\n if 
rescale:\n img_h, img_w = ori_shape[:2]\n else:\n img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)\n img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)\n scale_factor = 1.0\n\n for i in range(bboxes.shape[0]):\n bbox = (bboxes[i, :] / scale_factor).astype(np.int32)\n bbox[0] = max(bbox[0], 0)\n bbox[1] = max(bbox[1], 0)\n bbox[2] = min(bbox[2], img_w-1)\n bbox[3] = min(bbox[3], img_h-1)\n label = labels[i]\n w = max(bbox[2] - bbox[0] + 1, 1)\n h = max(bbox[3] - bbox[1] + 1, 1)\n\n if not None:\n mask_pred_ = mask_pred[i, label, :, :]\n else:\n mask_pred_ = mask_pred[i, 0, :, :]\n im_mask = np.zeros((img_h, img_w), dtype=np.uint8)\n\n bbox_mask = mmcv.imresize(mask_pred_, (w, h))\n bbox_mask = (bbox_mask > 0.5).astype(np.uint8)\n im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = bbox_mask\n rle = mask_util.encode(\n np.array(im_mask[:, :, np.newaxis], order='F'))[0]\n cls_segms[label - 1].append(rle)\n\n return cls_segms", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def subset_keypoints(self, item, point_features):\n\t\tthreshold = self.cfg.KEYPOINT_WEIGHT.THRESHOLD\n\t\tpoint_features = point_features.permute(0, 2, 1)\n\t\tmask = (item['P_keypoint_seg'] > threshold)\n\t\tpoint_features = point_features[mask, :]\n\t\tif len(point_features.shape) < 3:\n\t\t\tpoint_features = point_features.view(-1, *point_features.shape) # Add lost dimension\n\t\tpoint_features = point_features.permute(0, 2, 1) \n\t\titem['keypoints'] = item['keypoints'][mask] \n\t\treturn item, point_features", "def get_seg_masks(self, preds, img_metas, pad_shape):\n\n mask_pred, reg_pred = preds\n h_pad, w_pad = pad_shape\n cell_region_mask, gp_mask_hor, gp_mask_ver = [], [], []\n for i, meta in enumerate(img_metas):\n h_img, w_img, _ = meta['img_shape']\n h_ori, w_ori, _ = meta['ori_shape']\n if isinstance(mask_pred, torch.Tensor):\n mask_pred = mask_pred.sigmoid().cpu().numpy()\n if isinstance(reg_pred, torch.Tensor):\n reg_pred = reg_pred.cpu().numpy()\n\n mask_pred_ = mask_pred[i, 0, :, :]\n mask_pred_resize = mmcv.imresize(mask_pred_, (w_pad, h_pad))\n mask_pred_resize = mmcv.imresize(mask_pred_resize[:h_img, :w_img], (w_ori, h_ori))\n mask_pred_resize = (mask_pred_resize > 0.5)\n cell_region_mask.append(mask_pred_resize)\n\n reg_pred1_ = reg_pred[i, 0, :, :]\n reg_pred2_ = reg_pred[i, 1, :, :]\n reg_pred1_resize = mmcv.imresize(reg_pred1_, (w_pad, h_pad))\n reg_pred2_resize = mmcv.imresize(reg_pred2_, (w_pad, h_pad))\n reg_pred1_resize = mmcv.imresize(reg_pred1_resize[:h_img, :w_img], (w_ori, h_ori))\n reg_pred2_resize = mmcv.imresize(reg_pred2_resize[:h_img, :w_img], (w_ori, h_ori))\n gp_mask_hor.append(reg_pred1_resize)\n gp_mask_ver.append(reg_pred2_resize)\n\n return list(zip(cell_region_mask, gp_mask_hor, gp_mask_ver))", "def selectData(self, features, target, d):\n\n scan_mask, self.scan_starts, self.scan_ends = self.GetScanPositions(d)\n\n selectFeature = self.featureBits(features.astype(float), self.ifeature)\n\n self.select_mask = (scan_mask & selectFeature)\n \n return self.select_mask", "def features_from_label(audio_file, segment):\n duration = segment['end'] - segment['start']\n audio, sample_rate = librosa.core.load(\n audio_file,\n duration=duration,\n offset=segment['start']\n 
)\n features = fe.get_features(audio, sample_rate)\n return features", "def masktoregions(in_mask):\n regions = []\n for i in [0,1]: # do the thing for the first and second strands\n current_strand = in_mask[i].copy().astype(float)\n current_strand[-1] = np.nan # set final position to np.nan to avoid overlap issues\n transitions = current_strand - np.roll(current_strand,1)\n true_start = np.where(transitions == 1)[0]\n true_end = np.where(transitions == -1)[0] - 1\n if current_strand[0] == 1: # if starts on True, add True start to front end\n true_start = np.r_[0,true_start]\n if in_mask[i][-1] == True: # if ends on True, add True end to back end\n true_end = np.r_[true_end, len(current_strand)-1]\n if in_mask[i][-2] == False: # if the one before is False, it's a single point True\n true_start = np.r_[true_start,len(current_strand)-1]\n if np.all(in_mask[i][-2:] == [True, False]):\n true_end = np.r_[true_end, len(current_strand)-2]\n regions.append(np.asarray([np.zeros(len(true_start))+i,true_start,true_end]).T)\n out_regions = np.concatenate(regions,axis=0).astype(int)\n return out_regions", "def get_segments(weights, threshold):\n marker_list = [True if i >= threshold else False for i in weights]\n i = 0\n final_pairs = []\n while i < len(weights):\n if marker_list[i]:\n start = i\n while i < len(weights) and marker_list[i]:\n i = i + 1\n end = i - 1\n if end-start > 1:\n final_pairs.append(start)\n final_pairs.append(end)\n i = i + 1\n return np.array(final_pairs)", "def Expand_Mask(mask, feature_dict):\n new_mask = np.zeros(mask.shape + (len(feature_dict),))\n for i in feature_dict.keys():\n ni = int(i)\n new_mask[mask == ni,ni] = 1 \n return new_mask", "def get_processed_masks(segm: torch.Tensor):\n maxpool_segm1 = nn.MaxPool3d(kernel_size=(3, 4, 4))\n maxpool_segm2 = nn.MaxPool3d(kernel_size=(3, 8, 8))\n maxpool_segm3 = nn.MaxPool3d(kernel_size=(3, 16, 16))\n\n true_mask1 = maxpool_segm1(segm)\n true_mask2 = maxpool_segm2(segm)\n true_mask3 = maxpool_segm3(segm)\n\n true_mask_inv1 = 1 - true_mask1\n true_mask_inv2 = 1 - true_mask2\n true_mask_inv3 = 1 - true_mask3\n\n true_masks = [true_mask1, true_mask2, true_mask3]\n invert_masks = [true_mask_inv1, true_mask_inv2, true_mask_inv3]\n\n return true_masks, invert_masks", "def extract_features(input_feature_map, points=conv43Points):\n arr = []\n for y,x in points:\n arr.append(input_feature_map[:,y,x,:])\n return tf.stack(arr, axis=1, name=\"extracted_features\"), len(points)", "def randomize_regions(features, probs, mask):\n targets = torch.ones_like(probs) / probs.shape[-1]\n targets_mask = torch.zeros_like(mask)\n\n p = torch.rand_like(mask.float()) * mask.float()\n\n # set targets for masked regions\n thresh = 0.85\n targets[p >= thresh] = probs[p >= thresh]\n targets_mask[p >= thresh] = 1\n\n # replace 90% of the masked features with zeros\n thresh = 0.85 + 0.15 * 0.1\n features[p >= thresh] = 0\n\n return features, targets, targets_mask", "def test_masking_functions(sersic_2d_image, segm_and_cat):\n\n cat, segm, segm_deblend = segm_and_cat\n\n source_label = cat[0].label\n\n # Testing segm_mask\n mask = pf.segm_mask(source_label, segm_deblend)\n assert mask.shape == sersic_2d_image.shape\n unique_labels_in_mask = np.unique(segm_deblend.data[mask])\n assert set(unique_labels_in_mask) == {0, source_label}\n\n # Testing masked_segm_image\n masked_image = pf.masked_segm_image(source_label, sersic_2d_image, segm_deblend, fill=-9999)\n assert masked_image.shape == sersic_2d_image.shape\n assert np.all(masked_image[np.invert(mask)] == 
-9999)\n assert np.all(masked_image[mask] == sersic_2d_image[mask])", "def get_segmented_point_clouds(seg_masks, depth): \n obj_labels = np.unique(seg_masks)\n num_objs = obj_labels.shape[0]+1\n rows, cols = seg_masks.shape\n cm = plt.get_cmap('gist_rainbow')\n colors = [cm(1. * i/num_objs) for i in range(num_objs)]\n \n object_dict = {}\n # key - object label; val - depth array of that object\n for i in obj_labels:\n object_dict[i] = np.zeros((rows,cols), dtype = np.float32)\n\n for i in range(rows):\n for j in range(cols):\n if seg_masks[i][j] != 0 and seg_masks[i][j] != -1:\n object_dict[seg_masks[i][j]][i][j] = depth[i][j]\n \n segmented_pcds = []\n for key, val in object_dict.items():\n if key == -1 or key == 0:\n continue\n img = o3d.geometry.Image(val)\n pcd_from_depth = o3d.geometry.PointCloud.create_from_depth_image(\n img,\n o3d.camera.PinholeCameraIntrinsic(\n o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n\n # Multiply with Transformation matrix to get correct view of the PCD\n pcd_from_depth.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n pcd_from_depth.paint_uniform_color(np.array(colors[key][:3], dtype = np.uint8) * 255)\n segmented_pcds.append(pcd_from_depth)\n return segmented_pcds", "def find_regions(directives):\n regions = {}\n for directive in directives:\n if directive.startswith(\"sequence-region\"):\n try:\n _, accession, start, end = directive.split(\" \")\n regions[accession] = (int(start), int(end))\n except ValueError:\n # likely sequence-region without coordinates\n pass\n return regions", "def Feature(imgs, name = 'SIFT', scale = 0.6):\n bf = cv2.BFMatcher()\n if name == 'SIFT':\n process = cv2.xfeatures2d.SIFT_create()\n elif name == 'SURF':\n process = cv2.xfeatures2d.SURF_create()\n else :\n process = cv2.ORB_create(200)\n bf = cv2.BFMatcher(cv2.NORM_HAMMING)\n\n kps = []\n dess = []\n for img in imgs:\n kp,des= process.detectAndCompute(img, None)\n kps.append(kp)\n dess.append(des)\n\n results = []\n des = dess[0]\n for e_kp, e_des in zip(kps[0], dess[0]):\n results.append([(e_kp, e_des)])\n\n for i in range(1,len(imgs)):\n matches = bf.knnMatch(des, dess[i], k=2)\n goods = [m for m, n in matches if m.distance < scale * n.distance]\n new_results = []\n new_des = []\n for good in goods: \n results[good.queryIdx].append((kps[i][good.trainIdx],dess[i][good.trainIdx]))\n new_results.append(results[good.queryIdx])\n new_des.append(des[good.queryIdx])\n results = new_results\n des = np.array(new_des)\n\n return results", "def calc_bounds(segmentation: np.ndarray) -> Dict[int, BoundInfo]:\n bound_info = {}\n count = np.max(segmentation)\n for i in range(1, count + 1):\n component = np.array(segmentation == i)\n if np.any(component):\n points = np.nonzero(component)\n lower = np.min(points, 1)\n upper = np.max(points, 1)\n bound_info[i] = BoundInfo(lower=lower, upper=upper)\n return bound_info", "def _find_intersecting_feature_values(self, pt):\n fldvals = {}\n # Intersect with spatial index to get ID (fid) of intersecting features\n intersect_fids = self._intersect_with_spatial_index(pt)\n if not intersect_fids:\n if self._is_disjoint is False:\n raise GeoException(\"Failed to find polygon in contiguous spatial index\")\n else:\n # Pull attributes of interest from intersecting feature\n fid = self._intersect_with_polygons(pt, intersect_fids)\n if fid is None:\n raise GeoException(\n f\"Failed to intersect with any of {len(intersect_fids)} features\")\n\n # Retrieve values from intersecting polygon\n for fn in 
self.bison_spatial_fields:\n fldvals[fn] = self.spatial_feats[fid][fn]\n\n return fldvals", "def get_feature_masks(feature, mask_dimension, road_width_px, include_bezier=True, driving_line_road_px=5, bezier_offset=(0,0)):\n np_mask_dim = (mask_dimension[1], mask_dimension[0])\n feature_masks = []\n to_feature = np.zeros(np_mask_dim)\n col = (255,255,255)\n feature_point = feature[0]\n approach_point = feature[1]\n exit_point = feature[2]\n cv2.line(to_feature, approach_point, feature_point, col, thickness=road_width_px)\n feature_masks.append(to_feature.astype(np.uint8))\n print(\"road_width_px=\",road_width_px)\n \n n = len(feature)\n print(n)\n if len(feature) > 2:\n for i in range(2, n):\n mask = np.zeros(np_mask_dim)\n cv2.line(mask, feature_point, feature[i], col, thickness=road_width_px)\n feature_masks.append(mask.astype(np.uint8))\n \n \n p1 = np.add(feature_point, bezier_offset)\n p2 = np.add(approach_point, bezier_offset)\n p3 = np.add(exit_point, bezier_offset)\n print(\"driving_line_road_px=\",driving_line_road_px)\n curve_mask=bezier.get_curve_mask(p1, p2, p3, width=driving_line_road_px, img_dimensions=mask_dimension)[:,:,0]\n \n print(\"TEST\")\n print(mask_dimension)\n print(curve_mask.shape)\n\n if include_bezier:\n feature_masks.append(curve_mask)\n\n combined_mask = np.sum(feature_masks, axis=0).astype(np.uint8)\n\n cv2.imshow(\"curve_mask\",curve_mask)\n cv2.imshow(\"combined_mask\",combined_mask)\n cv2.waitKey(0)\n\n return feature_masks, combined_mask, curve_mask", "def _load_semantic_seg_3d(self, results):\n pts_semantic_mask_path = results[\"ann_info\"][\"pts_semantic_mask_path\"]\n\n if self.file_client is None:\n self.file_client = mmcv.FileClient(**self.file_client_args)\n try:\n mask_bytes = self.file_client.get(pts_semantic_mask_path)\n # add .copy() to fix read-only bug\n pts_semantic_mask = np.frombuffer(\n mask_bytes, dtype=self.seg_3d_dtype\n ).copy()\n except ConnectionError:\n mmcv.check_file_exist(pts_semantic_mask_path)\n pts_semantic_mask = np.fromfile(pts_semantic_mask_path, dtype=np.long)\n\n results[\"pts_semantic_mask\"] = pts_semantic_mask\n results[\"pts_seg_fields\"].append(\"pts_semantic_mask\")\n return results", "def decode_segmap(label_mask, n_classes, hex_color_dict, dataset, plot=False):\r\n\r\n r = label_mask.copy()\r\n g = label_mask.copy()\r\n b = label_mask.copy()\r\n for ll in range(0, n_classes):\r\n r[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[0]\r\n g[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[1]\r\n b[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[2]\r\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\r\n rgb[:, :, 0] = r / 255.0\r\n rgb[:, :, 1] = g / 255.0\r\n rgb[:, :, 2] = b / 255.0\r\n\r\n return rgb", "def sub_select_features(features, strategy):\n\n def extract_one_index(y_val):\n index_ones = []\n y_prev = 0\n start_stop = []\n if y_val[-1] == 1:\n y_val = y_val.tolist() + [0]\n for i, y in enumerate(y_val):\n if y_prev == 0 and y == 1:\n start_stop = [i]\n if y_prev == 1 and y == 0:\n start_stop.append(i)\n index_ones.append(start_stop)\n y_prev = y\n return index_ones\n\n def wrapper(start_stop, maxi):\n size = start_stop[1] - start_stop[0]\n bound = (size+1)//2\n return [max(0, start_stop[0]-bound), min(maxi, start_stop[1]+bound)]\n\n def deduce_index_to_keep(one_index, maxi):\n wrapped = [wrapper(start_stop, maxi) for start_stop in one_index]\n to_keep = [idx for idx in range(wrapped[0][0], wrapped[0][1])]\n for start_stop in wrapped[1:]:\n to_keep += [idx for idx in 
range(start_stop[0], start_stop[1]) if idx > to_keep[-1]]\n return to_keep\n\n if strategy == 0:\n new_features = features # We do nothing\n\n else:\n new_features = dict()\n for which in ['train', 'test']:\n one_id = extract_one_index(features['y_'+which])\n true_idx = deduce_index_to_keep(one_id, len(features['y_'+which]))\n try:\n new_features['x_'+which] = features['x_'+which][true_idx]\n new_features['y_'+which] = features['y_'+which][true_idx]\n except IndexError as e:\n print(which)\n print(features['x_'+which].shape)\n print(features['y_'+which].shape)\n print(one_id)\n raise e\n\n return new_features", "def _populate_mask_data(self, filename: str) -> None:\n if self.seg_images.get(filename) is None:\n return None\n\n mask = cv2.imread(self.seg_targets[filename])\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)\n\n # convert pixel masks to multidimentional\n height, width = mask.shape[:2]\n segmentation_mask = np.zeros((height, width, len(VOC_COLORMAP)), dtype=np.float32)\n for label_index, label in enumerate(VOC_COLORMAP):\n segmentation_mask[:, :, label_index] = np.all(mask == label, axis=-1).astype(float)\n\n return segmentation_mask", "def find_overlapping_segments(pos, seg, columns):\n seg = seg.sort_values(['start', 'end'])\n\n if seg.duplicated(['start', 'end']).any():\n raise ValueError('duplicate columns')\n\n start_idx = np.searchsorted(seg['start'].values, pos['coord'].values) - 1\n end_idx = np.searchsorted(seg['end'].values, pos['coord'].values)\n\n mask = (start_idx == end_idx)\n\n results = pos.copy()\n\n for col in columns:\n results[col] = np.nan\n results.loc[mask, col] = seg[col].iloc[end_idx[mask]].values\n\n return results", "def postprocess_segments(self):\n # make segs a list of mask arrays, it's easier to store\n # as there is a hdf5 equivalent\n for iseg, seg in enumerate(self.segs):\n mask = np.zeros(self._adata.shape[0], dtype=bool)\n mask[seg] = True\n self.segs[iseg] = mask\n # convert to arrays\n self.segs = np.array(self.segs)\n self.segs_tips = np.array(self.segs_tips)", "def addr2features(address):\n return [Parser.get_current_and_neighbor_features(i, address) for i in range(len(address))]", "def get_f2sIndsBySeg(seg, sopuids):\n \n f2sInds = get_f2sInds(seg, sopuids)\n \n divs = get_DIVs(seg)\n \n f2sIndsBySeg = group_list_by_seg(listToGroup=f2sInds, divs=divs)\n \n return f2sIndsBySeg", "def get_features(self, request, **kwargs):\n if hasattr(request, 'GET'):\n reference, start, stop = parse_das_segment(request)\n query_seg = {'id': reference, 'start':start, 'stop':stop}\n if 'chrom' in self.fields:\n pass\n try:\n reference = int(reference)\n except ValueError:\n reference = reference\n self.is_authenticated(request)\n # :TODO put this throught the regular filter\n try:\n if start:\n base_object_list = self.get_object_list(request).filter(\n Q(start__range=(start, stop)) |\\\n Q(end__range=(start, stop)),\n chrom__exact = reference)\n else:\n base_object_list = self.get_object_list(request).filter(\n chrom__exact = reference)\n # :TODO authorization check\n except ValueError:\n raise ValueError('Invalid Request')\n bundles = [self.build_bundle(obj=obj, request=request) for obj in\\\n base_object_list]\n to_be_serialized = [self.full_dehydrate(bundle) for bundle in bundles]\n # passing reqeust into options is, maybe I should pass in the whole\n # request? 
\n options = {'query': query_seg, \n 'method': self._meta.method, \n 'request_string': request.META['QUERY_STRING'],\n 'request_path': request.path,\n }\n content = self.serialize(request, to_be_serialized, 'xml',\n options=options)\n response = HttpResponse(content = content,\n content_type = 'application/xml')\n response = add_das_headers(response)\n return response", "def find_coordinates(hmms, bit_thresh):\n # get coordinates from cmsearch output\n seq2hmm = parse_hmm(hmms, bit_thresh)\n seq2hmm = best_model(seq2hmm)\n group2hmm = {} # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]\n for seq, info in list(seq2hmm.items()):\n group2hmm[seq] = {}\n # info = [model, [[hit1], [hit2], ...]]\n for group_num, group in enumerate(hit_groups(info[1])):\n # group is a group of hits to a single 16S gene\n # determine matching strand based on best hit\n best = sorted(group, reverse = True, key = itemgetter(-1))[0]\n strand = best[5]\n coordinates = [i[0] for i in group] + [i[1] for i in group]\n coordinates = [min(coordinates), max(coordinates), strand]\n # make sure all hits are to the same strand\n matches = [i for i in group if i[5] == strand]\n # gaps = [[gstart, gend], [gstart2, gend2]]\n gaps = check_gaps(matches)\n group2hmm[seq][group_num] = [info[0], strand, coordinates, matches, gaps]\n return group2hmm", "def get_feature_distributions(cls,gen,folder,chosen_seg):\n\n filename = 'feature_distributions{}.txt'.format(str(gen))\n path = os.path.join(folder,filename)\n try:\n with open(path, mode='r', encoding='utf-8 sig') as f:\n lines = [line.strip() for line in f.readlines()]\n except FileNotFoundError:\n return 'error'\n\n features = collections.defaultdict(dict)\n foundit = False\n for line in lines:\n if line == '' or line == '\\n':\n continue\n line = line.split('(')\n if len(line)>1:\n\n if line[1].rstrip(')') == chosen_seg:\n foundit = True\n feature = line[0].strip()\n feature = feature.strip('\\ufeff')\n else:\n foundit = False\n else:\n line = line[0]\n if line[0].isdigit() and foundit:\n bin_,value = line.split(':')\n bin_ = float(bin_.split('-')[0])\n value = int(value)\n features[feature][bin_] = value\n return features", "def _find_features_for_cluster(service_data, ns_data, cl_data={}):\n\n features = []\n\n for node in service_data.keys():\n if cl_data and node in cl_data and cl_data[node] and not isinstance(cl_data[node], Exception):\n if service_data[node] and not isinstance(service_data[node], Exception):\n service_data[node].update(cl_data[node])\n else:\n service_data[node] = cl_data[node]\n\n for feature, keys in FEATURE_KEYS.iteritems():\n for node, d in service_data.iteritems():\n\n ns_d = None\n\n if node in ns_data and not isinstance(ns_data[node], Exception):\n ns_d = ns_data[node]\n\n if _check_feature_by_keys(d, keys[0], ns_d, keys[1]):\n features.append(feature)\n break\n\n return features", "def calculate_mapping(self, mask):\n K, F, _ = mask.shape\n\n # (K, F, T)\n features = mask / np.linalg.norm(mask, axis=-1, keepdims=True)\n\n mapping = np.repeat(np.arange(K)[:, None], F, axis=1)\n\n for iterations, start, end in self.alignment_plan:\n for _ in range(iterations):\n # (K, T)\n centroid = np.sum(features[:, start:end, :], axis=1)\n centroid /= np.linalg.norm(centroid, axis=-1, keepdims=True)\n\n break_flag = False\n for f in range(start, end):\n reverse_permutation = self._align_segment(\n features[:, f, :], centroid,\n )\n if not (reverse_permutation == list(range(K))).all():\n break_flag = True\n features[:, f, :] = 
features[reverse_permutation, f, :]\n mapping[:, f] = mapping[reverse_permutation, f]\n if break_flag:\n break\n\n return mapping", "def get_sample_mask(self):", "def segment_finder(reference, vectorDB, keyword, namebase):\n\n segment_DB = {}\n segment_id = None\n seg_init = True\n\n for symbol in reference:\n\n synteny = False\n ref_pos = reference.index(symbol)\n\n while True:\n\n for v_key in vectorDB:\n\n synteny = False\n query = vectorDB[v_key][keyword]\n\n try:\n position1 = query.index(reference[ref_pos])\n except IndexError:\n break\n except ValueError:\n break\n else:\n if position1:\n try:\n position2 = query.index(reference[ref_pos+1])\n except IndexError:\n break\n except ValueError:\n break\n else:\n if position2:\n if position2 == position1+1:\n synteny = True\n else:\n break\n else:\n break\n\n ref_pos +=1\n break\n\n if synteny:\n if seg_init:\n segment_id = namebase+str(len(segment_DB)+1)\n # not sure what's up with indexing but this works\n segment_DB[segment_id] = [reference[ref_pos-1],\n reference[ref_pos]]\n # switch off initialization\n seg_init = False\n else:\n segment_DB[segment_id].append(reference[ref_pos])\n else:\n seg_init = True\n return segment_DB", "def segment_by_shape(self, dt, criterion):\n enumerate_crit = Criterion(\n 'patterns', criterion.column_name+'_derivatives_patterns_clusters',\n 'enumerate')\n return self.segment_by_enumerate(dt, enumerate_crit)", "def get_feature_instance_segmentation(\n self, split, curr_id):\n\n bitmask_file = os.path.join(\n BDD_100K_PAN_SEG,\n self.instance_seg_file.format(self.split_name(split), curr_id))\n\n if not tf.io.gfile.exists(bitmask_file):\n return self.feature_utils.get_fake_feature('instance_segmentation'), False\n\n bit = utils.load_image(bitmask_file)\n # Description: https://doc.bdd100k.com/format.html#bitmask:\n # the B channel and A channel store the “ann_id” for instance segmentation\n # and “ann_id” for segmentation tracking, respectively, which can be\n # computed as (B << 8) + A:\n instance_ids = bit[:, :, 2] * 256 + bit[:, :, 3]\n instance_mask = np.unique(instance_ids, return_inverse=True)[1]\n desired_shape = list(instance_ids.shape[:2]) + [1]\n instance_mask = np.reshape(instance_mask, desired_shape)\n return instance_mask.astype(np.uint16), True", "def findfeatures(xarr, farr, sl, sf, ws, mdiff=20, wdiff=20, sigma=5, niter=5,\n sections=3):\n\n # detect lines in the input spectrum and identify the peaks and peak values\n xp, xf = find_points(xarr, farr, kernal_size=sigma, sections=sections)\n\n # return no solution if no peaks were found\n if len(xp) == 0:\n return None\n\n # find the best match to the lines\n wp = findmatch(xarr, farr, xp, xf, sl, sf, ws, xlimit=mdiff, wlimit=wdiff)\n\n try:\n for i in range(len(xp)):\n if wp[i] > -1:\n pass\n except Exception as e:\n message = 'Unable to match line lists because %s' % e\n raise SpecError(message)\n return xp, wp", "def getFeaturesByBBox(self,bboxtuple, srsname):\n raise NotImplementedError", "def extractFeatures(image, mask, name, binCount=8, features=\"all\"):\n def extractType(func, type_name):\n name = []\n values = []\n feat = func(image,mask, binCount=binCount)\n feat.enableAllFeatures() \n feat.execute()\n for (key,val) in six.iteritems(feat.featureValues):\n name.append(key+f'_{type_name}')\n values.append(val)\n return pd.DataFrame([values], columns=name)\n\n dim = image.GetDimension()\n\n features_array = np.array([\"FO\", f\"S{dim}D\", \"GLCM\", \"GLSZM\", \"GLRLM\", \"NGTDM\", \"GLDM\"])\n features_func = 
np.array([firstorder.RadiomicsFirstOrder, eval(f\"shape{'2D'*(dim == 2)}.RadiomicsShape{'2D'*(dim==2)}\"), \n glcm.RadiomicsGLCM, glszm.RadiomicsGLSZM, glrlm.RadiomicsGLRLM, ngtdm.RadiomicsNGTDM, \n gldm.RadiomicsGLDM])\n if features != \"all\":\n if features is str:\n print(\"Type wrong. Returning None.\")\n return None\n index = pd.Index(features_array).isin(features)\n features_array = features_array[index]\n features_func = features_func[index]\n\n list_feat = list(map(lambda i: extractType(features_func[i], features_array[i]), np.arange(len(features_array))))\n df = pd.concat([pd.DataFrame([name], columns=[\"Caso\"])] + list_feat, axis=1)\n return df", "def _get_features_geo_filtered(self, pdb_id):\n features, geometry = self._get_features_geo(pdb_id)\n mask = self._get_mask_selected_atoms_pocket(pdb_id)\n features_filtered, geometry_filtered = features[mask, :], geometry[mask, :]\n features_filtered = torch.from_numpy(features_filtered).squeeze()\n geometry_filtered = torch.from_numpy(geometry_filtered).squeeze()\n return features_filtered, geometry_filtered", "def get_segm_by_sel(*args):\n return _ida_segment.get_segm_by_sel(*args)", "def extract_features(self, stack, buf, arcs, sent):\n def get_lc(k):\n return sorted([arc[1] for arc in arcs if arc[0] == k and arc[1] < k])\n\n def get_rc(k):\n return sorted([arc[1] for arc in arcs if arc[0] == k and arc[1] > k],\n reverse=True)\n\n features = [TOK2ID[NULL]] * (3 - len(stack)) + [sent[x] for x in stack[-3:]]\n features += [sent[x] for x in buf[:3]] + [TOK2ID[NULL]] * (3 - len(buf))\n for i in range(2):\n if i < len(stack):\n k = stack[-i-1]\n lc = get_lc(k)\n rc = get_rc(k)\n llc = get_lc(lc[0]) if len(lc) > 0 else []\n rrc = get_rc(rc[0]) if len(rc) > 0 else []\n\n features.append(sent[lc[0]] if len(lc) > 0 else TOK2ID[NULL])\n features.append(sent[rc[0]] if len(rc) > 0 else TOK2ID[NULL])\n features.append(sent[lc[1]] if len(lc) > 1 else TOK2ID[NULL])\n features.append(sent[rc[1]] if len(rc) > 1 else TOK2ID[NULL])\n features.append(sent[llc[0]] if len(llc) > 0 else TOK2ID[NULL])\n features.append(sent[rrc[0]] if len(rrc) > 0 else TOK2ID[NULL])\n else:\n features += [TOK2ID[NULL]] * 6\n\n assert len(features) == self.n_features\n return features", "def matches(self, feature):\n pass", "def get_instance_features(instance, tokenizer_src, tokenizer_trg, max_seq_length, bucket):\n def _find_bucket_length(source_tokens, target_tokens):\n source_ids = tokenizer_src.convert_tokens_to_ids(source_tokens)\n target_ids = tokenizer_trg.convert_tokens_to_ids(target_tokens)\n num = max(len(source_ids), len(target_ids))\n assert num <= bucket[-1]\n for index in range(1, len(bucket)):\n if bucket[index - 1] < num <= bucket[index]:\n return bucket[index]\n return bucket[0]\n\n def _convert_ids_and_mask(tokenizer, input_tokens, seq_max_bucket_length):\n input_ids = tokenizer.convert_tokens_to_ids(input_tokens)\n input_mask = [1] * len(input_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < seq_max_bucket_length:\n input_ids.append(1)\n input_mask.append(0)\n\n assert len(input_ids) == seq_max_bucket_length\n assert len(input_mask) == seq_max_bucket_length\n\n return input_ids, input_mask\n\n seq_max_bucket_length = _find_bucket_length(instance.source_tokens, instance.target_tokens)\n source_ids, source_mask = _convert_ids_and_mask(tokenizer_src, instance.source_tokens, seq_max_bucket_length)\n target_ids, target_mask = _convert_ids_and_mask(tokenizer_trg, instance.target_tokens, seq_max_bucket_length)\n\n features = 
collections.OrderedDict()\n features[\"source_ids\"] = np.asarray(source_ids)\n features[\"source_mask\"] = np.asarray(source_mask)\n features[\"target_ids\"] = np.asarray(target_ids)\n features[\"target_mask\"] = np.asarray(target_mask)\n\n return features, seq_max_bucket_length", "def find_features(pyr):\n feature_pnts = spread_out_corners(pyr[0], SPREAD_N, SPREAD_M ,SPREAD_CORNERS_RADIUS)\n descriptors = sample_descriptor(pyr[2], feature_pnts, SAMPLE_RAD)\n return feature_pnts, descriptors", "def getSequenceFeatures(seqfile, summitpeaks, peaks, spidx, l, rthresh):\n numpeaks = len(peaks)\n rv = np.zeros((numpeaks, 5, l),dtype=float)\n idx = np.zeros((numpeaks), dtype=object)\n curidx = 0\n with open(seqfile, 'r') as f:\n while True:\n try:\n metadata = next(f)\n peakid = int(metadata.split(\"\\t\")[3].strip())\n reference = next(f).strip()\n fv1 = u.getFrequencyVector(next(f))\n fv2 = u.getFrequencyVector(next(f))\n fv3 = u.getFrequencyVector(next(f))\n fv4 = u.getFrequencyVector(next(f))\n if peakid in spidx:\n idx[curidx] = peakid\n rv[curidx, 0, :] = fv1 #A\n rv[curidx, 1, :] = fv2 #C\n rv[curidx, 2, :] = fv3 #G\n rv[curidx, 3, :] = fv4 #T\n updateReferenceAndNormalize(rv[curidx], reference, rthresh)\n curidx += 1\n except StopIteration as se:\n break\n\n sortedidx = np.argsort(idx)\n idx = idx[sortedidx]\n assert np.all(spidx == idx)\n rv = rv[sortedidx]\n \n rv[:, 4, :] = u.getPeakPositions(summitpeaks, peaks, l)\n \n return rv", "def _parse_features(cls, node: OMNode) -> Dict[str, Dict[str, bool]]:\n features = {}\n for sectname in node:\n section = node[sectname]\n if not isinstance(section, dict) or '_types' not in section:\n continue\n features[sectname] = {}\n for opt in section:\n if not opt.startswith('has'):\n continue\n value = section[opt]\n if not isinstance(value, bool):\n continue\n option = opt[3:]\n features[sectname][option] = value\n return features", "def index_feats_dict(self):\n doc_features_dict = {}\n\n for index, doc in zip(self.index, self.series):\n # Sets for a doc and feature words\n doc_set = set(doc.split())\n feat_set = set(self.features)\n\n # Shared words between the two sets\n interset_words = doc_set.intersection(feat_set)\n\n # Append to doc_features_dict\n doc_features_dict[index] = list(interset_words)\n\n return doc_features_dict", "def mask_features(feat_file_name):\n with open(saved_feature_path + '/f1_reduced_mask', 'rb') as fp:\n mask = pickle.load(fp)\n\n with open(saved_feature_path + '/' + feat_file_name, 'rb') as fp:\n features = pickle.load(fp)\n\n feature_mat = list()\n for i in range(len(features)):\n feature_vec = list()\n for j in range(len(mask)):\n if mask[j]:\n feature_vec.append(features[i][j])\n feature_mat.append(feature_vec)\n\n feature_mat = np.asarray(feature_mat)\n return feature_mat", "def segment(self):\n start = self.alignment.matching_function_startpoint(self.idx)\n end = self.alignment.matching_function_endpoint(self.idx)\n return [start, end]", "def mask_from_file(filename, name, mesh):\n with open(filename) as f:\n features = json.load(f)[\"features\"]\n for feature in features:\n if feature[\"properties\"][\"name\"] == name:\n geom = shapely.geometry.shape(feature[\"geometry\"])\n mask = shapely.vectorized.contains(geom, mesh.x2, mesh.y2)\n return mask", "def get_features(words, vectors):\n result = [vectors.loc[word].values for word in words if word in df_keys.values.reshape(-1)]\n if result:\n return np.stack(result)\n return None", "def find_16S(fastas, hmms, bit_thresh = float(20), length_thresh = 500, masking 
= True, buffer = 0):\n # identify start/stop positions\n # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]\n group2hmm = find_coordinates(hmms, bit_thresh)\n # get sequences from fasta file\n for fasta in fastas:\n for seq in parse_fasta(fasta):\n id = seq[0].split('>')[1].split()[0]\n if id not in group2hmm:\n continue\n seq[1] = seq[1].upper()\n count = 0 # how many 16S genes are there on the contig?\n for group, info in list(group2hmm[id].items()):\n model, strand, coords, matches, gaps = info\n # count insertion bases (ib) from gaps\n ib = sum([i[1] - i[0] + 1 for i in gaps])\n # calcualte length of non-insertion regions (don't include buffer)\n tl = coords[1] - coords[0] + 1\n length = tl - ib\n if length < length_thresh:\n continue \n # count sequence\n count += 1\n # set retrieval coords based on buffer\n ret_coords = [max([coords[0] - buffer, 1]), \\\n min([coords[1] + buffer, len(seq[1])]), coords[2]]\n buffer_ends = check_buffer(coords, len(seq[1]), buffer)\n # mask insertion sequences\n if masking is True:\n seq[1] = mask_sequence(seq[1], gaps)\n S = seq[1][(ret_coords[0] - 1):(ret_coords[1])]\n inserts = [gap[1] - gap[0] + 1 for gap in gaps]\n inserts.append('end')\n model_pos = ';'.join(['%s-%s(%s)' % (match[2], match[3], insert) for match, insert in zip(matches, inserts)])\n header = '%s 16SfromHMM::model=%s seq=%s pos=%s-%s strand=%s total-len=%s 16S-len=%s model-pos(ins-len)=%s buffer-len=%s/%s ins-bases=%s' % \\\n (seq[0], model, count, ret_coords[0], ret_coords[1], strand, tl, length, model_pos, buffer_ends[0], buffer_ends[1], ib)\n # reverse complement if strand is reverse\n if strand == '-':\n S = rc(['', S])[1]\n yield [header, S]", "def findFeatures(self):\n\t\tpass", "def __init__(\n self,\n input_ids: List[int],\n input_mask: List[int],\n segment_ids: List[int],\n input_ids_for_subwords: List[int],\n input_mask_for_subwords: List[int],\n segment_ids_for_subwords: List[int],\n character_pos_to_subword_pos: List[int],\n fragment_indices: List[Tuple[int, int, int]],\n labels_mask: List[int],\n labels: List[int],\n spans: List[Tuple[int, int, int]],\n default_label: int,\n ) -> None:\n input_len = len(input_ids)\n if not (\n input_len == len(input_mask)\n and input_len == len(segment_ids)\n and input_len == len(labels_mask)\n and input_len == len(labels)\n and input_len == len(character_pos_to_subword_pos)\n ):\n raise ValueError(\"All feature lists should have the same length ({})\".format(input_len))\n\n input_len_for_subwords = len(input_ids_for_subwords)\n if not (\n input_len_for_subwords == len(input_mask_for_subwords)\n and input_len_for_subwords == len(segment_ids_for_subwords)\n ):\n raise ValueError(\n \"All feature lists for subwords should have the same length ({})\".format(input_len_for_subwords)\n )\n\n self.features = OrderedDict(\n [\n (\"input_ids\", input_ids),\n (\"input_mask\", input_mask),\n (\"segment_ids\", segment_ids),\n (\"input_ids_for_subwords\", input_ids_for_subwords),\n (\"input_mask_for_subwords\", input_mask_for_subwords),\n (\"segment_ids_for_subwords\", segment_ids_for_subwords),\n (\"character_pos_to_subword_pos\", character_pos_to_subword_pos),\n (\"fragment_indices\", fragment_indices),\n (\"labels_mask\", labels_mask),\n (\"labels\", labels),\n (\"spans\", spans),\n ]\n )\n self._default_label = default_label", "def decode_segmap(label_mask, num_classes):\n label_colours = get_capsicum_labels()\n\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, num_classes):\n 
r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3)).astype(np.uint8)\n # rgb[:, :, 0] = r / 255.0\n # rgb[:, :, 1] = g / 255.0\n # rgb[:, :, 2] = b / 255.0\n rgb[:, :, 0] = r\n rgb[:, :, 1] = g\n rgb[:, :, 2] = b\n return rgb", "def get_segmentations(self, aids):\n sseg_list = []\n for aid in aids:\n ann = self.dset.anns[aid]\n coco_sseg = ann.get('segmentation', None)\n if coco_sseg is None:\n sseg = None\n else:\n sseg = kwimage.MultiPolygon.coerce(coco_sseg)\n sseg_list.append(sseg)\n return sseg_list", "def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" \")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents", "def bg_mask(query_imgs, method):\n print(\"Obtaining masks\")\n segmentation_method = get_method(method)\n return [segmentation_method(img) for img in query_imgs]", "def find_segment(bv: binaryninja.binaryview.BinaryView, name: str) -> List[Tuple[int, int]]:\n result = []\n for sn in bv.sections:\n sec = bv.get_section_by_name(sn)\n if sec.name == name:\n result.append((sec.start, sec.end))\n return result" ]
[ "0.5785783", "0.57444733", "0.56591356", "0.5636957", "0.5626444", "0.5615696", "0.5559518", "0.5535667", "0.55190414", "0.5504409", "0.54510874", "0.5436337", "0.54250914", "0.5373862", "0.5363101", "0.5299349", "0.5298827", "0.52629185", "0.52567637", "0.52518916", "0.5242554", "0.5230351", "0.5187208", "0.51869196", "0.51406395", "0.5139024", "0.51378363", "0.5123044", "0.511853", "0.51181966", "0.5112712", "0.508614", "0.508401", "0.5078598", "0.5075921", "0.5066935", "0.50650465", "0.50619286", "0.5051337", "0.50456893", "0.5009977", "0.5009977", "0.5008941", "0.5000224", "0.49977297", "0.4989543", "0.49772295", "0.4971351", "0.49698862", "0.49668902", "0.49632588", "0.49348372", "0.49325177", "0.49296498", "0.49253157", "0.491887", "0.4917308", "0.49170846", "0.49090022", "0.49076834", "0.48986915", "0.48944572", "0.4893946", "0.4889147", "0.48862872", "0.48850724", "0.48812428", "0.48806015", "0.4879367", "0.48761296", "0.4875666", "0.4871494", "0.48670295", "0.4865453", "0.48629534", "0.48603907", "0.4850104", "0.48438084", "0.48407853", "0.48397368", "0.48375803", "0.48354688", "0.48240227", "0.48226693", "0.4820969", "0.4819109", "0.48165503", "0.48139474", "0.48055965", "0.48014393", "0.47994292", "0.47992733", "0.47945207", "0.47896224", "0.47891653", "0.47883072", "0.47882238", "0.47785103", "0.4775693", "0.47746623" ]
0.61812437
0
Given a string describing feature masks for a sequence of segments, return a compiled regex matching the corresponding strings.
def compile_regex_from_str(self, pat): s2n = {'-': -1, '0': 0, '+': 1} seg_res = [] for mat in re.findall(r'\[[^]]+\]+', pat): ft_mask = {k: s2n[v] for (v, k) in re.findall(r'([+-])(\w+)', mat)} segs = self.all_segs_matching_fts(ft_mask) seg_res.append('({})'.format('|'.join(segs))) regexp = ''.join(seg_res) return re.compile(regexp)
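
For context, a minimal self-contained sketch of how the method above behaves. The toy inventory stands in for the real segment table: the class name, the three segments, and the feature names voi/syl are illustrative assumptions, not taken from the source; only compile_regex_from_str reproduces the document's logic.

import re

class MiniFeatureTable:
    def __init__(self):
        # Invented toy inventory: three segments, two features (voi, syl).
        self._inventory = {
            't': {'voi': -1, 'syl': -1},   # voiceless, non-syllabic
            'd': {'voi': 1, 'syl': -1},    # voiced, non-syllabic
            'a': {'voi': 1, 'syl': 1},     # voiced, syllabic
        }

    def all_segs_matching_fts(self, ft_mask):
        # Return every segment whose features agree with the (partial) mask.
        return [seg for seg, fts in self._inventory.items()
                if all(fts.get(name) == val for name, val in ft_mask.items())]

    def compile_regex_from_str(self, pat):
        # Same logic as the document's method: each bracketed mask becomes
        # an alternation over the segments that satisfy it.
        s2n = {'-': -1, '0': 0, '+': 1}
        seg_res = []
        for mat in re.findall(r'\[[^]]+\]+', pat):
            ft_mask = {k: s2n[v] for (v, k) in re.findall(r'([+-])(\w+)', mat)}
            segs = self.all_segs_matching_fts(ft_mask)
            seg_res.append('({})'.format('|'.join(segs)))
        return re.compile(''.join(seg_res))

ft = MiniFeatureTable()
rx = ft.compile_regex_from_str('[-voi][+syl]')   # voiceless segment, then syllabic segment
print(rx.fullmatch('ta') is not None)  # True:  't' is -voi, 'a' is +syl
print(rx.fullmatch('da') is not None)  # False: 'd' is +voi
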
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_regex_search(search_string):\n\n sspat = None\n valid_flags = {\n 'i': re.IGNORECASE\n }\n if search_string:\n try:\n search_string, flag_letters = re.match(r'^(.+?)(?:/([a-z]+))?$', search_string).groups()\n flags = 0\n # if flags are given, OR together all the valid flags\n # see https://docs.python.org/3/library/re.html#re.compile\n if flag_letters:\n for letter in flag_letters:\n if letter in valid_flags:\n flags = flags | valid_flags[letter]\n sspat = re.compile(search_string, flags)\n except re.error:\n sspat = None\n\n return sspat", "def add_regex_flag(vocab, pattern_str):\n flag_id = vocab.add_flag(re.compile(pattern_str).match)\n return flag_id", "def _regex_from_encoded_pattern(s):\r\n if s.startswith('/') and s.rfind('/') != 0:\r\n # Parse it: /PATTERN/FLAGS\r\n idx = s.rfind('/')\r\n pattern, flags_str = s[1:idx], s[idx+1:]\r\n flag_from_char = {\r\n \"i\": re.IGNORECASE,\r\n \"l\": re.LOCALE,\r\n \"s\": re.DOTALL,\r\n \"m\": re.MULTILINE,\r\n \"u\": re.UNICODE,\r\n }\r\n flags = 0\r\n for char in flags_str:\r\n try:\r\n flags |= flag_from_char[char]\r\n except KeyError:\r\n raise ValueError(\"unsupported regex flag: '%s' in '%s' \"\r\n \"(must be one of '%s')\"\r\n % (char, s, ''.join(list(flag_from_char.keys()))))\r\n return re.compile(s[1:idx], flags)\r\n else: # not an encoded regex\r\n return re.compile(re.escape(s))", "def compile_regex(self, paths):\r\n if isinstance(paths, list):\r\n ret = []\r\n for regex in paths:\r\n ret.append(re.compile(regex, re.I))\r\n return ret\r\n else:\r\n return re.compile(paths, re.I)", "def main(string, negative, mode):\n\n gen = generateRegex(string, negative)\n result = None\n if mode == \"regex\":\n # generate regex from string\n gen.generate_regex_from_string()\n result = gen.found_patterns\n elif mode == \"string\":\n # generate string from regex\n result = gen.generate_from_regex(string)\n if result:\n print(f\"OUTPUT:\\n\\n\", result)\n else:\n print(\"No matches found!\")", "def to_regex(*args:List[str], flags:int=0, compile:bool=True) -> Union[str, re.compile]:\n pattern = \"\".join(args)\n\n if compile:\n return re.compile(pattern, flags=flags)\n else:\n flagstring = re_flags_to_string(flags)\n pattern = f\"{flagstring}{pattern}\"\n return pattern", "def _MakeRE(regex_str):\n return re.compile(regex_str.format(**SHORTHAND))", "def _regex_flags_from_bits(self, bits):\n flags = 'tilmsux'\n return ''.join(flags[i] if (1 << i) & bits else '' for i in xrange(6))", "def generate_regex_from_string(self):\n tries = 0\n while tries < self.max_tries:\n try:\n tries += 1\n if tries % 100 == 0:\n print(f\"Tries: {tries}\", end=\"\\r\")\n patterns_to_try = self.generate_regex_pattern()\n for _, pattern in patterns_to_try:\n if re.fullmatch(pattern, self.string):\n self.found_patterns.add(pattern)\n else:\n print(f\"Doesn't Match! 
{pattern} -> {self.string}\")\n except Exception as e:\n pass\n if self.negative_string:\n self.found_patterns = self.best_pattern()", "def _compile_fnmatch(pattern: str) -> re.Pattern:\n return re.compile(translate(pattern))", "def match_regex(regex: str, string: str):\n postfix_regex = infix_to_postfix(regex)\n nfa = create_nfa_from_postfix(postfix_regex)\n return input_string_to_nfa(string, nfa)", "def from_regex(pattern:str) -> str:\n raise NotImplementedError()", "def _combined_regex(regexes, flags=re.IGNORECASE, use_re2=False, max_mem=None):\n joined_regexes = \"|\".join(r for r in regexes if r)\n if not joined_regexes:\n return None\n\n if use_re2:\n import re2\n return re2.compile(joined_regexes, flags=flags, max_mem=max_mem)\n return re.compile(joined_regexes, flags=flags)", "def define_regex(module_names=modules, doc_types=types, exts=exts):\n letters = ''\n module_codes = ''\n file_exts = ''\n # Populate code letter String\n for letter in doc_types:\n letters += letter\n # Populate extension string\n for ext in exts:\n if ext != exts[-1]:\n file_exts += f\"{ext}|\"\n else:\n file_exts += ext\n # Populate modules string\n for module in modules:\n if module != modules[-1]:\n module_codes += f\"{module}|\"\n else:\n module_codes += module\n regex = r\"(\" + module_codes + \"){1}[\" + letters + \"]{1}\\_[^.]*\\.(\" + file_exts + \")\" \n return regex", "def match_regex_4(s, r):\n s_len = len(s)\n r_len = len(r)\n stack = [(0, 0)]\n explored = set() # States we've already explored.\n def explore(s_idx, r_idx):\n if (s_idx, r_idx) not in explored:\n explored.add((s_idx, r_idx))\n stack.append((s_idx, r_idx))\n while stack:\n s_idx, r_idx = stack.pop()\n # Case: string is empty.\n if s_idx == s_len:\n if r_idx == r_len:\n return True\n if r[r_idx] == '*':\n explore(s_idx, r_idx + 1)\n continue\n # Case: string is not empty.\n if r_idx == r_len:\n continue\n regex_instruction = r[r_idx]\n if regex_instruction in ('.', s[s_idx]):\n explore(s_idx + 1, r_idx + 1)\n if regex_instruction == '*':\n explore(s_idx + 1, r_idx + 1)\n explore(s_idx + 1, r_idx)\n return False", "def gen_regex(df):\n # Save synonym columns in list, to loop over synonyms per target\n columns = []\n for column in df.columns:\n columns.append(column)\n # Remove category and literal, because these don't contain synonyms\n columns.remove(\"category\", \"literal\")\n\n # Initialize new DataFrame to store new values:\n # Two columns: literal and regex\n new_df = pd.DataFrame(columns=[\"category\", \"literal\", \"regex\"])\n new_df_index = 0\n\n # Generate the regex and literal strings per row of the input df\n for row_index in df.index:\n # Literal can be copied directly\n lit = df.at[row_index, \"literal\"]\n cat = df.at[row_index, \"category\"]\n\n synonyms = []\n # Synonyms extracted from the columns\n for syn_col in columns:\n synonym = df.at[row_index, syn_col]\n # If particular cell is empty, don't append to list\n if pd.isna(synonym):\n # print(\"empty string\")\n pass\n else:\n synonyms.append(synonym)\n\n # Generate regex pattern including all synonyms:\n regex = \"\"\n i = 0\n n = len(synonyms)\n for synonym in synonyms:\n i += 1\n # If current loop is last synonym of list:\n if i == n:\n # Don't add another | <or> operator to regex pattern\n addition = f\"({synonym})\"\n else:\n # Include '|' to pattern, for following additions\n addition = f\"({synonym})|\"\n\n regex = regex + addition\n # Add values to new row in df\n new_df.loc[new_df_index] = \\\n pd.Series({\"category\": cat, \"literal\": lit, \"regex\": 
regex})\n new_df_index += 1\n return(new_df)", "def rc(*parts):\n rexp = r'\\s+'.join(parts) + r'\\s*$'\n return re.compile(rexp)", "def build_regex(self) -> typing.Pattern:\n self._regex = re.compile(\"|\".join(sorted(self._includes)))\n return self._regex", "def _get_re_from_pool(pool):\n no_mask = pool.rsplit('/', 1)[0]\n no_last_octet = no_mask.rsplit('.', 1)[0]\n regex = re.escape(no_last_octet) + r'\\.\\d{1,3}/\\d{1,2}\\s+blackhole'\n return regex", "def compile_filename_patterns(pattern_list):\n\n pats=list(pattern_list)\n for i in range(len(pats)):\n if isinstance(pats[i],str):\n if pats[i].startswith('re:'):\n pats[i]=pats[i][3:]\n else:\n pats[i]=fnmatch.translate(pats[i])\n pats[i]=re.compile(pats[i])\n return pats", "def get_compiled(self, flags=0):\n return re.compile(self.get_regex(), flags)", "def regex_compiled():\n return re.compile(SBE19DataParticle.regex())", "def target_pattern(lst_tag_types):\n return ''.join([r'\\s{1}\\@(',\n '|'.join(lst_tag_types),\n r')\\(([^\\)]+)\\)'])", "def get_pattern(topic):\n variants = get_variants(topic)\n sub_patterns = [r'(.*\\b)%s\\b(.*)' % variant.lower() for variant in variants]\n return re.compile(r'|'.join(sub_patterns), flags=re.IGNORECASE)", "def _suggest_regexes(content):\n # Grab all regular expressions and compile them\n suggested_regexes = set()\n regex_keywords = TagRegex.objects.all()\n\n # Look for our regular expressions in the content\n for r in regex_keywords:\n if re.search(r.regex, content):\n suggested_regexes.add(r.tag_id)\n\n return suggested_regexes", "def _compile_regexes(tokdict):\r\n for key, value in tokdict.items():\r\n tokdict[key] = re.compile('^(?:%s)$' % value, re.I).match\r\n return tokdict", "def _assign_regex(literal, regex):\n if regex:\n return regex.lower().strip()\n else:\n return r'\\b%s\\b'%literal.lower().strip()", "def create_src_file_exts_regex(input_exts: list[str] = []) -> Pattern[str]:\n import re\n\n DEFAULT = r\"\\.[fF](77|90|95|03|05|08|18|[oO][rR]|[pP]{2})?\"\n EXPRESSIONS = [DEFAULT]\n try:\n EXPRESSIONS.extend(input_exts)\n # Add its expression as an OR and force they match the end of the string\n return re.compile(rf\"(({'$)|('.join(EXPRESSIONS)}$))\")\n except re.error:\n # TODO: Add a warning to the logger\n return re.compile(rf\"({DEFAULT}$)\")", "def reg_name(nstr:str) -> object :\r\n\r\n elements=nstr.split(\" \")\r\n combs=word_combination(elements)\r\n lregex=[]\r\n for comb in combs :\r\n if len(comb) > 1 :\r\n lregex.append(\"(?i:{})\".format('[\\.\\- _,;:]?'.join(comb))) #Here to change character seperation between the words\r\n elif len(comb) == 1 :\r\n lregex.append(\"(?i:{})\".format(comb))\r\n else :\r\n pass\r\n \r\n return re.compile('({})'.format(\"|\".join(lregex)))", "def _compile_regex(self, regex, flags=re.MULTILINE):\n pattern = None\n try:\n pattern = re.compile(pattern=regex, flags=flags)\n except Exception as e:\n self.logger.error(msg=\"Error while compiling regex '{}'. 
Exception: {}\".format(regex, repr(e)))\n return pattern", "def regex_filter(regex_str, versions):\n regex = re.compile(regex_str)\n return [v for v in versions if regex.search(v)]", "def fn_to_reg(self, searchItems):\n return [re.compile(fnmatch.translate(s)) for s in searchItems]", "def get_hostmask_regex(mask):\n mask = re.escape(mask)\n mask = mask.replace(r'\\*', '.*')\n return re.compile(mask + '$', re.I)", "def match_regex_2(s, r):\n s_len = len(s)\n r_len = len(r)\n @memoize\n def match(s_idx, r_idx):\n \"\"\"Matches string s[s_idx:] to regex r[r_idx:].\"\"\"\n # Case: string is empty.\n if s_idx == s_len:\n if r_idx == r_len:\n return True\n if r[r_idx] == '*':\n return match(s_idx, r_idx + 1)\n return False\n # Case: string is not empty.\n if r_idx == r_len:\n return False\n regex_instruction = r[r_idx]\n if regex_instruction in ('.', s[s_idx]):\n return match(s_idx + 1, r_idx + 1)\n if regex_instruction == '*':\n return match(s_idx + 1, r_idx + 1) or match(s_idx + 1, r_idx)\n return False\n return match(0, 0)", "def _compile_regex(self):\n self.raw = self._compile('rawLevel')\n self.run = self._compile('runLevel')\n self.sample = self._compile('sampleLevel')\n self.agg = self._compile('aggLevel')", "def _create_regex(pattern, ignore_case=False, whole_words=False, literal_pattern=False):\n if literal_pattern:\n pattern = re.escape(pattern)\n if whole_words:\n b = r'\\b' if isinstance(pattern, str) else br'\\b'\n pattern = b + pattern + b\n\n regex = re.compile(pattern, re.I if ignore_case else 0)\n return regex", "def regex_compiled():\n return re.compile(SBE19HardwareParticle.regex(), re.DOTALL)", "def match_regex_3(s, r):\n s_len = len(s)\n r_len = len(r)\n stack = [(0, 0)]\n while stack:\n s_idx, r_idx = stack.pop()\n # Case: string is empty.\n if s_idx == s_len:\n if r_idx == r_len:\n return True\n if r[r_idx] == '*':\n stack.append((s_idx, r_idx + 1))\n continue\n # Case: string is not empty.\n if r_idx == r_len:\n continue\n regex_instruction = r[r_idx]\n if regex_instruction in ('.', s[s_idx]):\n stack.append((s_idx + 1, r_idx + 1))\n if regex_instruction == '*':\n stack.append((s_idx + 1, r_idx + 1))\n stack.append((s_idx + 1, r_idx))\n return False", "def compile_patterns(patterns: List[str], anchor: Optional[str]):\n start = ending = ''\n if anchor == 'start':\n patterns = [pattern[1:] for pattern in patterns]\n start = '^'\n elif anchor == 'end':\n patterns = [pattern[:-1] for pattern in patterns]\n ending = '$'\n\n if patterns:\n core = '|'.join(patterns)\n else:\n core = CompanyCleaner.MATCH_NOTHING # If iter is empty, return regex that can match nothing.\n\n return re.compile(start + '(?:' + core + ')+' + ending)", "def c_regex(exp, flags=0, group=0) -> Parser:\n if isinstance(exp, (str, bytes)):\n exp = re.compile(exp, flags)\n if isinstance(group, (str, int)):\n group = (group,)\n\n @Parser\n def regex_parser(stream, index):\n match = exp.match(stream, index)\n if match:\n return Result.success(match.end(), match.group(*group))\n else:\n return Result.failure(index, exp.pattern)\n\n return regex_parser", "def parse_pattern(pattern):\n return map(lambda x: True if x == '1' else False, pattern)", "def prepare_regexp(regexp):\n if regexp.startswith('/'):\n groups = re.match(r'^/(.*)/([A-Za-z]*)$', regexp).groups()\n regexp, flags_str = groups\n flags = 0\n for flag in explode(flags_str):\n flag = flag.upper()\n if hasattr(re, flag):\n flags |= getattr(re, flag)\n return validators.RegexValidator(re.compile(regexp, flags))\n else:\n return 
validators.RegexValidator(re.compile(regexp))", "def test_constructPossibleSequenceRegex(self):\n test_cases = [\n ['file03.03.rgb', [r'file(\\d+).03.rgb', r'file03.(\\d+).rgb']],\n ['file3030.030', [r'file(\\d+).030', r'file3030.(\\d+)']],\n ]\n for x, (fileName, regexStrings) in enumerate(test_cases):\n with self.subTest(i=x):\n result = path_core._core.FolderContainer._constructPossibleSequenceRegex(fileName)\n expectedResult = [re.compile(regexString) for regexString in regexStrings]\n self.assertEqual(expectedResult, result)", "def _create_regexes():\n space = r'(?:[^\\S\\n]|&nbsp;|&\\#0*160;|&\\#[Xx]0*[Aa]0;)'\n spaces = r'{space}+'.format(space=space)\n space_dash = r'(?:-|{space})'.format(space=space)\n tags = [\n 'gallery',\n 'math',\n 'nowiki',\n 'pre',\n 'score',\n 'source',\n 'syntaxhighlight',\n ]\n # Based on pywikibot.textlib.compileLinkR\n # and https://gist.github.com/gruber/249502\n url = r'''(?:[a-z][\\w-]+://[^\\]\\s<>\"]*[^\\]\\s\\.:;,<>\"\\|\\)`!{}'?«»“”‘’])'''\n _regexes.update(\n {\n 'bare_url': re.compile(r'\\b({})'.format(url), flags=re.I),\n 'bracket_url': re.compile(\n r'(\\[{}[^\\]]*\\])'.format(url), flags=re.I\n ),\n 'ISBN': re.compile(\n r'\\bISBN(?P<separator>{spaces})(?P<value>(?:97[89]{space_dash}'\n r'?)?(?:[0-9]{space_dash}?){{9}}[0-9Xx])\\b'.format(\n spaces=spaces, space_dash=space_dash\n )\n ),\n 'PMID': re.compile(\n r'\\bPMID(?P<separator>{spaces})(?P<value>[0-9]+)\\b'.format(\n spaces=spaces\n )\n ),\n 'RFC': re.compile(\n r'\\bRFC(?P<separator>{spaces})(?P<value>[0-9]+)\\b'.format(\n spaces=spaces\n )\n ),\n 'tags': re.compile(\n r'''(<\\/?\\w+(?:\\s+\\w+(?:\\s*=\\s*(?:(?:\"[^\"]*\")|(?:'[^']*')|'''\n r'''[^>\\s]+))?)*\\s*\\/?>)'''\n ),\n 'tags_content': re.compile(\n r'(<(?P<tag>{})\\b.*?</(?P=tag)>)'.format(r'|'.join(tags)),\n flags=re.I | re.M,\n ),\n }\n )", "def get_pattern(flags: dict, input_tensors: list,\n output_tensors: list, tensor_list: list, tensor_map: dict):\n # If nothing matches, default pattern would be opaque pattern\n matched_pattern = OpPatternRecognizer._apply_pattern_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n matched_subpattern = OpPatternRecognizer.apply_subpattern_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n matched_special_op = OpPatternRecognizer.apply_spec_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n return matched_pattern, matched_subpattern, matched_special_op", "def construct_variable_regex(before, after, mismatches):\n\tif mismatches == 0:\n\t\treturn f\"{before}(.*){after}\"\n\t\n\t# get a regex for a mismatch in every place in before and after sequences\n\tbefores = create_mismatches_regex([before], mismatches)\n\tafters = create_mismatches_regex([after], mismatches)\n\t\n\t# combine each before and after regex with (.+) in the middle\n\tregexes = []\n\tfor b in befores.split(\"|\"):\n\t\tfor a in afters.split(\"|\"):\n\t\t\tregexes.append(f\"{b}(.*){a}\")\n\treturn \"|\".join(regexes)", "def process_regex_form_data(pattern, flags, text, methods, method):\n multi_match = ''\n single_match = ''\n flags = \"|\".join(flags)\n regex = eval('re.compile(r\"{}\", {})'.format(pattern, flags))\n # if the user fails to select a method it defaults to the re.match method\n if not method:\n match = regex.match(text)\n # else convert the selected method from a string to a regex object by\n # searching regex_method returned by the regex_methods function.\n else:\n match = methods[method](regex, text)\n # if a match is found 
...\n if match is not None:\n # check if the method used is the \"re.findall\" or \"re.finditer\"\n # method as these do not support the match.group() method\n if method == 're.findall':\n multi_match = match\n elif method == 're.finditer':\n multi_match = [i.group() for i in match]\n else:\n single_match = match.group()\n return single_match, multi_match", "def get_regex_format(self, case_sensitive=True):\n\n if case_sensitive is True:\n c = self.cursor()\n c.execute('PRAGMA case_sensitive_like=true')\n elif case_sensitive is False:\n c = self.cursor()\n c.execute('PRAGMA case_sensitive_like=false')\n elif case_sensitive is None:\n pass\n else:\n raise errors.UnknownCaseSensitiveError(value=case_sensitive)\n\n return \"{target:s} REGEXP {pattern:s}\"", "def _binary_command_regexes(self):\n patterns = {}\n for intent, keys in self.keywords.get(\"binary\").items():\n if keys:\n patterns[intent] = re.compile(r'\\b' + r'\\b|\\b'.join(keys) + r'\\b')\n return patterns", "def get_stem_regex(stems, aff_is_prefix):\n # If the stems contain special characters, despecialize.\n stems = \" --- \".join(stems)\n stems = stems.replace(\"*\", \"\\*\")\n stems = stems.replace(\"?\", \"\\?\")\n stems = stems.replace(\".\", \"\\.\")\n stems = stems.replace(\"|\", \"\\|\")\n stems = stems.split(\" --- \")\n if aff_is_prefix:\n regex = \"(\" + \"|\".join([b+\"$\" for b in stems]) + \")\"\n else: \n regex = \"(\" + \"|\".join([\"^\"+b for b in stems]) + \")\"\n return re.compile(regex).search", "def parse_pattern(s: str) -> str:\n # Escape regex metacharacters\n for c in [\"\\\\\", \".\", \"(\", \")\", \"[\", \"]\", \"^\", \"$\", \"*\", \"+\", \"?\", \"|\"]:\n s = s.replace(c, \"\\\\\" + c)\n\n s = re.sub(\"~+\", \".*\", s)\n s = \"^\" + s + \"$\"\n return s", "def buffer_build_regex(buffer):\n\n\thdata = weechat.hdata_get(\"buffer\")\n\tinput = weechat.hdata_string(hdata, buffer, \"input_buffer\")\n\texact = weechat.hdata_integer(hdata, buffer, \"text_search_exact\")\n\twhere = weechat.hdata_integer(hdata, buffer, \"text_search_where\")\n\tregex = weechat.hdata_integer(hdata, buffer, \"text_search_regex\")\n\n\tif not regex:\n\t\tinput = re.escape(input)\n\n\tif exact:\n\t\tinput = \"(?-i)%s\" % input\n\n\tfilter_regex = None\n\tif where == 1: # message\n\t\tfilter_regex = input\n\telif where == 2: # prefix\n\t\tfilter_regex = \"%s\\\\t\" % input\n\telse: # prefix | message\n\t\tfilter_regex = input # TODO: impossible with current filter regex\n\n\treturn \"!%s\" % filter_regex", "def compile(domain_rule):\n\n regex_parts = []\n\n def _build_regex(rule):\n for p_rule, p_var in parse_rule(rule):\n if p_rule:\n regex_parts.append(re.escape(p_rule))\n if p_var:\n regex_parts.append('(?P<%s>[^/]{1,})' % p_var)\n\n _build_regex(domain_rule)\n\n regex = r'^%s$' % (u''.join(regex_parts))\n _regex = re.compile(regex, re.UNICODE)\n print regex\n return _regex", "def _compiled_format_regexps(date_formats, time_formats):\n # List of all combinations of date_formats and time_formats\n date_time_formats = []\n for df in date_formats:\n for tf in time_formats:\n date_time_formats.append(df + ' ' + tf)\n\n # Add date-only formats\n for df in date_formats:\n date_time_formats.append(df)\n\n # Add time-only formats\n for tf in time_formats:\n date_time_formats.append(tf)\n\n # (format, compiled_regexp) for each supported format\n format_regexps = []\n for dt_format in date_time_formats:\n format, regexp = format_regexp(dt_format)\n # Compile the regexp\n format_regexps.append(\n (format, re.compile(regexp, re.IGNORECASE))\n 
)\n\n return format_regexps", "def matcher(string):\n rec = re.compile(rexp, re.VERBOSE)\n groups = set(rec.groupindex) # index nos of no interest; discard\n m = rec.search(string)\n if m is None: return None\n # Match succeeded at this point\n # match-data -> Python\n mapped_d = {gname : m.group(gname) for gname in groups}\n # postprocess and done!\n return {k : ppers[k](mapped_d[k]) for k in mapped_d}", "def __convert_zone_to_regex(zone_queried):\n regex_string = \"^(.*\\\\.)*\" + re.escape(zone_queried) + \"$\"\n return re.compile(regex_string)", "def compile_regex(self, fmt, query):\n return re.compile(fmt.format(\n query.pattern.replace('.', '\\.').replace('*', '[^\\.]*').replace(\n '{', '(').replace(',', '|').replace('}', ')')\n ))", "def get_pattern(src_string, regex):\n ret = None\n pattern = re.search(regex, src_string)\n if pattern is not None:\n ret = int(pattern.group(2))\n return ret", "def test_regex_featurizer():\n from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer\n sentence, expected, labeled_tokens = (\n \"hey how are you today\",\n [\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n ],\n [0],\n )\n patterns = [\n {\"pattern\": \"[0-9]+\", \"name\": \"number\", \"usage\": \"intent\"},\n {\"pattern\": \"\\\\bhey*\", \"name\": \"hello\", \"usage\": \"intent\"},\n {\"pattern\": \"[0-1]+\", \"name\": \"binary\", \"usage\": \"intent\"},\n ]\n ftr = RegexFeaturizer({}, known_patterns=patterns)\n\n # adds tokens to the message\n tokenizer = SpacyTokenizer({})\n message = Message(sentence, data={RESPONSE: sentence})\n assert show_message(message, False) == {\n \"response\": \"hey how are you today\",\n \"text\": \"hey how are you today\"\n }\n message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))\n tokenizer.process(message)\n # assert show_message(message) == {'response': 'hey how are you today', 'text_spacy_doc': spacy_nlp(\"hey how are you today\"),\n # 'tokens': ['hey', 'how', 'are', 'you', 'today', '__CLS__'],\n # 'text': 'hey how are you today'}\n # result = ftr._features_for_patterns(message, TEXT)\n ftr.process(message) # [TEXT, RESPONSE]\n show_message(message)\n assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0", "def _get_pattern(\n notebook: str, command: str, cell_mapping: Mapping[int, str]\n) -> Sequence[Tuple[str, Union[str, Callable[[Match[str]], str]]]]:\n standard_substitution = partial(_line_to_cell, cell_mapping=cell_mapping)\n\n relative_path, absolute_path = get_relative_and_absolute_paths(notebook)\n\n if command == \"black\":\n return [\n (\n rf\"(?<=^error: cannot format {re.escape(relative_path)}: Cannot parse: )\\d+\"\n rf\"|(?<=^error: cannot format {re.escape(absolute_path)}: Cannot parse: )\\d+\",\n standard_substitution,\n ),\n (r\"(?<=line )\\d+(?=\\)\\nOh no! )\", standard_substitution),\n (r\"line cell_(?=\\d+:\\d+\\)\\nOh no! 
)\", \"cell_\"),\n ]\n\n if command == \"doctest\":\n return [\n (\n rf'(?<=^File \"{re.escape(relative_path)}\", line )\\d+'\n rf'|(?<=^File \"{re.escape(absolute_path)}\", line )\\d+',\n standard_substitution,\n ),\n (\n rf'(?<=^File \"{re.escape(relative_path)}\",) line'\n rf'|(?<=^File \"{re.escape(absolute_path)}\",) line',\n \"\",\n ),\n ]\n\n # This is the most common one and is used by flake, pylint, mypy, and more.\n return [\n (\n rf\"(?<=^{re.escape(absolute_path)}:)\\d+\"\n rf\"|(?<=^{re.escape(relative_path)}:)\\d+\",\n standard_substitution,\n )\n ]", "def prep_filename_masks(mask:str)->(list,list):\n mask = mask.strip()\n if '\"' in mask:\n # Temporary replace all ' ' into \"\" to '·'\n re_binqu= re.compile(r'\"([^\"]+) ([^\"]+)\"')\n while re_binqu.search(mask):\n mask= re_binqu.sub(r'\"\\1·\\2\"', mask) \n masks = mask.split(' ')\n masks = [m.strip('\"').replace('·', ' ') for m in masks if m]\n else:\n masks = mask.split(' ')\n fi_masks= [m for m in masks if m and m[0]!='/']\n fo_masks= [m[1:] for m in masks if len(m)>1 and m[0]=='/']\n return (fi_masks, fo_masks)", "def _build_regex(path):\n re_list = ['^']\n var_list = list()\n is_var = False\n for v in _RE_ROUTE.split(path):\n if is_var:\n var_name = v[1:]\n var_list.append(var_name)\n re_list.append(r'(?P<%s>[^\\/]+)' % var_name)\n else:\n s = ''\n for ch in v:\n if ch in _LETTERS_DIGITS:\n s += ch\n else:\n s += '\\\\' + ch\n re_list.append(s)\n is_var = not is_var\n re_list.append('$')\n return ''.join(re_list)", "def by_regex(cls, *patterns):\n return cls(*(to_matcher(RegexMatcher, p) for p in patterns))", "def find_pattern_in_str(pattern, source):\n pattern = re.compile(pattern)\n for match in re.finditer(pattern,source):\n return match.groups()\n return None", "def compile_regexes(bugs):\n for bug in bugs:\n try:\n bug['regex'] = re.compile(bug['pattern'])\n except Exception as exc:\n print \"regex error: \", bug['pattern'], bug['id']\n bug['regex'] = None\n return bugs", "def match_replace(string_to_match, list_regex):\n matched = []\n for regex in list_regex:\n match = re.search(regex[0], string_to_match)\n if match:\n string_to_match = re.sub(regex[0], regex[1], string_to_match)\n matched.append(regex[0].pattern)\n return string_to_match, matched", "def filter_on_regexp(strings, pattern):\n filtered_strings = filter(lambda x: re.search(pattern, x), strings)\n return filtered_strings", "def make_class_masks(subject_names):\n masks = Munch()\n\n masks.has_dash = subject_names.apply(test_dash_in)\n masks.first_alpha = subject_names.apply(test_starts_letter)\n masks.last_alpha = subject_names.apply(test_ends_letter)\n\n return masks", "def build_phone_regexp_clause(field, string):\r\n answer = {}\r\n tmp = {}\r\n string1 = ''\r\n for char in string:\r\n if re.search(\"[^0-9]\", char) is None:\r\n string1 += char\r\n tmp[field] = '.*'+SparqlTranslator.SparqlTranslator._strip_initial_zeros(string1)\r\n answer['regexp'] = tmp\r\n return answer", "async def match_regex(text, opts):\n\n def is_case_sensitive():\n if opts[\"case_sensitive\"]:\n return False\n return regex.IGNORECASE\n\n if opts[\"matching_condition\"].lower() == \"search\":\n matched_regex = regex.search(opts[\"expression\"], text, is_case_sensitive())\n elif opts[\"matching_condition\"].lower() == \"fullmatch\":\n matched_regex = regex.fullmatch(opts[\"expression\"], text, is_case_sensitive())\n else:\n matched_regex = regex.match(opts[\"expression\"], text, is_case_sensitive())\n return matched_regex", "def ra_code(string):\n code_pattern = 
'ra{0,1}[efgk]s{0,1}\\d{2}[a-z][0-9a-z]{0,1}'\n code = re.search(code_pattern, string.lower())\n if not code:\n print \"No code found\"\n return\n c = code.group()\n if c[:1] == 'rk':\n code = 'raks' + c[2:]\n elif c[:1] == 're':\n code = 'raes' + c[2:]\n elif c[:1] == 'rg':\n code = 'rags' + c[2:]\n elif c[:1] == 'rf':\n code = 'rafs' + c[2:]\n else:\n code = c\n return code", "def regex_pattern(self):\n regex_to_match = input(\"Enter the regex pattern you'd like to use> \")\n return regex_to_match", "def glob_to_regex(glob):\n res = \"\"\n for c in glob:\n if c == \"*\":\n res = res + \".*\"\n elif c == \"?\":\n res = res + \".\"\n else:\n res = res + re.escape(c)\n\n # \\A anchors at start of string, \\Z at end of string\n return re.compile(r\"\\A\" + res + r\"\\Z\", re.IGNORECASE)", "def create_src_file_exts_str(input_exts: list[str] = []) -> Pattern[str]:\n import re\n\n input_exts = [re.escape(ext) for ext in input_exts]\n return create_src_file_exts_regex(input_exts)", "def _compile_audience_pattern(self, pattern):\n re_pattern = fnmatch.translate(pattern)\n if \"://\" not in pattern:\n re_pattern = \"[a-z]+://\" + re_pattern\n return re.compile(re_pattern)", "def main(self, regex_string):\n sql_sen = regex_string[0][0]\n reg = \"\\$\\w+\"\n if re.search(reg, sql_sen, re.I):\n\n p = re.compile(reg)\n match = p.findall(sql_sen)\n return match\n return None", "def compile_re(self,src):\r\n # \"reLst\" is a list of match-terms. Each term is a pair:\r\n # [props,variants]. props gives the qualifiers (if any) and\r\n # variants is a list of variants for the term.\r\n reLst = []\r\n # canonicalize space\r\n src = src.strip()\r\n reBar = re.compile(r'\\s*\\|\\s*')\r\n src = reBar.sub('|',src)\r\n lsrc = len(src)\r\n i = 0\r\n while i<lsrc:\r\n while src[i] == ' ':\r\n i += 1\r\n continue\r\n variants = []\r\n term = [0,variants]\r\n reLst.append(term)\r\n # collect alternatives for this term\r\n while i<lsrc:\r\n i = self.compile_re_term(variants,src,i)\r\n if i>=lsrc:\r\n break\r\n c = src[i]\r\n i += 1\r\n if c == '|':\r\n # get additional alternatives\r\n continue\r\n # if c is a qualifier, it ends the term\r\n if c == '*':\r\n term[0] = _zero_or_more\r\n elif c == '+':\r\n term[0] = _one_or_more\r\n elif c == '?':\r\n term[0] = _is_option\r\n # this term is complete: advance to next\r\n break\r\n return reLst", "def compile_regex(self):\n\n self.compiled_vbs_var_regex = re.compile(\n self.vbs_var_regex.replace(\"MIN_VAR_LENGTH\", str(self.min_var_len)).replace(\n \"MAX_VAR_LENGTH\", str(self.max_var_len)\n )\n )\n\n self.compiled_vbs_func_regex = re.compile(\n self.vbs_func_regex.replace(\n \"MIN_FUNC_LENGTH\", str(self.min_func_len)\n ).replace(\"MAX_FUNC_LENGTH\", str(self.max_func_len))\n )", "def compile_route_to_regex(self):\n # Split the route\n split_given_route = self.url.split(\"/\")\n # compile the provided url into regex\n url_list = []\n regex = \"^\"\n for regex_route in split_given_route:\n if \"@\" in regex_route:\n if \":\" in regex_route:\n try:\n param_name, compiler_name = regex_route.split(\":\")\n regex += self.compilers[compiler_name]\n self._active_regex.update(\n {param_name.replace(\"@\", \"\"): compiler_name}\n )\n except KeyError:\n raise InvalidRouteCompileException(\n 'Route compiler \"{}\" is not an available route compiler. 
'\n \"Verify you spelled it correctly or that you have added it using the compile() method.\".format(\n regex_route.split(\":\")[1]\n )\n )\n\n else:\n regex += self.compilers[\"default\"]\n\n regex += r\"\\/\"\n\n # append the variable name passed @(variable):int to a list\n url_list.append(regex_route.replace(\"@\", \"\").split(\":\")[0])\n elif \"?\" in regex_route:\n # Make the preceding token match 0 or more\n regex += \"?\"\n\n if \":\" in regex_route:\n\n try:\n param_name, compiler_name = regex_route.split(\":\")\n regex += self.compilers[compiler_name] + \"*\"\n self._active_regex.update(\n {param_name.replace(\"@\", \"\"): compiler_name}\n )\n except KeyError:\n if self.request:\n raise InvalidRouteCompileException(\n 'Route compiler \"{}\" is not an available route compiler. '\n \"Verify you spelled it correctly or that you have added it using the compile() method.\".format(\n regex_route.split(\":\")[1]\n )\n )\n self._compiled_regex = None\n self._compiled_regex_end = None\n return\n\n else:\n regex += self.compilers[\"default\"] + \"*\"\n\n regex += r\"\\/\"\n\n url_list.append(regex_route.replace(\"?\", \"\").split(\":\")[0])\n else:\n regex += regex_route + r\"\\/\"\n\n self.url_list = url_list\n regex += \"$\"\n self._compiled_regex = re.compile(regex.replace(r\"\\/$\", r\"$\"))\n self._compiled_regex_end = re.compile(regex)\n\n return regex", "def parse_mask(string):\n return string.split(' = ')[1]", "def address_regex(self) -> Any:", "def _get_regex_for_pattern(self, pattern: bytes):\n # TODO: should blacksheep support \":\" in routes (using escape chars)?\n for c in _escaped_chars:\n if c in pattern:\n pattern = pattern.replace(c, b\"\\\\\" + c)\n\n if b\"*\" in pattern:\n # throw exception if a star appears more than once\n if pattern.count(b\"*\") > 1:\n raise RouteException(\n \"A route pattern cannot contain more than one star sign *. \"\n \"Multiple star signs are not supported.\"\n )\n\n if b\"/*\" in pattern:\n pattern = _route_all_rx.sub(br\"?(?P<tail>.*)\", pattern)\n else:\n pattern = _route_all_rx.sub(br\"(?P<tail>.*)\", pattern)\n\n # support for < > patterns, e.g. /api/cats/<cat_id>\n # but also: /api/cats/<int:cat_id> or /api/cats/<uuid:cat_id> for more\n # granular control on the generated pattern\n if b\"<\" in pattern:\n pattern = _angle_bracket_route_param_rx.sub(\n self._handle_rich_parameter, pattern\n )\n\n # support for mustache patterns, e.g. /api/cats/{cat_id}\n # but also: /api/cats/{int:cat_id} or /api/cats/{uuid:cat_id} for more\n # granular control on the generated pattern\n if b\"{\" in pattern:\n pattern = _mustache_route_param_rx.sub(self._handle_rich_parameter, pattern)\n\n # route parameters defined using /:name syntax\n if b\"/:\" in pattern:\n pattern = _route_param_rx.sub(br\"/(?P<\\1>[^\\/]+)\", pattern)\n\n # NB: following code is just to throw user friendly errors;\n # regex would fail anyway, but with a more complex message\n # 'sre_constants.error: redefinition of group name'\n # we only return param names as they are useful for other things\n param_names = []\n for p in _named_group_rx.finditer(pattern):\n param_name = p.group(1)\n if param_name in param_names:\n raise ValueError(\n f\"cannot have multiple parameters with name: \" f\"{param_name}\"\n )\n\n param_names.append(param_name)\n\n if len(pattern) > 1 and not pattern.endswith(b\"*\"):\n # NB: the /? 
at the end ensures that a route is matched both with\n # a trailing slash or not\n pattern = pattern + b\"/?\"\n return re.compile(b\"^\" + pattern + b\"$\", re.IGNORECASE), param_names", "def match(self, string: str) -> Tuple:\n re_match = None\n re_rule = None\n for regex_name in self.regexes:\n regex = self.regexes[regex_name]\n re_match = regex.match(string)\n if re_match is not None:\n re_rule = regex_name\n break\n return re_rule, re_match", "def compile_regex(x, n_times=\"+\"):\n y = \"(\" + \"|\".join(x) + \")\" + n_times\n return y", "def convert_pattern(pattern, pattern_type=None):\n\tif pattern_type == 'regex':\n\t\treturn re.compile(pattern)\n\telif pattern_type == 'wildcard':\n\t\treturn re.compile(fnmatch.translate(pattern))\n\treturn re.compile(re.escape(pattern))", "def get_regex(self, strict=True):\n _ctx = construction.Context(strict=strict)\n return self._get_regex(_ctx)", "def normalize_races(string):\n\n general_filter_regex = re.compile(r'(countywide|initiative|county of|city of|port|director|council|school|mayor)', re.IGNORECASE)\n presidential_regex = re.compile('president', re.IGNORECASE)\n senate_regex = re.compile(r'(senate|senator)', re.IGNORECASE)\n house_regex = re.compile(r'(house|representative)', re.IGNORECASE)\n governor_regex = re.compile('governor', re.IGNORECASE)\n treasurer_regex = re.compile('treasurer', re.IGNORECASE)\n auditor_regex = re.compile('auditor', re.IGNORECASE)\n sos_regex = re.compile('secretary', re.IGNORECASE)\n lt_gov_regex = re.compile(r'(lt|Lieutenant)', re.IGNORECASE)\n ospi_regex = re.compile(\n 'superintendent of public instruction',\n re.IGNORECASE)\n ag_regex = re.compile('attorney general', re.IGNORECASE)\n wcpl_regex = re.compile('commissioner of public lands', re.IGNORECASE)\n local_regex = re.compile(\n r'(^State\\b|Washington|Washington\\s+State|Local|Legislative District)',\n re.IGNORECASE)\n national_regex = re.compile(\n r'(U\\.S\\.|\\bUS\\b|Congressional|National|United\\s+States|U\\.\\s+S\\.\\s+)',\n re.IGNORECASE)\n\n \"\"\"\n The following chained if statements are ordered by the most frequent\n occurrences. As of August 26th, 2014 these are the results from\n running `egrep -rohi 'regex' . | wc -l`\n\n I've placed Lt. Governor's regex ahead of Governor's in order to\n be able to get the Lt. Governor's values and keep a simplified regex.\n\n These aren't exact, but give are a rough assessment of the number\n of occurrences.\n\n National: 935375\n Local: 953031\n\n *House: 417020\n Governor: 319836\n CPL: 344795\n *Senate: 186247\n Lt. Gov.: 161537\n SPI: 128783\n SoS: 122404\n Auditor: 103920\n AG: 85059\n President: 75183\n\n \"\"\"\n\n if re.search(general_filter_regex, string):\n return 'N/A'\n elif re.search(house_regex, string):\n if re.search(national_regex, string):\n return 'U.S. Representative'\n elif re.search(local_regex, string):\n return 'State Representative'\n else:\n return 'N/A'\n elif re.search(lt_gov_regex, string):\n return 'Lt. Governor'\n elif re.search(governor_regex, string):\n return 'Governor'\n elif re.search(wcpl_regex, string):\n return 'Commissioner of Public Lands'\n elif re.search(senate_regex, string):\n if re.search(national_regex, string):\n return 'U.S. 
Senator'\n elif re.search(local_regex, string):\n return 'State Senator'\n else:\n return 'N/A'\n elif re.search(ospi_regex, string):\n return 'Superintendent of Public Instruction'\n elif re.search(sos_regex, string):\n return 'Secretary of State'\n elif re.search(treasurer_regex, string):\n return 'Treasurer'\n elif re.search(auditor_regex, string):\n return 'Auditor'\n elif re.search(ag_regex, string):\n return 'Attorney General'\n elif re.search(presidential_regex, string):\n return 'President'\n else:\n return 'N/A'", "def regexp_predicate(value):\n return re.compile(value).match", "def create_masked_lm_predictions_based_given(tokens, max_predictions_per_seq, segment_ids):\n\n tokens_len = len(tokens)\n\n output_tokens = []\n masked_lm_positions = []\n masked_lm_labels = []\n segment_ids_new = []\n i=0\n idx=0\n num_masks = 0\n while i < tokens_len:\n tok = tokens[i]\n if tok==u'01':\n masked_token = \"[MASK]\"\n output_tokens.append(masked_token)\n masked_lm_positions.append(idx)\n i+=1\n num_masks += 1\n masked_lm_labels.append(tokens[i])\n segment_ids_new.append(segment_ids[i])\n idx+=1\n else:\n output_tokens.append(tok)\n segment_ids_new.append(segment_ids[i])\n idx+=1\n i+=1\n if num_masks>max_predictions_per_seq:\n print ('too many masks')\n # print (tokens)\n # print (output_tokens)\n # print (masked_lm_positions)\n # print (masked_lm_labels)\n # abc\n\n return (output_tokens, masked_lm_positions, masked_lm_labels, segment_ids_new)", "def uvm_glob_to_re(_str):\n if _str is None or _str == \"\":\n return \"\"\n if _str[0] == \"/\" and _str[-1] == \"/\":\n return _str\n # TODO replace * with .*\n res = _str.replace('.', '\\\\.')\n res = res.replace('*', '.*')\n res = res.replace('[', '\\\\[')\n res = res.replace(']', '\\\\]')\n res = res.replace('?', '.')\n # TODO add more substitutions\n return res", "def integrated_address_regex(self) -> Any:", "def regexp(regexp_list):\n def add_attribute(func):\n if not hasattr(func, \"regexp\"):\n func.regexp = []\n func.regexp.append(regexp_list)\n return func\n return add_attribute", "def AsRegEx(self):\n parts = _REGEX_SPLIT_PATTERN.split(self._value)\n result = u\"\".join(self._ReplaceRegExPart(p) for p in parts)\n\n return rdf_standard.RegularExpression(u\"(?i)\\\\A%s\\\\Z\" % result)", "def get_ip_pattern(ip):\n return re.compile(ip.replace('.', '[.]'))", "def _compile_object_search():\n class_or = b'(' + b'|'.join(CLASSES) + b')'\n for i in range(9):\n expr = class_or + struct.pack('b', i) + b'(?!\\xff\\xff)(?!\\x00\\x00)[\\x00-\\xff]{4}\\xff\\xff\\xff\\xff[^\\xff]'\n REGEXES[i] = re.compile(expr)", "def RunRegex(regex, string):\n m = regex.search(string)\n if m:\n return m.groups()[0]\n else:\n return None", "def to_regex(x):\n try:\n return re.compile(x)\n except TypeError:\n return None", "def compile(self, name, pattern):\n try:\n return self.get_pattern(name)\n except KeyError:\n return self.store_pattern(name, re.compile(pattern))", "def from_python_regex(cls, regex):\n return regular_expression.PythonRegex(regex)", "def simplify_standard_patterns(function: Function) -> Function:\n BodyPart = Union[Instruction, Label]\n PatternPart = Union[Instruction, Label, None]\n Pattern = List[Tuple[PatternPart, bool]]\n\n def make_pattern(*parts: str) -> Pattern:\n ret: Pattern = []\n for part in parts:\n optional = part.endswith(\"*\")\n part = part.rstrip(\"*\")\n if part == \"?\":\n ret.append((None, optional))\n elif part.endswith(\":\"):\n ret.append((Label(\"\"), optional))\n else:\n ins = parse_instruction(part, 
InstructionMeta.missing())\n ret.append((ins, optional))\n return ret\n\n div_pattern = make_pattern(\n \"bnez $x, .A\",\n \"?\", # nop or div\n \"break\",\n \".A:\",\n \"li $at, -1\",\n \"bne $x, $at, .B\",\n \"li $at, 0x80000000\",\n \"bne $y, $at, .B\",\n \"nop\",\n \"break\",\n \".B:\",\n )\n\n divu_pattern = make_pattern(\n \"bnez $x, .A\",\n \"nop\",\n \"break\",\n \".A:\",\n )\n\n mod_p2_pattern = make_pattern(\n \"bgez $x, .A\",\n \"andi $y, $x, LIT\",\n \"beqz $y, .A\",\n \"nop\",\n \"addiu $y, $y, LIT\",\n \".A:\",\n )\n\n div_p2_pattern_1 = make_pattern(\n \"bgez $x, .A\",\n \"sra $y, $x, LIT\",\n \"addiu $at, $x, LIT\",\n \"sra $y, $at, LIT\",\n \".A:\",\n )\n\n div_p2_pattern_2 = make_pattern(\n \"bgez $x, .A\",\n \"move $at, $x\",\n \"addiu $at, $x, LIT\",\n \".A:\",\n \"sra $x, $at, LIT\",\n )\n\n div_2_s16_pattern = make_pattern(\n \"sll $x, $x, LIT\",\n \"sra $y, $x, LIT\",\n \"srl $x, $x, 0x1f\",\n \"addu $y, $y, $x\",\n \"sra $y, $y, 1\",\n )\n\n div_2_s32_pattern = make_pattern(\n \"srl $x, $y, 0x1f\",\n \"addu $x, $y, $x\",\n \"sra $x, $x, 1\",\n )\n\n utf_pattern = make_pattern(\n \"bgez $x, .A\",\n \"cvt.s.w\",\n \"li $at, 0x4f800000\",\n \"mtc1\",\n \"nop\",\n \"add.s\",\n \".A:\",\n )\n\n ftu_pattern = make_pattern(\n \"cfc1 $y, $31\",\n \"nop\",\n \"andi\",\n \"andi*\", # (skippable)\n \"?\", # bnez or bneql\n \"?\",\n \"li*\",\n \"mtc1\",\n \"mtc1*\",\n \"li\",\n \"?\", # sub.fmt ?, X, ?\n \"ctc1\",\n \"nop\",\n \"?\", # cvt.w.fmt ?, ?\n \"cfc1\",\n \"nop\",\n \"andi\",\n \"andi*\",\n \"bnez\",\n \"nop\",\n \"mfc1\",\n \"li\",\n \"b\",\n \"or\",\n \".A:\",\n \"b\",\n \"li\",\n \"?\", # label: (moved one step down if bneql)\n \"?\", # mfc1\n \"nop\",\n \"bltz\",\n \"nop\",\n )\n\n lwc1_twice_pattern = make_pattern(\"lwc1\", \"lwc1\")\n swc1_twice_pattern = make_pattern(\"swc1\", \"swc1\")\n\n gcc_sqrt_pattern = make_pattern(\n \"sqrt.s\",\n \"c.eq.s\",\n \"nop\",\n \"bc1t\",\n \"?\",\n \"jal sqrtf\",\n \"nop\",\n )\n\n def matches_pattern(actual: List[BodyPart], pattern: Pattern) -> int:\n symbolic_registers: Dict[str, Register] = {}\n symbolic_labels: Dict[str, str] = {}\n\n def match_one(actual: BodyPart, exp: PatternPart) -> bool:\n if exp is None:\n return True\n if isinstance(exp, Label):\n name = symbolic_labels.get(exp.name)\n return isinstance(actual, Label) and (\n name is None or actual.name == name\n )\n if not isinstance(actual, Instruction):\n return False\n ins = actual\n if ins.mnemonic != exp.mnemonic:\n return False\n if exp.args:\n if len(exp.args) != len(ins.args):\n return False\n for (e, a) in zip(exp.args, ins.args):\n if isinstance(e, AsmLiteral):\n if not isinstance(a, AsmLiteral) or e.value != a.value:\n return False\n elif isinstance(e, Register):\n if not isinstance(a, Register):\n return False\n if len(e.register_name) <= 1:\n if e.register_name not in symbolic_registers:\n symbolic_registers[e.register_name] = a\n elif symbolic_registers[e.register_name] != a:\n return False\n elif e.register_name != a.register_name:\n return False\n elif isinstance(e, AsmGlobalSymbol):\n if e.symbol_name == \"LIT\" and not isinstance(a, AsmLiteral):\n return False\n elif isinstance(e, JumpTarget):\n if not isinstance(a, JumpTarget):\n return False\n if e.target not in symbolic_labels:\n symbolic_labels[e.target] = a.target\n elif symbolic_labels[e.target] != a.target:\n return False\n else:\n assert False, f\"bad pattern part: {exp}\"\n return True\n\n actuali = 0\n for (pat, optional) in pattern:\n if actuali < len(actual) and 
match_one(actual[actuali], pat):\n actuali += 1\n elif not optional:\n return 0\n return actuali\n\n def create_div_p2(bgez: Instruction, sra: Instruction) -> Instruction:\n assert isinstance(sra.args[2], AsmLiteral)\n shift = sra.args[2].value & 0x1F\n return Instruction.derived(\n \"div.fictive\", [sra.args[0], bgez.args[0], AsmLiteral(2 ** shift)], sra\n )\n\n def try_replace_div(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(div_pattern)]\n if not matches_pattern(actual, div_pattern):\n return None\n return ([actual[1]], i + len(div_pattern) - 1)\n\n def try_replace_divu(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(divu_pattern)]\n if not matches_pattern(actual, divu_pattern):\n return None\n return ([], i + len(divu_pattern) - 1)\n\n def try_replace_div_p2_1(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n # Division by power of two where input reg != output reg\n actual = function.body[i : i + len(div_p2_pattern_1)]\n if not matches_pattern(actual, div_p2_pattern_1):\n return None\n bnez = typing.cast(Instruction, actual[0])\n div = create_div_p2(bnez, typing.cast(Instruction, actual[3]))\n return ([div], i + len(div_p2_pattern_1) - 1)\n\n def try_replace_div_p2_2(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n # Division by power of two where input reg = output reg\n actual = function.body[i : i + len(div_p2_pattern_2)]\n if not matches_pattern(actual, div_p2_pattern_2):\n return None\n bnez = typing.cast(Instruction, actual[0])\n div = create_div_p2(bnez, typing.cast(Instruction, actual[4]))\n return ([div], i + len(div_p2_pattern_2))\n\n def try_replace_div_2_s16(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(div_2_s16_pattern)]\n if not matches_pattern(actual, div_2_s16_pattern):\n return None\n sll1 = typing.cast(Instruction, actual[0])\n sra1 = typing.cast(Instruction, actual[1])\n sra = typing.cast(Instruction, actual[4])\n if sll1.args[2] != sra1.args[2]:\n return None\n div = Instruction.derived(\n \"div.fictive\", [sra.args[0], sra.args[0], AsmLiteral(2)], sra\n )\n return ([sll1, sra1, div], i + len(div_2_s16_pattern))\n\n def try_replace_div_2_s32(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(div_2_s32_pattern)]\n if not matches_pattern(actual, div_2_s32_pattern):\n return None\n addu = typing.cast(Instruction, actual[1])\n sra = typing.cast(Instruction, actual[2])\n div = Instruction.derived(\n \"div.fictive\", [sra.args[0], addu.args[1], AsmLiteral(2)], sra\n )\n return ([div], i + len(div_2_s32_pattern))\n\n def try_replace_mod_p2(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(mod_p2_pattern)]\n if not matches_pattern(actual, mod_p2_pattern):\n return None\n andi = typing.cast(Instruction, actual[1])\n val = (typing.cast(AsmLiteral, andi.args[2]).value & 0xFFFF) + 1\n mod = Instruction.derived(\n \"mod.fictive\", [andi.args[0], andi.args[1], AsmLiteral(val)], andi\n )\n return ([mod], i + len(mod_p2_pattern) - 1)\n\n def try_replace_utf_conv(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(utf_pattern)]\n if not matches_pattern(actual, utf_pattern):\n return None\n cvt_instr = typing.cast(Instruction, actual[1])\n new_instr = Instruction.derived(\"cvt.s.u.fictive\", cvt_instr.args, cvt_instr)\n return ([new_instr], i + len(utf_pattern) - 1)\n\n def try_replace_ftu_conv(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = 
function.body[i : i + len(ftu_pattern)]\n consumed = matches_pattern(actual, ftu_pattern)\n if not consumed:\n return None\n sub = next(\n x\n for x in actual\n if isinstance(x, Instruction) and x.mnemonic.startswith(\"sub\")\n )\n cfc = actual[0]\n assert isinstance(cfc, Instruction)\n fmt = sub.mnemonic.split(\".\")[-1]\n args = [cfc.args[0], sub.args[1]]\n if fmt == \"s\":\n new_instr = Instruction.derived(\"cvt.u.s.fictive\", args, cfc)\n else:\n new_instr = Instruction.derived(\"cvt.u.d.fictive\", args, cfc)\n return ([new_instr], i + consumed)\n\n def try_replace_mips1_double_load_store(\n i: int,\n ) -> Optional[Tuple[List[BodyPart], int]]:\n # TODO: sometimes the instructions aren't consecutive.\n actual = function.body[i : i + 2]\n if not matches_pattern(actual, lwc1_twice_pattern) and not matches_pattern(\n actual, swc1_twice_pattern\n ):\n return None\n a, b = actual\n assert isinstance(a, Instruction)\n assert isinstance(b, Instruction)\n ra, rb = a.args[0], b.args[0]\n ma, mb = a.args[1], b.args[1]\n # TODO: verify that the memory locations are consecutive as well (a bit\n # annoying with macros...)\n if not (\n isinstance(ra, Register)\n and ra.is_float()\n and ra.other_f64_reg() == rb\n and isinstance(ma, AsmAddressMode)\n and isinstance(mb, AsmAddressMode)\n and ma.rhs == mb.rhs\n ):\n return None\n num = int(ra.register_name[1:])\n if num % 2 == 1:\n ra, rb = rb, ra\n ma, mb = mb, ma\n # Store the even-numbered register (ra) into the low address (mb).\n new_args = [ra, mb]\n new_mn = \"ldc1\" if a.mnemonic == \"lwc1\" else \"sdc1\"\n new_instr = Instruction.derived(new_mn, new_args, a)\n return ([new_instr], i + 2)\n\n def try_replace_gcc_sqrt(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(gcc_sqrt_pattern)]\n consumed = matches_pattern(actual, gcc_sqrt_pattern)\n if not consumed:\n return None\n sqrt = actual[0]\n assert isinstance(sqrt, Instruction)\n new_instr = Instruction.derived(\"sqrt.s\", sqrt.args, sqrt)\n return ([new_instr], i + consumed)\n\n def no_replacement(i: int) -> Tuple[List[BodyPart], int]:\n return ([function.body[i]], i + 1)\n\n new_function = function.bodyless_copy()\n i = 0\n while i < len(function.body):\n repl, i = (\n try_replace_div(i)\n or try_replace_divu(i)\n or try_replace_div_p2_1(i)\n or try_replace_div_p2_2(i)\n or try_replace_div_2_s32(i)\n or try_replace_div_2_s16(i)\n or try_replace_mod_p2(i)\n or try_replace_utf_conv(i)\n or try_replace_ftu_conv(i)\n or try_replace_mips1_double_load_store(i)\n or try_replace_gcc_sqrt(i)\n or no_replacement(i)\n )\n new_function.body.extend(repl)\n return new_function" ]
[ "0.5923782", "0.5887709", "0.5868116", "0.5849082", "0.57891506", "0.5740547", "0.5644862", "0.56026363", "0.55648553", "0.55601627", "0.5549869", "0.5541171", "0.55295944", "0.5511487", "0.54896027", "0.54493785", "0.54365784", "0.5426857", "0.54144484", "0.5406572", "0.5356964", "0.53311", "0.5304831", "0.52707964", "0.5246385", "0.52327424", "0.51988864", "0.51988363", "0.51905686", "0.5173471", "0.516298", "0.5156337", "0.5150163", "0.5133679", "0.5121468", "0.5110389", "0.5093696", "0.50825584", "0.50772905", "0.5044616", "0.5043143", "0.5030807", "0.5016606", "0.49728847", "0.495431", "0.49528655", "0.49492797", "0.49448693", "0.49443206", "0.49328855", "0.49305263", "0.49259737", "0.49141198", "0.4904554", "0.49016815", "0.4900922", "0.48950982", "0.48887172", "0.4878189", "0.48624104", "0.4858052", "0.48499835", "0.48472568", "0.48355862", "0.48278782", "0.48208532", "0.48140287", "0.48113152", "0.48106802", "0.48095134", "0.4807161", "0.48033983", "0.4779783", "0.477941", "0.47793674", "0.4775008", "0.47709525", "0.47632742", "0.47515017", "0.47458604", "0.4745239", "0.47442156", "0.47326314", "0.47315595", "0.4727884", "0.47199708", "0.47130662", "0.46872237", "0.46826655", "0.46793783", "0.4677072", "0.46755517", "0.46726614", "0.46671936", "0.466441", "0.46535748", "0.4650635", "0.46398726", "0.46328807", "0.4631897" ]
0.6694643
0
Given a Unicode IPA segment, return a list of feature specifications in canonical order.
def segment_to_vector(self, seg, normalize=True): return self.fts(seg, normalize).strings()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_seg_features(string):\n seg_feature = []\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def get_seg_features(string):\n seg_feature = []\n\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def sort_features_human_friendly_order(tokens, features):\n preferred_ordered_features = []\n\n # Short features last\n features = sorted(features, key=len, reverse=True)\n \n for token in tokens:\n # Iterate from last (shortest features first), and remove in-place*\n for feature in reversed(features):\n # Only add those that begins with current token\n if feature.startswith(token):\n preferred_ordered_features.append(feature)\n features.remove(feature)\n return preferred_ordered_features", "def to_phoible_fts(self,ipa_seg):\n ipa_seg = self.epitran_to_phoible(ipa_seg)\n if ipa_seg not in PhonologicalEmbedding.__to_phoible_feats_dict.keys():\n raise KeyError(\"The ipa segment \"+str(ipa_seg)+\" was not found in the phoible ipa-to-features dict. \"+\\\n \"We use phoible data to work with features, while we use epitran to generate transliterations. \"+\\\n \"Even though both stick to a strict standard, IPA in unicode, they sometimes have different representations \"+\\\n \"which can cause this error.\\n Consider writing an exception into PhonologicalEmbedding.__epitran_phoible_replacements.\")\n return np.array(PhonologicalEmbedding.__to_phoible_feats_dict[ipa_seg],dtype='float32')", "def extract(ref: str, hyp: str) -> list:\n ref_words = ref.split()\n hyp_words = hyp.split()\n ambiguities = []\n if len(ref_words) == len(hyp_words): # Equal amount of words means ambiguity(-ies) is within one word\n for rw, hw in zip(ref_words, hyp_words):\n if rw != hw:\n error = hw\n correction = rw\n # Remove common prefix\n while len(error) > 1 and len(correction) > 1 and error[0] == correction[0]:\n error = error[1:]\n correction = correction[1:]\n # Remove common suffix\n while len(error) > 1 and len(correction) > 1 and error[-1] == correction[-1]:\n error = error[:-1]\n correction = correction[:-1]\n # Store results\n ambiguities.append((error, correction))\n return ambiguities", "def _features_of(entry: _LexiconEntry) -> str:\n return entry[\"features\"]", "def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" \")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents", "def get_feature_names(self):\n\t\treturn np.array(['nouns', 'adjectives', 'verbs', 'adverbs'])", "def _featurize(self, predictions: SequenceSample) -> List[np.ndarray]:\n feature_vectors: List[np.ndarray] = []\n source = predictions.origin_words\n\n char_nn_scores = 
self.char_nn_lm_score(predictions.paths)\n word_nn_scores = self.word_nn_lm_score(predictions.paths)\n\n for i, (score, hypothesis) in enumerate(zip(predictions.scores, predictions.paths)):\n obss = list(zip(hypothesis, source))\n length = len(source)\n feature_vector = np.array([\n 1.,\n length,\n self.language_model.score(hypothesis) / length,\n char_nn_scores[i],\n word_nn_scores[i],\n score / length,\n sum(w in self.language_model for w in hypothesis) / length,\n sum(h[:self.prefix_size] == s[:self.prefix_size] for h, s in obss) / length,\n sum(h[-self.suffix_size:] == s[-self.prefix_size:] for h, s in obss) / length,\n self.language_model.score(hypothesis) * score / length,\n np.mean([editdistance.eval(h, s) for h, s in obss]),\n np.mean([float(obs in self.train_set_uniq) for obs in obss]),\n np.mean([self.train_counter.get(obs, self.discount) for obs in obss]),\n ])\n feature_vectors.append(feature_vector)\n return feature_vectors", "def get_verbs(self) -> Set[str]:", "def lexicon_features(tokens, feats):\n# feats['neg_words'] = 0;\n# feats['pos_words'] = 0;\n# tokens = list([token.lower() for token in tokens])\n# feats['neg_words'] , feats['pos_words'] = np.count_nonzero(np.in1d(tokens, list(neg_words))), np.count_nonzero(np.in1d(tokens, list(pos_words)))\n neg_count=0\n pos_count=0\n for i in tokens:\n if i.lower() in neg_words:\n neg_count+=1\n if i.lower() in pos_words:\n pos_count+=1\n feats[\"neg_words\"]=neg_count\n feats[\"pos_words\"]=pos_count", "def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features", "def features_from_labels(audio_file, segments):\n segments_features = []\n #for each segment\n for segment in segments:\n features = features_from_label(audio_file, segment)\n #and append it to the list\n segments_features.append(features)\n return segments_features", "def find_features(sentence: str) -> 
Set[str]:\n sent_dict = set()\n sentence = _NLP(sentence)\n for token in sentence:\n # check if the word is an opinion word, then assign sentiment\n if token.text in _OPINION_WORDS:\n # if target is an adverb modifier (i.e. pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n if (token.dep_ == \"advmod\"):\n continue\n elif (token.dep_ == \"amod\"):\n sent_dict.add(token.head.text.lower())\n # for opinion words that are adjectives, adverbs, verbs...\n else:\n for child in token.children:\n # if verb, check if there's a direct object\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n sent_dict.add(child.text.lower())\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text == \"and\":\n conj=1\n if (conj == 1) and (subchild.text != \"and\"):\n subchildren.append(subchild.text)\n conj = 0\n for subchild in subchildren:\n sent_dict.add(subchild)\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text + \" \" + noun\n sent_dict.add(noun)\n return set(word.lower() for word in sent_dict)", "def constituents(self, pnp=False):\n a = []\n for word in self.words:\n if pnp and word.pnp is not None:\n if len(a) == 0 or a[-1] != word.pnp:\n a.append(word.pnp)\n elif word.chunk is not None:\n if len(a) == 0 or a[-1] != word.chunk:\n a.append(word.chunk)\n else:\n a.append(word)\n return a", "def domain_features(domain, path_set):\n return string_features_v2(domain_string(domain, path_set))", "def bipa(sequence):\n return [_token2clts(segment)[0] for segment in sequence]", "def pairwise_algorithm(segments):\n \n segment_pairs = [(x, y) for x in segments for y in segments if x[\"name\"] < y[\"name\"]]\n \n # key is a segment name, value is a set of those features that are contrastive\n # for that segment\n contrastive_features = defaultdict(set)\n \n for x, y in segment_pairs:\n assert x.keys() == y.keys()\n contrastive_feature = None\n for k, v in x.items():\n if k != \"name\" and v != y[k]:\n if contrastive_feature is None:\n contrastive_feature = k\n else:\n contrastive_feature = None\n break\n if contrastive_feature:\n contrastive_features[x[\"name\"]].add(contrastive_feature)\n contrastive_features[y[\"name\"]].add(contrastive_feature)\n \n return contrastive_features", "def features(self, sentence, tags, index):\n return{\n 'word': sentence[ index ],\n 'prevWord': '' if index == 0 else sentence[ index - 1 ],\n 'nextWord': '' if index == len( sentence ) -1 else sentence[ index + 1 ],\n 'isFirst': index == 0,\n 'isLast': index == len( sentence ) - 1,\n 'isCapitalized': sentence[index][0].upper() == sentence[ index ][ 0],\n 'isAllCaps': sentence[ index ].upper() == sentence[ index ],\n 'isAllLowers': sentence[ index ].lower() == sentence[ index ],\n 'prefix-1': sentence[ index ][ 0 ],\n 'prefix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][:2],\n 'prefix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][:3],\n 'prefix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][:4],\n 'suffix-1': sentence[ index ][ -1 ],\n 'suffix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][-2:],\n 'suffix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][-3:],\n 'suffix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][-4:],\n 'tag-1': '' if index 
== 0 else tags[ index - 1 ],\n 'tag-2': '' if index < 2 else tags[ index - 2 ]\n }", "def features(sequence, i):\n seq = sequence[i].split(\"\\t\")[1]\n\n # first position in the sentence\n if i == 0:\n yield \"first\"\n\n if i == len(sequence) - 1:\n yield \"last\"\n\n # word's length\n yield \"len=\" + get_word_len(seq)\n\n # first 4 letters\n yield \"first_four_letters=\" + seq[:4] if len(seq) > 4 else seq\n\n # last 3 letters\n yield \"last_three_letters=\" + seq[-3:] if len(seq) > 3 else seq\n\n # word shape\n yield \"word_shape=\" + str(get_word_shape(seq))\n yield \"short_word_shape=\" + get_short_word_shape(seq)\n yield \"digits_count=\" + str(digits_count(seq))\n\n # currency\n if currency_pattern.search(seq):\n yield \"currency\"\n\n if has_affixes(seq):\n yield \"starts_with_affixes\"\n\n # contains -'its'\n if 'its' in seq or re.search(r'\\w+(tel|nik)', seq, re.I):\n yield \"with_tel_its\"\n\n # contains letter + 'к' suffix\n if re.search(r'\\w+[bjlmnpstvz]k', seq, re.I):\n yield \"with_k_suffix\"\n\n # contains letter + 'в' suffix\n if re.search(r'\\w+(st|z|o)v', seq, re.I):\n yield \"with_v_suffix\"\n\n if re.search(r'\\w+[eio]k', seq, re.I):\n yield \"with_eiok_suffix\"\n\n if re.search(r'\\w+stn', seq, re.I):\n yield \"with_stn_suffix\"\n\n if re.search(r'\\w+[dk]r', seq, re.I):\n yield \"with_dr_suffix\"\n\n if re.search(r'\\w+(sh|jj)k', seq, re.I):\n yield \"with_shk_suffix\"\n\n if re.search(r'\\w+[ln]`k', seq, re.I):\n yield \"with_lnk_suffix\"\n\n if re.search(r'l[aeio]?$', seq, re.I):\n yield \"ends_with_l\"\n\n # contains 'нн'\n if 'nn' in seq:\n yield \"with_nn\"\n\n # contains 'чн', 'чк'\n if 'chk' in seq or 'chn' in seq or 'schn' in seq:\n yield \"with_chk\"\n\n # contains letter + 'н' suffix\n if re.search(r'\\w+[jlmrstvz]n', seq, re.I):\n yield \"with_n_suffix\"\n\n # contains suffixes 'ющ', 'ящ', 'ищ', 'вш'\n if re.search(r'\\w+((y[au]|i)s?ch|vsh)', seq, re.I) or seq.endswith('v'):\n yield \"with_part_sch_suffixes\"\n\n # ends with 'ся'\n if seq.endswith(\"sya\") or seq.endswith('s\\''):\n yield \"ends_with_sya\"\n\n if seq.endswith('j') and len(seq) > 1 and is_vowel(seq[-2]):\n yield \"ends_with_j\"\n\n if seq.endswith('t') and len(seq) > 1 and is_vowel(seq[-2]):\n yield \"ends_with_t\"\n\n if seq.endswith('\\''):\n yield \"ends_with_apo\"\n\n if i > 0:\n prev = sequence[i - 1].split(\"\\t\")[1]\n # previous word's length\n yield \"prev_len=\" + str(get_word_len(prev))\n\n if i > 0:\n prev = sequence[i - 1].split(\"\\t\")[1]\n # last letters of the previous word\n yield \"prev_last_letters=\" + (prev[-3:] if len(prev) > 3 else prev)\n\n if i > 0:\n prev = sequence[i - 1].split(\"\\t\")[1]\n yield \"prev_short_word_shape=\" + get_short_word_shape(prev)\n\n if i < len(sequence) - 1:\n next = sequence[i + 1].split(\"\\t\")[1]\n # next word's length\n yield \"next_len=\" + str(get_word_len(next))\n\n if i < len(sequence) - 1:\n next = sequence[i + 1].split(\"\\t\")[1]\n # last letters of the next word\n yield \"next_last_letters=\" + (next[-3:] if len(next) > 3 else next)\n\n if i < len(sequence) - 1:\n next = sequence[i + 1].split(\"\\t\")[1]\n yield \"next_short_word_shape=\" + get_short_word_shape(next)", "def ipa_segs(self, word, normalize=True):\n if normalize:\n word = FeatureTable.normalize(word)\n return self._segs(word, include_invalid=False, normalize=normalize)", "def natsorted_icase(lst: Sequence[str]) -> List[str]:\n return sorted(lst, key=natsort_key_icase)", "def extract_features(data, stopwords=STOPWORDS):\n tags = set()\n docs = []\n for document 
in data:\n doc_data = dict()\n doc_data['pmid'] = document['sourceid']\n text = document['text']\n\n # Insert PubTator annotations inside abstract\n denotations = document['denotations']\n sorted_denotations = []\n for denotation in denotations:\n begin = denotation['span']['begin']\n end = denotation['span']['end']\n obj = denotation['obj']\n for c in punctuation:\n obj = obj.replace(c, '')\n tags.add(obj)\n doc_data[obj] = doc_data.get(obj,0)+1\n sorted_denotations.append([begin,end,obj])\n sorted_denotations.sort()\n sorted_denotations.reverse()\n for begin, end, obj in sorted_denotations:\n text = text[:begin] + obj + ' ' + text[end:]\n\n doc_data['text'] = clean_text(text, stopwords)\n docs.append(doc_data)\n\n return docs", "def extract_features(self, clip):\n #sr, clip_array = wav_read(io.BytesIO(clip.data))\n sr = 16000\n # clip_decoded = base64.decodestring(clip.data)\n # clip_array = np.frombuffer(clip_decoded, dtype=np.float16)\n clip_array = np.array(clip.data)\n if clip_array.ndim > 1:\n clip_array = clip_array[:, 0]\n segments = frame_breaker.get_frames(clip_array, sample_rate=sr)\n segments_encoded = [self.np2base64(s, sr) for s in segments]\n segment_features = [\n [f.feature_value for f in self.extract_feats_for_segment(s).features]\n for s in segments_encoded\n ]\n return segment_features", "def getFeatures(featureInput):\n featureList = []\n for defTerm,candidateSent in featureInput:\n tokens = nltk.word_tokenize(candidateSent)\n features = {}\n POScenter,POSleft,POSright = wordPOS(tokens,defTerm)\n features['Pos of first Article'] = posFirstArticle(tokens)\n## features['Num Punct Marks'] = numPunctuation(tokens)\n features['Subj words Predicate'] = subWordPerdicate(candidateSent,defTerm,tokens)\n features['Word before def term'] = wordBeforeDef(tokens,defTerm)\n features['POS centered word'] = POScenter\n features['POS left word'] = POSleft\n## features['POS right word'] = POSright \n featureList.append(features)\n return featureList", "def lexicon_features(tokens, feats):\n ###TODO\n\n # step 1 -> make lower-case\n # not getting why need to make lower case here -> doc-test need to check\n word_list = [x.lower() for x in tokens]\n \n \n nw = 0\n pw = 0\n \n # step 2 -> count pos/neg words\n for token in word_list:\n if token in neg_words: # returns True/False -> faster\n nw += 1\n if token in pos_words:\n pw += 1\n\n # step 3 -> add feature to feats\n feats.setdefault('neg_words',nw)\n feats.setdefault('pos_words',pw)\n \n pass", "def features(self, tokens, index, history):\n\t\t# print history\n\t\t# print tokens\n\n\t\t# Pad the sequence with placeholders\n\t\ttokens = [('[START2]', '[START2]'), ('[START1]', '[START1]')] + list(tokens) + [('[END1]', '[END1]'), ('[END2]', '[END2]')]\n\t\thistory = ['[START2]', '[START1]'] + list(history)\n\n\t\t# shift the index with 2, to accommodate the padding\n\t\tindex += 2\n\n\t\tword, pos = tokens[index]\n\t\tprevword, prevpos = tokens[index - 1]\n\t\tprevprevword, prevprevpos = tokens[index - 2]\n\t\tnextword, nextpos = tokens[index + 1]\n\t\tnextnextword, nextnextpos = tokens[index + 2]\n\t\tpreviob = history[index - 1]\n\t\tcontains_dash = '-' in word\n\t\tcontains_dot = '.' 
in word\n\t\tallascii = all([True for c in word if c in string.ascii_lowercase])\n\n\t\tallcaps = word == word.capitalize()\n\t\tcapitalized = word[0] in string.ascii_uppercase\n\n\t\tprevallcaps = prevword == prevword.capitalize()\n\t\tprevcapitalized = prevword[0] in string.ascii_uppercase\n\n\t\tnextallcaps = prevword == prevword.capitalize()\n\t\tnextcapitalized = prevword[0] in string.ascii_uppercase\n\n\t\treturn [word, str(self.stemmer.stem(word)), str(pos), str(allascii), str(nextword), str(self.stemmer.stem(nextword)), str(nextpos), str(nextnextword), str(nextnextpos), str(prevword), str(self.stemmer.stem(prevword)), str(prevpos), str(prevprevword), str(prevprevpos), str(previob), str(contains_dash), str(contains_dot), str(allcaps), str(capitalized), str(prevallcaps), str(prevcapitalized), str(nextallcaps), str(nextcapitalized)]", "def order_fermionic_term(fermionic_term):\n new_terms = order_fermionic_ops(fermionic_term)\n ordered_terms = []\n for new_term in new_terms:\n ordered_term = order_qubits(new_term)\n if ordered_term:\n ordered_terms.append(ordered_term)\n return ordered_terms", "def create_feature_space(sentences):\n splits = [s.split() for s in sentences]\n types = set(reduce(lambda x, y: x + y, splits))\n lookup = dict()\n for i, word in enumerate(types):\n lookup[word] = i\n return lookup", "def addr2features(address):\n return [Parser.get_current_and_neighbor_features(i, address) for i in range(len(address))]", "def extract_feats(word, nlp):\n feat_dict = {}\n feat_string = ''\n doc = nlp(word).to_dict()[0][0]\n if 'feats' in doc:\n for pair in doc['feats'].split('|'):\n feat, val = pair.split('=')\n feat_dict[feat] = val\n feat_string += feat + ': ' + val + ', '\n if feat_string:\n feat_string = ' (' + feat_string[:-2] + ')'\n return feat_dict, feat_string", "def classify(self, mutation) -> Set[Category]:\n def normalise(string):\n \"\"\"Remove double spaces, make lower case. 
Just remove some weirdness\"\"\"\n return re.sub(' +', ' ', string).lower()\n return {cat for string, cat in self.mapping.items()\n if normalise(string) in normalise(mutation.description)}", "def features_extract(document, wordset):\n words_doc = nltk.FreqDist(document)\n features = []\n for word in wordset:\n features.append(words_doc[word])\n return features", "def vocabulary(self):\n lst = []\n for key in self.frequencies().keys():\n lst.append(key)\n return sorted(lst)\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # for word in wordslst:\n # if word not in lst:\n # lst.append(word.lower())\n #return sorted(lst)", "def features_from_label(audio_file, segment):\n duration = segment['end'] - segment['start']\n audio, sample_rate = librosa.core.load(\n audio_file,\n duration=duration,\n offset=segment['start']\n )\n features = fe.get_features(audio, sample_rate)\n return features", "def feature_label(features):\n f=[]\n l=[]\n for item in features:\n f.append(item[0])\n l.append(item[1])\n return f,l", "def get_graph_embedding_features(fn='taxi_all.txt'):\n ge = []\n with open(fn, 'r') as fin:\n fin.readline()\n for line in fin:\n ls = line.strip().split(\" \")\n ge.append([float(i) for i in ls])\n ge = np.array(ge)\n ge = ge[np.argsort(ge[:,0])]\n return ge[:,1:]", "def vocall_category_info(with_background=True):\n label_map = pascalvoc_label(with_background)\n label_map = sorted(label_map.items(), key=lambda x: x[1])\n cats = [l[0] for l in label_map]\n\n if with_background:\n cats.insert(0, 'background')\n\n clsid2catid = {i: i for i in range(len(cats))}\n catid2name = {i: name for i, name in enumerate(cats)}\n\n return clsid2catid, catid2name", "def _get_consonants(sequence: str) -> list:\n consonants = []\n for char in sequence:\n if char in CONSONANTS:\n consonants.append(char)\n return consonants", "def extract_features_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon, mode='training'):\n instances = []\n for sent in sentence_dicts:\n # print(sent)\n for key, value in sent.items():\n features = {}\n if isinstance(key, int):\n if not_known_cue_word(value[3].lower(), cue_lexicon, affixal_cue_lexicon):\n sent[key]['not-pred-cue'] = True\n continue\n\n features['token'] = value[3].lower()\n features['lemma'] = value[4].lower()\n features['pos'] = value[5]\n\n if key == 0:\n features['bw-bigram1'] = 'null'\n else:\n features['bw-bigram1'] = \"%s_*\" %sent[key-1][4].lower()\n if not (key+1) in sent:\n features['fw-bigram1'] = 'null'\n else:\n features['fw-bigram1'] = \"*_%s\" %sent[key+1][4].lower()\n \n affix = get_affix_cue(value[3].lower(), affixal_cue_lexicon)\n if affix != None:\n base = value[3].lower().replace(affix, \"\")\n features['char-5gram1'], features['char-5gram2'] = get_character_ngrams(base, affix, 5)\n features['char-4gram1'], features['char-4gram2'] = get_character_ngrams(base, affix, 4)\n features['char-3gram1'], features['char-3gram2'] = get_character_ngrams(base, affix, 3)\n features['char-2gram1'], features['char-2gram2'] = get_character_ngrams(base, affix, 2)\n features['char-1gram1'], features['char-1gram2'] = get_character_ngrams(base, affix, 1)\n features['affix'] = affix\n else:\n features['char-5gram1'], features['char-5gram2'] = 'null','null'\n features['char-4gram1'], features['char-4gram2'] = 'null','null'\n features['char-3gram1'], features['char-3gram2'] = 'null','null'\n features['char-2gram1'], features['char-2gram2'] = 'null','null'\n features['char-1gram1'], features['char-1gram2'] = 'null','null'\n 
features['affix'] = 'null'\n \n instances.append(features)\n if mode == 'training':\n labels = extract_labels_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon)\n return sentence_dicts, instances, labels\n return sentence_dicts, instances", "def pos_tag_features(passage: str):\n pos_tags = [\"CC\", \"CD\", \"DT\", \"EX\", \"FW\", \"IN\", \"JJ\", \"JJR\", \"JJS\", \"LS\", \"MD\", \n \"NN\", \"NNS\", \"NNP\", \"NNPS\", \"PDT\", \"POS\", \"PRP\", \"RB\", \"RBR\", \"RBS\", \"RP\", \"TO\", \"UH\",\n \"VB\", \"VBD\", \"VBG\", \"VBZ\", \"WDT\", \"WP\", \"WRB\"]\n \n tags = pos_tag(word_tokenize(passage))\n tag_list= list()\n \n for tag in pos_tags:\n tag_list.append(len([i[0] for i in tags if i[1] == tag]))\n \n return tag_list", "def feature_dict(sent, i):\n palabra=sent[i] #suponinedo que al menos tiene una palabra\n especiales= [\"á\",\"é\",\"í\",\"ó\",\"ú\", \"ü\"] #solo chequeo minusculas porque pregunto sobre el lower del string\n\n #sobre la anterior\n if i==0: #primera de la oracion\n alower=\"\"\n aistitle=False\n aisupper=False\n aisnumeric=False\n aisplural=False\n #aunder=False\n aislower=False\n aespecial=False\n else:\n alower = sent[i-1].lower()\n aistitle = sent[i-1].istitle()\n aisupper = sent[i-1].isupper()\n aisnumeric = sent[i-1].isnumeric()\n aisplural= (sent[i-1][-1:].lower() == 's')\n #aunder= (sent[i-1].find('_') >= 0)\n aislower = sent[i-1].islower()\n aespecial = (1 in [c in sent[i-1].lower() for c in especiales]),\n\n #sobre la proxima\n if i==len(sent)-1: #si es la ultima\n plower = \"\"\n pistitle = False\n pisupper = False\n pisnumeric = False\n pisplural= False\n #punder=False\n pislower = False\n pespecial = False\n else:\n plower = sent[i + 1].lower()\n pistitle = sent[i + 1].istitle()\n pisupper = sent[i + 1].isupper()\n pisnumeric = sent[i + 1].isnumeric()\n pisplural= (sent[i + 1][-1:].lower() == 's')\n #punder = (sent[i + 1].find('_') >= 0)\n pislower = sent[i + 1].islower()\n pespecial = (1 in [c in sent[i+1].lower() for c in especiales]),\n\n return {\n 'lower': palabra.lower(),\n 'istitle': palabra.istitle(),\n 'isupper': palabra.isupper(),\n 'isnumeric': palabra.isnumeric(),\n 'isplural': (palabra[-1:].lower() == 's'),\n #'under': (palabra.find('_') >= 0),\n 'islower': palabra.islower(),\n 'especial': (1 in [c in palabra.lower() for c in especiales]),\n 'alower': alower,\n 'aistitle': aistitle,\n 'aisupper': aisupper,\n 'aisnumeric': aisnumeric,\n 'aisplural': aisplural,\n #'aunder': aunder,\n 'aespecial': aespecial,\n 'aislower': aislower,\n 'plower': plower,\n 'pistitle': pistitle,\n 'pisupper': pisupper,\n 'pisnumeric': pisnumeric,\n 'pisplural': pisplural,\n #'punder': punder,\n 'pislower': pislower,\n 'pespecial': pespecial,\n }", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in 
self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def sample_handling(sample, lexicon, classification):\n\n # We have a list of lists [.... [ [0, 2, 1, 0, 0, ..., 0] [1] ] , .... ] with the bag of words and the class\n featureset = []\n\n # Open the sample text and parse through the document and generate feastures.\n with open(sample, 'r') as f:\n contents = f.readlines()\n for l in contents[:hm_lines]:\n current_words = word_tokenize(l.lower())\n current_words = [lemmatizer.lemmatize(i) for i in current_words]\n features = np.zeros(len(lexicon))\n for word in current_words:\n if word.lower() in lexicon:\n index_value = lexicon.index(word.lower())\n features[index_value] = 1\n features = list(features)\n featureset.append([features, classification])\n\n return featureset", "def clean_feature(f):\n\n if f.startswith(\"a \"):\n f = f[2:]\n\n if f.startswith(\"an \"):\n f = f[3:]\n\n return str(f)", "def clts(sequence):\n return [_token2clts(segment)[1] for segment in sequence]", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, mode): #check later if we can merge this function with the SQuAD preprocessing \n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if mode!=\"ae\":\n tokens_a = tokenizer.tokenize(example.text_a)\n else: #only do subword tokenization.\n tokens_a, labels_a, example.idx_map= tokenizer.subword_tokenize([token.lower() for token in example.text_a], example.label )\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = 
tokens_a[0:(max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if mode!=\"ae\":\n label_id = label_map[example.label]\n else:\n label_id = [-1] * len(input_ids) #-1 is the index to ignore\n #truncate the label length if it exceeds the limit.\n lb=[label_map[label] for label in labels_a]\n if len(lb) > max_seq_length - 2:\n lb = lb[0:(max_seq_length - 2)]\n label_id[1:len(lb)+1] = lb\n \n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features", "def word_fts(self, word, normalize=True):\n return [self.fts(ipa, False) for ipa in self.ipa_segs(word, normalize)]", "def token2features(sent, i, add_neighs=True):\n \n def add_lexicon_feats(tpl, lookupLexiconDict, usedTags):\n if tpl in lookupLexiconDict:\n for cls in lookupLexiconDict[tpl]:\n if cls not in usedTags:\n ftrs.append(cls) #<--------------------\n usedTags[cls]=1\n else:\n usedTags[cls]+=1\n \n \n ftrs = []\n # bias\n ftrs.append(\"BIAS\")\n # position features\n if i == 0:\n ftrs.append(\"SENT_BEGIN\")\n if i == len(sent)-1:\n ftrs.append(\"SENT_END\")\n\n # the word itself\n word = unicode(sent[i])\n ftrs.append(\"WORD=\" + word)\n word_lcase = word.lower()\n ftrs.append(\"LCASE=\" + word_lcase)\n # some features of the word\n if word.isalnum():\n ftrs.append(\"IS_ALNUM\")\n if word.isnumeric():\n ftrs.append(\"IS_NUMERIC\")\n if word.isdigit():\n ftrs.append(\"IS_DIGIT\")\n if word.isupper():\n ftrs.append(\"IS_UPPER\")\n if word.islower():\n ftrs.append(\"IS_LOWER\")\n\n # USE LEXICONS################################################## !\n maxTries=5\n usedTags = {}\n \n #look front up to 5 places \n if type(sent[0])== str: lSent = map(str.lower, sent)\n else: lSent = map(unicode.lower, sent)\n while(maxTries!=0):\n\n if len(lSent)-i>=maxTries:\n tpl = tuple(lSent[i:maxTries+i])\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n maxTries-=1\n \n #also look backwards: lexicons\n if i>=1:\n tpl = tuple(lSent[i-1:i+1]) # size 2\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n if i<len(lSent) : \n tpl = tuple(lSent[i-1:i+2]) # size 3\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n \n #analyze and add bias towards max used classification \n if usedTags:\n usedTags = list(usedTags.iteritems())\n maxused = max(usedTags, key=operator.itemgetter(1))\n minused = min(usedTags, key=operator.itemgetter(1)) \n if minused[1]!=maxused[1]:\n ftrs.append('BIAS='+maxused[0])\n \n\n #R ************************************************\n if len(word) > 15:\n ftrs.append(\"IS_LENGTHY\")\n if word[0].upper():\n ftrs.append(\"IS_FIRST_UPPER\")\n if word.__contains__(\"http\"):\n ftrs.append(\"IS_HYPERLINK\")\n if any(x.isupper() for x in word):\n 
ftrs.append(\"IS_MIXEDCASE\")\n if word.isupper():\n ftrs.append(\"ALL_UPPERCASE\")\n if word.__contains__(\"@\"):\n ftrs.append(\"IS_TAG\")\n if word.__contains__(\"#\"):\n ftrs.append(\"IS_HASHTAG\")\n if word in stop_words:\n ftrs.append(\"IS_STOPWORD\")\n if word in ['ing','ly','ed','ious','ies','ive','es','s','ment']:\n ftrs.append(\"CONTAINS_SUFFIX\")\n ftrs.append( nltk.pos_tag([word])[0][1] )\n\n # previous/next word feats\n if add_neighs:\n if i > 0:\n for pf in token2features(sent, i-1, add_neighs = False):\n ftrs.append(\"PREV_\" + pf)\n if i < len(sent)-1:\n for pf in token2features(sent, i+1, add_neighs = False):\n ftrs.append(\"NEXT_\" + pf)\n \n \n \n # return it!\n return ftrs", "def get_ambiguous_words(self, sort_on=None):\n multis = [word for word in self.word_tag_dict.keys() if len(self.word_tag_dict[word]) > 1]\n if not sort_on:\n multis.sort()\n return multis", "def get_vos(mappings):\n regex = re.compile(\"^/(\\w+)/\")\n patterns = (m.pattern for m in mappings)\n matches = filter(None, (regex.match(p) for p in patterns))\n vo_groups = set(m.group(1).lower() for m in matches)\n\n return vo_groups", "def findPascalStrings(self, addressSet: ghidra.program.model.address.AddressSetView, minimumStringLength: int, alignment: int, includePascalUnicode: bool) -> List[ghidra.program.util.string.FoundString]:\n ...", "def featurize(tokens, feature_fns):\n answer = []\n for func in feature_fns:\n feats = defaultdict(lambda: 0)\n func(tokens,feats)\n answer.extend(feats.items())\n return sorted(answer, key= lambda x: x[0])", "def single2hgvs(s):\n _validate_str(s)\n t = re.findall(\"[A-Z*]\\d+[A-Z*]\", s)\n return [\"p.{}{}{}\".format(AA_CODES[x[0]], x[1:-1], AA_CODES[x[-1]]) for x in t]", "def getFeatureNames(self):\n return [\"f100\", \"f103\", \"f104\"]", "def segment_spanish(input_text):\n processed_document = nlp(input_text)\n\n tokens = drop_punctuation_and_numbers([word for word in processed_document])\n\n unique_tokens = set(tokens)\n return list(unique_tokens)", "def transform_dssp_to_vector(dssp):\n res = []\n for s in dssp:\n if s.casefold() == 'h':\n res.append(0)\n elif s.casefold() == 'e':\n res.append(1)\n elif s.casefold() == '-' or s.casefold() == 'c':\n res.append(2)\n return res", "def compute_label_feature(text, token_to_idx):\n tokens = list(text.strip().lower())\n feats = [token_to_idx[token] for token in tokens]\n return feats", "def _get_feature_tables_for_protein(feature_table, accession) -> str:\n if not feature_table:\n return \"\"\n\n if accession not in feature_table:\n return \"\"\n\n ft_str = \"\"\n for key in feature_table[accession].keys():\n if key == \"VARIANT\":\n for ft_var in feature_table[accession][key]:\n if len(ft_var[0]) == 3: # CASE Replacement\n ft_str += (\n ('''\\nFT VARIANT {position}\\n''' +\n '''FT /note=\"{from_aa} -> {to_aa} (in GEN_BY_PG; {desc})\"\\n''' +\n '''FT /id=\"CUSTOM_{id}\"''').format(\n position=ft_var[0][2], from_aa=ft_var[0][0], to_aa=ft_var[0][1],\n desc=ft_var[1], id=ft_var[2]\n )\n )\n elif len(ft_var[0]) == 2: # CASE Replacement\n ft_str += (\n ('''\\nFT VARIANT {position}\\n''' +\n '''FT /note=\"Missing (in GEN_BY_PG; {desc})\"\\n''' +\n '''FT /id=\"CUSTOM_{id}\"''').format(\n position=ft_var[0][1],\n desc=ft_var[1], id=ft_var[2]\n )\n )\n\n return ft_str", "def get_feature_names():\n return ['UserID', 'SessionID', 'TaskName', 'Orientation', 'TapType'] + get_numerical_feature_names()", "def get_all_features(config: Config) -> typing.List[str]:\n return [feature.name for feature in config.features]", "def 
tactic_comps(cls) -> Set[str]:\n return set([\"mmic_autodock_vina\"])", "def sort_suggestions(\n suggestions: List[Tuple[Set[str], float]]\n) -> List[Tuple[Set[str], float]]:\n confidence_list = [suggestion[1] for suggestion in suggestions]\n sort_index = sorted(range(len(confidence_list)), key=lambda k: confidence_list[k])\n # Inverse the sort\n sort_index = sort_index[::-1]\n return [suggestions[i] for i in sort_index]", "def get_feature_extraction_headers(self, pose: str) -> List[str]:\n simba_dir = os.path.dirname(simba.__file__)\n feature_categories_csv_path = os.path.join(\n simba_dir, Paths.SIMBA_FEATURE_EXTRACTION_COL_NAMES_PATH.value\n )\n check_file_exist_and_readable(file_path=feature_categories_csv_path)\n bps = list(pd.read_csv(feature_categories_csv_path)[pose])\n return [x for x in bps if str(x) != \"nan\"]", "def get_feature_names(self):\n return [self.char]", "def find_abecedarian_words():\n pass", "def verbrogentwogs(prot, twogs, prot_in_twogs):\n vt_lijst = []\n for x in prot_in_twogs:\n for y in prot_in_twogs:\n if indexfind(\"%s %s\" % (x, y), twogs):\n vt_lijst.append(\"%s %s\" % (x, y))\n vt_lijst.append(\"%s %s\" % (x, prot))\n vt_lijst.append(\"%s %s\" % (y, prot))\n return vt_lijst", "def chunked_tags(train):\n cfdist = nltk.ConditionalFreqDist()\n for t in train:\n for word, tag, chtag in tree2conlltags(t):\n if chtag == \"O\":\n cfdist[tag].inc(False)\n else:\n cfdist[tag].inc(True)\n return [tag for tag in cfdist.conditions() if cfdist[tag].max() == True]", "def convert_xnli_examples_to_features(self):\n features = self.features\n lang_filtered_features = []\n for ex_index, example in enumerate(self.examples):\n language = example.guid.split('-')[1]\n if language in self.lang_list:\n lang_filtered_features.append(features[ex_index] + [language])\n return lang_filtered_features", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, mode): #check later if we can merge this function with the SQuAD preprocessing\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if mode!=\"ae\":\n tokens_a = tokenizer.tokenize(example.text_a)\n else: #only do subword tokenization.\n tokens_a, labels_a, example.idx_map= tokenizer.subword_tokenize([token.lower() for token in example.text_a], example.label )\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n target_indices = find_target_indices(tokens_a, tokens)\n if target_indices is None:\n target_indices = (1, 1 + len(tokens_a))\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if mode!=\"ae\":\n label_id = label_map[example.label]\n else:\n label_id = [-1] * len(input_ids) #-1 is the index to ignore\n #truncate the label length if it exceeds the limit.\n lb=[label_map[label] for label in labels_a]\n if len(lb) > max_seq_length - 2:\n lb = lb[0:(max_seq_length - 2)]\n label_id[1:len(lb)+1] = lb\n\n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n target_indices=target_indices))\n return features", "def extract_labels_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon):\n labels = []\n for sent in sentence_dicts:\n for key, value in sent.items():\n if isinstance(key, int):\n if not_known_cue_word(value[3].lower(), cue_lexicon, affixal_cue_lexicon):\n continue\n if any(cue_position == key for (cue, cue_position, cue_type) in sent['cues']) or any(mw_pos == key for (mw_cue, mw_pos) in sent['mw_cues']):\n labels.append(1)\n else:\n labels.append(-1)\n return labels", "def uCSIsAlphabeticPresentationForms(code):\n ret = libxml2mod.xmlUCSIsAlphabeticPresentationForms(code)\n return ret", "def protein_from_orfs(dna):\n rna = dna.replace(\"T\", \"U\")\n reverse_complement_rna = complement_strand(dna).replace(\"T\", \"U\")\n\n candidate_proteins = set()\n\n for strand in [rna, reverse_complement_rna]:\n for index in [m.start() for m in re.finditer('AUG', strand)]:\n codons_list = codons(strand[index:])\n protein = \"\"\n\n if any(rna_codon_dict[codon] == \"Stop\" for codon in codons_list):\n for codon in codons_list:\n symbol = rna_codon_dict[codon]\n\n if symbol != \"Stop\":\n protein += symbol\n else:\n candidate_proteins.add(protein)\n break\n\n return candidate_proteins", "def get_feature_names(self):\n ...", "def get_lexicon(seg_sents:List[List[str]]) -> Dict[str, int]:\n\tlexicon = {}\n\n\tfor sent in seg_sents:\n\t\tfor word in sent:\n\t\t\tlexicon[word] = lexicon.get(word, 0) + 1\n\t# print(lexicon)\n\treturn lexicon", "def common_words(self):\n order_centroids = self.model.cluster_centers_.argsort()[:, ::-1]\n clusters = self.model.labels_.tolist()\n vocab = self.vectorizer.vocabulary_\n return [ [vocab.keys()[vocab.values().index(i)] for i in\n order_centroids[cluster, :10]] for cluster in sorted(set(clusters))]", "def _transform_compound(self, compound):\n assert isinstance(compound, str), \"Input is not a string!\"\n cmpd_features = np.array(compound_short_descriptors(compound),\n dtype=np.float)\n cmpd_features = np.pad(cmpd_features, (0, 80-cmpd_features.shape[0]),\n mode='constant')\n cmpd_features = np.nan_to_num(cmpd_features)\n\n return cmpd_features", "def hgvs2single(s):\n _validate_str(s)\n t = re_protein.findall(s)\n return [\"{}{}{}\".format(AA_CODES[m[1]], m[2], AA_CODES[m[3]]) for m in t]", "def get_feature_set_SC(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = 
pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = 0.0\n obj_score = 0.0\n nrof_subwords = 0\n nrof_objwords = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n sub_score = sub_score + sentimentvalues[word][0]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][1]>0:\n sub_score = sub_score + sentimentvalues[word][1]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][2]>0:\n obj_score = obj_score + sentimentvalues[word][2]\n nrof_objwords = nrof_objwords + 1\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n if nrof_subwords>0:\n additional_freq[\"subjective_words\"] = nrof_subwords*1.0\n if nrof_objwords>0:\n additional_freq[\"objective_words\"] = nrof_objwords*1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def preprocess(self, collection: List[str]) -> List[List[str]]:\n # remove bad characters\n col_clean = self.remove_bad_chars(collection)\n # make lower case if applicable\n if self.lowercase:\n col_l = self.lower_case_corpus(col_clean)\n else:\n col_l = col_clean\n if self.n_grams == 1:\n return [s.split() for s in col_l]\n else:\n corpus_tokenized = [s.split() for s in col_l]\n col_n_grams = list()\n for doc in corpus_tokenized:\n# print(f\"doc is: {doc}\")\n if self.n_grams > len(doc):\n raise ValueError(\"n-grams value to large, choose\\\n smaller value\")\n doc_n_grams = list()\n for i in range(len(doc)):\n end_idx = self.n_grams + i\n doc_tmp = \"\"\n if end_idx <= len(doc):\n# print(f\"token added is: {' '.join(doc[i:end_idx])}\")\n doc_tmp = \" \".join(doc[i:end_idx])\n doc_n_grams.append(doc_tmp)\n else:\n break\n col_n_grams.append(doc_n_grams)\n# print(f\"final: {col_n_grams}\")\n return col_n_grams", "def get_characters_mapping(X, f=None):\n f = f or (lambda x: x)\n \n vocab = {\n '<pad>': 0,\n '<unk>': 1,\n }\n for sentence in X:\n for word in sentence:\n for letter in f(word):\n if letter not in vocab:\n vocab[letter] = len(vocab)\n return vocab", "def extract_features(document):\n document_words = set(document)\n features = {}\n global word_features\t\n for word in word_features:\n features['contains(%s)' % word] = (word in document_words)\n return features", "def find_charity_sentences(subdoc, factory) -> List:\n\n calculate_distances_per_pattern(subdoc, factory, merge=True, 
pattern_prefix='x_charity_')\n\n slices = []\n vectors = filter_values_by_key_prefix(subdoc.distances_per_pattern_dict, 'x_charity_')\n vectors_i = []\n for v in vectors:\n if max(v) > 0.6:\n vector_i, _ = improve_attention_vector(subdoc.embeddings, v, relu_th=0.6, mix=0.9)\n vectors_i.append(vector_i)\n else:\n vectors_i.append(v)\n\n x = max_exclusive_pattern(vectors_i)\n x = relu(x, 0.8)\n subdoc.distances_per_pattern_dict['$at_x_charity_'] = x\n\n dups = {}\n for i in np.nonzero(x)[0]:\n bounds = get_sentence_bounds_at_index(i, subdoc.tokens)\n\n if bounds[0] not in dups:\n sl = slice(bounds[0], bounds[1])\n sum_ = sum(x[sl])\n confidence = 'x'\n # confidence = np.mean( np.nonzero(x[sl]) )\n nonzeros_count = len(np.nonzero(x[sl])[0])\n print('nonzeros_count=', nonzeros_count)\n confidence = 0\n\n if nonzeros_count > 0:\n confidence = sum_ / nonzeros_count\n print('confidence=', confidence)\n if confidence > 0.8:\n # GLOBALS__['renderer'].render_color_text(subdoc.tokens_cc[sl],\n # subdoc.distances_per_pattern_dict['$at_x_charity_'][sl], _range=(0, 1))\n print(i, sum_)\n\n slices.append((sl, confidence, sum_))\n\n dups[bounds[0]] = True\n\n return slices", "def succ(self):\n return [ self.simple_reflection(i) for i in self.descents(positive=True) ]", "def format_used_features(model_dir):\n feature_keys = {\n \"indel_complexity\": \"ICP\",\n \"dissimilarity\": \"DSM\",\n \"indel_size\": \"ISZ\",\n \"repeat\": \"REP\",\n \"is_uniq_mapped\": \"UQM\",\n \"is_near_boundary\": \"NEB\",\n \"equivalence_exists\": \"EQX\",\n \"is_bidirectional\": \"BID\",\n \"is_multiallelic\": \"MTA\",\n \"is_inframe\": \"FRM\",\n \"is_splice\": \"SPL\",\n \"is_truncating\": \"TRN\",\n \"is_in_cdd\": \"CDD\",\n \"indel_location\": \"LOC\",\n \"is_nmd_insensitive\": \"NMD\",\n \"ipg\": \"IPG\",\n \"cds_length\": \"LEN\",\n \"lc\": \"LC\",\n \"local_lc\": \"LLC\",\n \"gc\": \"GC\",\n \"local_gc\": \"LGC\",\n \"strength\": \"SG\",\n \"local_strength\": \"LSG\",\n \"is_ins\": \"INS\",\n \"is_at_ins\": \"ATI\",\n \"is_at_del\": \"ATD\",\n \"is_gc_ins\": \"GCI\",\n \"is_gc_del\": \"GCD\",\n \"ref_count\": \"REFC\",\n \"alt_count\": \"ALTC\",\n \"is_on_db\": \"SNP\",\n }\n\n feature_dict = make_feature_dict(model_dir)\n\n features_used_for_sni = [\n feature_keys[f] for f in feature_dict[\"single_nucleotide_indels\"]\n ]\n features_used_for_mni = [\n feature_keys[f] for f in feature_dict[\"multi_nucleotide_indels\"]\n ]\n features_used_for_sni.sort()\n features_used_for_mni.sort()\n\n d = {}\n d[\"##features_used_for_single_nucleotide_indels\"] = \";\".join(features_used_for_sni)\n d[\"##features_used_for_multi_nucleotide_indels\"] = \";\".join(features_used_for_mni)\n\n return d", "def get_terms(document):\n q = get_mapped(document)\n tokens = tockenizer(q)\n terms = analizer(tokens)\n\n return terms", "def back_transliterate(self, romaji: str) -> List[Tuple[str, float]]:\n # It is convenient to match on COMBINING MACRON and COMBINING CIRCUMFLEX\n # separately from their vowels\n normalized = unicodedata.normalize('NFKD', romaji).lower()\n try:\n input_fst = _make_input_fst(normalized)\n except ValueError:\n return []\n result_fst = fst.compose(input_fst, self._transducer)\n interned_results = _all_valid_strings(result_fst)\n deinterned_results = [(_deintern_tokens(ts[0]), ts[1]) for ts in interned_results]\n return sorted(deinterned_results, key=lambda x: x[1])", "def _get_ifunction_categories_list(self):\n category_list = [\"FitFunctions\"]\n func_cats = 
self.create_mantid_ifunction(self.algorithm_name()).categories()\n for cat in func_cats:\n # double up the category separators so they are not treated as escape characters\n category_list.append(cat.replace(\"\\\\\", \"\\\\\\\\\"))\n\n return category_list", "def string_features_hex(hexstr):\n out = dict([(x,0) for x in hexabet])\n ct = dict(Counter(hexstr.split()))\n N = len(hexstr.split())\n for k in out.keys():\n if k in ct.keys():\n out[k] += ct[k]\n out = [v[1] for v in sorted(out.iteritems(), key=lambda (k,v): k)]\n out = [float(x)/N for x in out]\n return out", "def extract_cds(seq_record):\n return [f for f in seq_record.features if f.type == \"CDS\"]", "def lookup_in_taxonomy(results):\n from unidecode import unidecode\n\n base_url = \"http://taxonomy.projectchronos.eu/space/dbpediadocs/{}\"\n labels = []\n resource = None\n for res in results:\n res = unidecode(res)\n try:\n # print base_url.format(res)\n resource = retrieve_json(base_url.format(res))\n except Exception as e:\n print Exception('Cannot fetch taxonomy: ' + res.encode('ascii', 'replace') + ' ' + str(e))\n\n if resource and 'relatedConcepts' in resource.keys():\n for c in resource['relatedConcepts']:\n if c:\n label = c[c.rfind('/') + 1:].replace('+', ' ')\n # print 'Found! ' + label\n labels.append(str(label))\n return set(labels)", "def _get_input_features(\n self, hyp: str, ref: str, tags: List[int]\n ) -> Tuple[List[int], List[int], List[int], List[int], List[int], List[str], List[int]]:\n\n labels_mask = []\n labels = []\n if tags is None:\n hyp_tokens, token_start_indices = self._split_to_wordpieces(hyp.split())\n else:\n hyp_tokens, labels, token_start_indices = self._split_to_wordpieces_with_labels(hyp.split(), tags)\n references = ref.split(\";\")\n all_ref_tokens = []\n all_ref_segment_ids = []\n for i in range(len(references)):\n ref_tokens, _ = self._split_to_wordpieces(references[i].split())\n all_ref_tokens.extend(ref_tokens + [\"[SEP]\"])\n all_ref_segment_ids.extend([i + 1] * (len(ref_tokens) + 1))\n\n input_tokens = [\"[CLS]\"] + hyp_tokens + [\"[SEP]\"] + all_ref_tokens # ends with [SEP]\n input_ids = self._tokenizer.convert_tokens_to_ids(input_tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = [0] + [0] * len(hyp_tokens) + [0] + all_ref_segment_ids\n if len(input_ids) != len(segment_ids):\n raise ValueError(\n \"len(input_ids)=\"\n + str(len(input_ids))\n + \" is different from len(segment_ids)=\"\n + str(len(segment_ids))\n )\n\n if tags:\n labels_mask = [0] + [1] * len(labels) + [0] + [0] * len(all_ref_tokens)\n labels = [0] + labels + [0] + [0] * len(all_ref_tokens)\n return (input_ids, input_mask, segment_ids, labels_mask, labels, hyp_tokens, token_start_indices)", "def one_hot(seq):\n prot_seq = seq\n BASES = 'ARNDCQEGHILKMFPSTWYV'\n bases = np.array([base for base in BASES])\n feat = np.concatenate(\n [[(bases == base.upper()).astype(int)] if str(base).upper() in BASES else np.array([[-1] * len(BASES)]) for base\n in prot_seq])\n return feat", "def lookup_pronunciations_for_word(word: Text) -> Sequence[Word]:\n return EnglishUtils.all_possible_forms_for(word)", "def getConstantSentenceForms(self):", "def getFeatureNames(self):\n feature_names = super().getFeatureNames()\n feature_names.extend([\"f101\", \"f102\", \"f105\", \"fNum\", \"fCapStart\", \"fCapNoStart\"])\n return feature_names", "def order_vep_by_csq(annotation_list):\n for ann in annotation_list:\n ann['major_consequence'] = worst_csq_from_csq(ann['Consequence'])\n return sorted(annotation_list, key=(lambda 
ann:csq_order_dict[ann['major_consequence']]))", "def part_of_speech(text):\n temp = nltk.pos_tag(text)\n return [word for word, tag in temp if \n (tag == \"NN\") or \n (tag == \"NNS\") or\n (tag == \"NNP\") or \n (tag == \"NNPS\")]", "def cladistic(tree, taxa):\n tips = []\n taxa = set(taxa)\n for tip in tree.tips():\n if tip.name in taxa:\n tips.append(tip)\n n = len(taxa)\n if len(tips) < n:\n raise ValueError('Taxa not found in the tree.')\n return ('uni' if n == 1 else\n ('mono' if len(tree.lca(tips).subset()) == n else\n 'poly'))", "def A(document, row):\n vocabulary = set(flat(document))\n document = [set(s) for s in document]\n return [[row[f] if f in s and f in row else 0 for s in document] for f in vocabulary]" ]
[ "0.61101604", "0.6075151", "0.55478764", "0.54786366", "0.5440633", "0.5432097", "0.5407608", "0.53747076", "0.53084356", "0.5303873", "0.5279023", "0.5274519", "0.52741605", "0.5229276", "0.520493", "0.5192467", "0.5183173", "0.51240844", "0.5107907", "0.5059009", "0.5028215", "0.50221395", "0.5020031", "0.50125444", "0.5008814", "0.5005811", "0.5001931", "0.49993068", "0.49923363", "0.49905008", "0.4978592", "0.49756405", "0.4972768", "0.49594083", "0.49513105", "0.49463502", "0.49456316", "0.49431935", "0.48999938", "0.48924243", "0.48918903", "0.48894548", "0.48884186", "0.4866907", "0.48666856", "0.4856036", "0.48378727", "0.48310244", "0.48271778", "0.4827108", "0.48256955", "0.4813793", "0.48135838", "0.48133555", "0.48037964", "0.47991323", "0.47974962", "0.47890326", "0.47860897", "0.47813657", "0.4770163", "0.4750172", "0.47481397", "0.47404355", "0.47380692", "0.47348702", "0.4734643", "0.47301784", "0.47249252", "0.47238582", "0.4721283", "0.47024682", "0.46830985", "0.46782127", "0.46773362", "0.46739972", "0.46667776", "0.4666671", "0.46651864", "0.46509883", "0.46444947", "0.46365777", "0.4635739", "0.46355027", "0.46346423", "0.46346247", "0.46331966", "0.46298724", "0.46286598", "0.46275684", "0.46266496", "0.46243456", "0.46232882", "0.4622475", "0.46219105", "0.4621578", "0.46203664", "0.4618875", "0.46176752", "0.46150684" ]
0.5639854
2
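Several of the negative snippets above (for example the convert_examples_to_features entry) follow the standard transformer preprocessing pattern: tokenize, convert tokens to ids, build an attention mask of 1s over the real tokens, then zero-pad the ids, mask, and segment ids out to a fixed maximum length. The sketch below is an illustrative, self-contained restatement of that padding step only; the function and variable names are hypothetical and it does not depend on any particular tokenizer library.

    # Minimal sketch of the pad-and-mask step used in the snippets above.
    def pad_to_max_length(token_ids, segment_ids, max_seq_length, pad_id=0):
        # Truncate first so the length assertions below always hold.
        token_ids = list(token_ids)[:max_seq_length]
        segment_ids = list(segment_ids)[:max_seq_length]
        # 1 for real tokens, 0 for padding; only real tokens are attended to.
        attention_mask = [1] * len(token_ids)
        while len(token_ids) < max_seq_length:
            token_ids.append(pad_id)
            attention_mask.append(0)
            segment_ids.append(0)
        assert len(token_ids) == len(attention_mask) == len(segment_ids) == max_seq_length
        return token_ids, attention_mask, segment_ids

    # Example: four real token ids padded out to a sequence length of 8.
    ids, mask, segs = pad_to_max_length([101, 7592, 2088, 102], [0, 0, 0, 0], 8)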
Return a list of feature vectors, given a Unicode IPA word.
def word_to_vector_list(self, word, numeric=False, xsampa=False, normalize=True): if xsampa: word = self.xsampa.convert(word) segs = self.word_fts(word, normalize or xsampa) if numeric: tensor = [x.numeric() for x in segs] else: tensor = [x.strings() for x in segs] return tensor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def _make_feature_vec(self, word_list):\n\n # Pre-initialize an empty numpy array (for speed)\n feature_vec = np.zeros((self.num_features,), dtype=\"float32\")\n\n # index2word is a list that contains the names of the words in\n # the model's vocabulary. 
Convert it to a set, for speed.\n index2word_set = set(self.w2v_model.index2word)\n\n # Loop over each word in the word_list and, if it is in the model's\n # vocabulary, add its feature vector to the total\n nwords = 0\n for word in word_list:\n # NOTE: Careful there, if all words are in caps in the article,\n # this function will return nan values and blow up the forest.\n word = word.lower()\n if word in index2word_set:\n nwords += 1\n feature_vec = np.add(feature_vec, self.w2v_model[word])\n\n # Divide the result by the number of words to get the average\n feature_vec = np.divide(feature_vec, nwords)\n return feature_vec", "def _create_feature_vec():\n\tnum_tags = NGRAM_TUPLE[0]\n\tfvec = []\n\tfor _, size in FEATURE_TUPLE:\n\t\tfvec.append(np.zeros((num_tags, size)))\n\n\t# Append tag ngram weights to end\n\tfvec.append(np.zeros((num_tags, num_tags)))\n\treturn fvec", "def word_fts(self, word, normalize=True):\n return [self.fts(ipa, False) for ipa in self.ipa_segs(word, normalize)]", "def makeFeatureVec(words, model, num_features):\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n num_words = 0.\n index2word_set = set(model.wv.index2word)\n for word in words:\n if word in index2word_set:\n num_words += 1\n featureVec = np.add(featureVec,model[word]) \n featureVec = np.divide(featureVec,num_words)\n return featureVec", "def word2vec(self, sentence: str):\n tokens = nltk.word_tokenize(sentence)\n v = [self.word_dict.get(token, 0) for token in tokens]\n return v", "def extract_feature_vectors(file, dict):\n f = open(file, 'r')\n num_lines = 0\n\n for line in f:\n if(line.strip()):\n num_lines = num_lines + 1\n\n f.close()\n\n feature_matrix = np.zeros([num_lines, len(dict)])\n\n f = open(file, 'r')\n pos = 0\n\n for line in f:\n if(line.strip()):\n flist = extract_words(line)\n for word in flist:\n if(word in dict):\n feature_matrix[pos, dict.index(word)] = 1\n pos = pos + 1\n\n f.close()\n\n return feature_matrix", "def create_feature_vector(ix, term_dict, bow):\n\n\ttfv = list()\n\t# get corpus length (n. of docs)\n\tnum_docs = ix.num_docs\n\tfor idx, tf in bow:\n\t\t# get term from dict index\n\t\tterm = ix[idx]\n\t\t# filter out terms not contained in self.term_dict\n\t\tif term not in term_dict:\n\t\t\tcontinue\n\t\t# filter out terms w/ length gt 20\n\t\tif len(term) > 20:\n\t\t\tcontinue\n\t\t# filter out non-alphabetical terms\n\t\tif not term.isalpha():\n\t\t\tcontinue\n\t\t# get document frequency \n\t\tdf = ix.dfs[idx]\n\t\t# compute ratio between df and num_docs\n\t\tratio = df / num_docs\n\t\tif ratio > 0.1: # skip term - requires tuning: check if it's okay to keep it as is\n\t\t\tcontinue\n\t\t# append term w/ tf to tfv\n\t\ttfv.append((term, tf))\n\treturn tfv", "def load_word_vectors(self, sentence_entry):\n word_vectors = []\n for token, lemma in zip(sentence_entry.tokens, sentence_entry.lemmas):\n # Go through the lookup chain. 
If one of these is found in the vsm,\n # return it, else use the fallback and report oov\n for s in [token, token.lower(), lemma, lemma.lower()]:\n if self.embeddings.contains_word(s):\n vector = self.embeddings.word_to_vec(s)\n self.statistics.known_token()\n break\n else:\n self.statistics.unknown_token()\n vector = self.embeddings.get_zero_fallback()\n\n word_vectors.append(vector)\n return word_vectors", "def get_vector(word, model):\n return model.wv[word]", "def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec", "def txt2vec(self, text: str) -> List[int]:\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr", "def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def makeFeatureVec(words, model, num_features):\n\t# Initialize an empty numpy array (for speed) \n\tfeatureVec = np.zeros((num_features,), dtype=\"float32\")\n\t# Initialize a counter (number of words)\n\tnwords = 0.\n\t \n\t# Index2word is a list that contains the names of the words in the model's vocabulary. \n\tindex2word_set = set(model.index2word)\n\t# \n\t# Loop over each word in the review and, if it is in the model's vocaublary, add \n\t# its feature vector to the total \n\tfor word in words:\n\t\tif word in index2word_set:\n\t\t\tnwords = nwords + 1.\n\t\t\tfeatureVec = np.add(featureVec,model[word])\n\t# \n\t# Divide the result by the number of words to get the average \n\tfeatureVec = np.divide(featureVec,nwords)\n\treturn featureVec", "def to_vector(texto,model,idf):\n tokens = normalizer(texto).split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in tokens: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def _get_word2vec_features(x, word2vec, all_words_per_tweet, max_tweet_len):\n\n features = np.zeros((len(x), max_tweet_len, word2vec.vector_size))\n\n for i, tweet_words in enumerate(all_words_per_tweet):\n tweet_repr = np.array(\n [word2vec.wv[r] if r in word2vec.wv.vocab else np.zeros(word2vec.vector_size) for r in tweet_words])\n features[i][:len(tweet_repr), :word2vec.vector_size] = tweet_repr\n\n return features", "def vectorizer_features(self) -> list:\n if self._vectorizer:\n return self._vectorizer.get_feature_names()\n self.logger.warning('Uninitialized vector. 
Please call count_vectorizer first.')", "def get_word_vector(doc_id, corpus):\n inv_index = vsm_retrieval.get_inverted_index(corpus)\n word_vec = np.zeros(len(inv_index))\n for count_vec, word in enumerate(inv_index):\n word_vec[count_vec] = inv_index[word].get(doc_id, {'frequency': 0})['frequency']\n return word_vec", "def extract(self, document):\n f_num = len(self.feature_list)\n feature_vector = np.zeros((f_num,))\n words = document.split()\n for i in xrange(len(words)):\n for n in self.ns:\n ngram = self.try_get_ngram(words, n, i)\n if ngram and ngram in self.ngrams:\n self.add_ngram(feature_vector, ngram)\n return feature_vector", "def word_features(table):\n\tfeatures = numpy.zeros((len(table), 620), dtype='float32')\n\tkeys = table.keys()\n\tfor i in range(len(table)):\n\t\tf = table[keys[i]]\n\t\tfeatures[i] = f / norm(f)\n\treturn features", "def _feature_vec(xs, y):\n\tf = _create_feature_vec()\n\n\t# Iterate over rows in x, values of y, and update f.\n\tcount = y.shape[0]\n\tfor idx in range(count):\n\t\tword = xs[idx, :]\n\t\ttag = y[idx]\n\n\t\t# Defense!\n\t\tassert len(word) + 1 == len(f)\n\n\t\t# Iterate over feature values in word, increment the vector\n\t\tfor fidx, fvalue in enumerate(word):\n\t\t\tf[fidx][tag, fvalue] += 1\n\n\t\t# Update ngram matrix at the end of fvec. Must update edge potential\n\t\t# for previous AND next tag.\n\t\tif idx != 0:\n\t\t\tprev_tag = y[idx-1]\n\t\t\tf[-1][prev_tag, tag] += 1\n\t\tif idx != count - 1:\n\t\t\tnext_tag = y[idx+1]\n\t\t\tf[-1][tag, next_tag] += 1\n\n\treturn f", "def load_vectors(fname):\r\n # taken from: https://fasttext.cc/docs/en/english-vectors.html\r\n vectors_data = vocab.Vectors(name=fname)\r\n\r\n return vectors_data", "def _update_feature_vec(fvec, word, tag_ngram):", "def features_extract(document, wordset):\n words_doc = nltk.FreqDist(document)\n features = []\n for word in wordset:\n features.append(words_doc[word])\n return features", "def lexicon_features(tokens, feats):\n# feats['neg_words'] = 0;\n# feats['pos_words'] = 0;\n# tokens = list([token.lower() for token in tokens])\n# feats['neg_words'] , feats['pos_words'] = np.count_nonzero(np.in1d(tokens, list(neg_words))), np.count_nonzero(np.in1d(tokens, list(pos_words)))\n neg_count=0\n pos_count=0\n for i in tokens:\n if i.lower() in neg_words:\n neg_count+=1\n if i.lower() in pos_words:\n pos_count+=1\n feats[\"neg_words\"]=neg_count\n feats[\"pos_words\"]=pos_count", "def word2vec(self, words):\n with torch.no_grad():\n words = torch.LongTensor(self.doc2token(words))\n result = self.model.embedding(words).numpy()\n return result", "def preprocess_words(word: str) -> List[str]:\n return [change_latin(w) for w in word.lower().translate(SUB_MAP).split()] if word else []", "def get_features(words, vectors):\n result = [vectors.loc[word].values for word in words if word in df_keys.values.reshape(-1)]\n if result:\n return np.stack(result)\n return None", "def load_vector_dictionary():\n return read_word2vecs_from_file(VECTOR_FILE)", "def v(self, word):\n return self._vecs[self._index[word]]", "def load_word_vector_for_fee(self, fee):\n\n for s in [fee, fee.lower()]:\n if self.embeddings.contains_word(s):\n self.statistics.known_fee()\n return self.embeddings.word_to_vec(s)\n else:\n self.statistics.unknown_fee()\n return self.embeddings.get_random_fallback(fee)", "def extract_vector(inst, neighbor_word_list, _4c_4d_feature, language):\n if language.__eq__(\"English\"):\n sentence = inst.getElementsByTagName('context')[0]\n else:\n sentence = 
inst.getElementsByTagName('context')[0].getElementsByTagName('target')[0]\n\n x = []\n neighbors = {}\n left_list, right_list = get_left_right_lists(sentence, language)\n\n for word in left_list[-k:]:\n count = neighbors.get(word, 0)\n neighbors[word] = count + 1\n for word in right_list[:k]:\n count = neighbors.get(word, 0)\n neighbors[word] = count + 1\n\n for i in xrange(neighbor_word_list.__len__()):\n n = neighbors.get(neighbor_word_list[i], 0)\n if vector_0_1 and n > 0:\n n = 1\n x.append(n)\n\n for i in xrange(_4c_4d_feature.__len__()):\n n = neighbors.get(_4c_4d_feature[i], 0)\n if vector_0_1 and n > 0:\n n = 1\n x.append(n)\n return x", "def getVector(text):\n url = cfg.use_vectoriser\n res = requests.post(url, json={'text': text, 'access_key': cfg.vectoriser_access_key})\n res_dictionary = res.json()\n return res_dictionary['vectors']", "def load_glove_vec(fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n for i,line in enumerate(f):\n L = line.split()\n word = L[0].lower()\n if word in vocab:\n word_vecs[word] = np.array(L[1:], dtype='float32')\n return word_vecs", "def get_feature_names(self):\n\t\treturn np.array(['nouns', 'adjectives', 'verbs', 'adverbs'])", "def lexicon_features(tokens, feats):\n ###TODO\n\n # step 1 -> make lower-case\n # not getting why need to make lower case here -> doc-test need to check\n word_list = [x.lower() for x in tokens]\n \n \n nw = 0\n pw = 0\n \n # step 2 -> count pos/neg words\n for token in word_list:\n if token in neg_words: # returns True/False -> faster\n nw += 1\n if token in pos_words:\n pw += 1\n\n # step 3 -> add feature to feats\n feats.setdefault('neg_words',nw)\n feats.setdefault('pos_words',pw)\n \n pass", "def get_vocabulary(X, f):\n \n f = f or (lambda x: x)\n \n vocab = set(['<unk>', '<num>'])\n for i, word in enumerate(itertools.chain(*X)):\n word_ = f(word)\n vocab.add(word_)\n\n return vocab", "def extract_features(sentence, vocabulary):\n n_tokens = len(sentence)\n n_features = n_feature_functions + len(vocabulary)\n X = sp.lil_matrix((n_tokens, n_features), dtype=bool)\n\n for i in xrange(n_tokens):\n for j, f in enumerate(FEATURE_FUNCTIONS):\n X[i, j] = f(sentence, i)\n\n # Vocabulary feature\n try:\n X[i, n_feature_functions + vocabulary[sentence[i][0].lower()]] = 1\n except KeyError:\n pass\n\n return X", "def get_vector(self, word):\n\n if word in self.glove.stoi:\n return self.glove.vectors[self.glove.stoi[word]]\n else:\n return None", "def vectorize_collection(feature_space, collection_path):\n sentences = load_collection_sentences(collection_path, __fape_files_to_load)\n # concatenate all the string lists\n sentences = reduce(lambda x,y: x[0]+y[0], sentences)\n return zip(sentences, map(vectorize, [feature_space]*len(sentences),\\\n sentences))", "def processFeature(prevWord, word, vector):\n \n # We add feature whether it exists or not\n unigram, exists = vector.getUnigram(prevWord)\n if not exists:\n vector.addUnigram(prevWord)\n \n \n bigram, exists = vector.getBigram(prevWord, word)\n if not exists:\n vector.addBigram(prevWord, word)", "def load_word_vectors(root, wv_type, dim):\n if isinstance(dim, int):\n dim = str(dim) + 'd'\n fname = os.path.join(root, wv_type + '.' 
+ dim)\n if os.path.isfile(fname + '.pt'):\n fname_pt = fname + '.pt'\n print('loading word vectors from', fname_pt)\n return torch.load(fname_pt)\n if os.path.isfile(fname + '.txt'):\n fname_txt = fname + '.txt'\n print('loading word vectors from', fname_txt)\n cm = open(fname_txt, 'rb')\n elif os.path.basename(wv_type) in URL:\n url = URL[wv_type]\n print('downloading word vectors from {}'.format(url))\n r = requests.get(url, stream=True)\n with zipfile.ZipFile(six.BytesIO(r.content)) as zf:\n print('extracting word vectors into {}'.format(root))\n zf.extractall(root)\n return load_word_vectors(root, wv_type, dim)\n else:\n print('Unable to load word vectors.')\n\n wv_tokens, wv_arr, wv_size = [], array.array('d'), None\n with cm as f:\n for line in f:\n entries = line.strip().split(b' ')\n word, entries = entries[0], entries[1:]\n if wv_size is None:\n wv_size = len(entries)\n try:\n word = word.decode()\n except:\n print('non-UTF8 token', repr(word), 'ignored')\n continue\n wv_arr.extend(float(x) for x in entries)\n wv_tokens.append(word)\n\n wv_dict = {word: i for i, word in enumerate(wv_tokens)}\n wv_arr = torch.Tensor(wv_arr).view(-1, wv_size)\n ret = (wv_dict, wv_arr, wv_size)\n torch.save(ret, fname + '.pt')\n return ret", "def create_vector(string):\n vec = {}\n words = string.split()\n\n for word in words:\n if len(word) <= NGRAM_SIZE:\n add(vec, word)\n else:\n for i in range(len(word) - NGRAM_SIZE + 1):\n add(vec, word[i : i + NGRAM_SIZE])\n\n return vec", "def feature_vector(features, vector):\n clean_features = set(features)\n new_features_vector = featurize(vector,clean_features)\n return new_features_vector", "def get_features(self, words):\n word_indices = []\n word_char_indices = []\n for word in words:\n if word in self.w2i:\n word_indices.append(self.w2i[word])\n else:\n word_indices.append(self.w2i[\"_UNK\"])\n\n if self.c_in_dim > 0:\n chars_of_word = [self.c2i[\"<w>\"]]\n for char in word:\n if char in self.c2i:\n chars_of_word.append(self.c2i[char])\n else:\n chars_of_word.append(self.c2i[\"_UNK\"])\n chars_of_word.append(self.c2i[\"</w>\"])\n word_char_indices.append(chars_of_word)\n return word_indices, word_char_indices", "def _featurize(self, predictions: SequenceSample) -> List[np.ndarray]:\n feature_vectors: List[np.ndarray] = []\n source = predictions.origin_words\n\n char_nn_scores = self.char_nn_lm_score(predictions.paths)\n word_nn_scores = self.word_nn_lm_score(predictions.paths)\n\n for i, (score, hypothesis) in enumerate(zip(predictions.scores, predictions.paths)):\n obss = list(zip(hypothesis, source))\n length = len(source)\n feature_vector = np.array([\n 1.,\n length,\n self.language_model.score(hypothesis) / length,\n char_nn_scores[i],\n word_nn_scores[i],\n score / length,\n sum(w in self.language_model for w in hypothesis) / length,\n sum(h[:self.prefix_size] == s[:self.prefix_size] for h, s in obss) / length,\n sum(h[-self.suffix_size:] == s[-self.prefix_size:] for h, s in obss) / length,\n self.language_model.score(hypothesis) * score / length,\n np.mean([editdistance.eval(h, s) for h, s in obss]),\n np.mean([float(obs in self.train_set_uniq) for obs in obss]),\n np.mean([self.train_counter.get(obs, self.discount) for obs in obss]),\n ])\n feature_vectors.append(feature_vector)\n return feature_vectors", "def get_feature_vector(self, board):\n return self.hot_one(board)\n # return self.get_tesauro_feature_vector(self, board)", "def vectorize_word(word):\n if word in Tfidf_vect.vocabulary_:\n i = Tfidf_vect.vocabulary_[word]\n return 
Tfidf_vect.idf_[i]\n else:\n return 0", "def segment_to_vector(self, seg, normalize=True):\n return self.fts(seg, normalize).strings()", "def word_vecs(self, raw_label=False):\n utterances, labels = self.read_json()\n # print(utterances)\n # print(self.label_dict)\n utterances = [self.word2vec(u) for u in utterances]\n if raw_label:\n labels = labels\n else:\n labels = [self.label_dict[l] for l in labels]\n\n return utterances, labels", "def vectorize_text(text):\n\n def remove_punctuation(text):\n \"\"\"Removes special characters from text.\"\"\"\n return re.sub('[,.?\";:\\-!@#$%^&*()]', '', text)\n\n def remove_common_words(text_vector):\n \"\"\"Removes 50 most common words in the uk english.\n\n source: http://www.bckelk.ukfsn.org/words/uk1000n.html\n\n \"\"\"\n common_words = set(['the', 'and', 'to', 'of', 'a', 'I', 'in', 'was',\n 'he', 'that', 'it', 'his', 'her', 'you', 'as', 'had', 'with',\n 'for', 'she', 'not', 'at', 'but', 'be', 'my', 'on', 'have', 'him',\n 'is', 'said', 'me', 'which', 'by', 'so', 'this', 'all', 'from',\n 'they', 'no', 'were', 'if', 'would', 'or', 'when', 'what', 'there',\n 'been', 'one', 'could', 'very', 'an', 'who'])\n return [word for word in text_vector if word not in common_words]\n\n text = text.lower()\n text = remove_punctuation(text)\n words_list = text.split()\n words_list = remove_common_words(words_list)\n\n return words_list", "def to_phoible_fts(self,ipa_seg):\n ipa_seg = self.epitran_to_phoible(ipa_seg)\n if ipa_seg not in PhonologicalEmbedding.__to_phoible_feats_dict.keys():\n raise KeyError(\"The ipa segment \"+str(ipa_seg)+\" was not found in the phoible ipa-to-features dict. \"+\\\n \"We use phoible data to work with features, while we use epitran to generate transliterations. \"+\\\n \"Even though both stick to a strict standard, IPA in unicode, they sometimes have different representations \"+\\\n \"which can cause this error.\\n Consider writing an exception into PhonologicalEmbedding.__epitran_phoible_replacements.\")\n return np.array(PhonologicalEmbedding.__to_phoible_feats_dict[ipa_seg],dtype='float32')", "def extract_word2vec(fichier, words_indices): \n \n word2vec={} #\n \n #\n with open(fichier,\"r\",encoding=\"utf-8\") as file:\n for line in file:\n line = line.replace(\" \\n\",\"\").split(\" \")\n # Lecture des informations du fichier\n # nombre de mots presents et nombre de features\n if len(line)==2 :\n nb_words=int(line[0])\n nb_feats=int(line[1])\n \n #\n else:\n if line[0] in words_indices:\n word, vec = line[0],np.array(line[1:])\n word2vec[word]=vec\n\n print(\"{} embbedings de taille {} pertinent parmi les {} du fichier\".format(len(word2vec), nb_feats, nb_words))\n\n return word2vec, nb_feats", "def word2features(sent, i):\n features = []\n\n # the [-1,+1] window of words around the token\n for o in [-1,0,1]:\n if i+o >= 0 and i+o < len(sent):\n word_tuple = sent[i+o]\n word_window = get_words_in_window(word_tuple, o)\n features.extend(word_window)\n\n # # part of speech\n # pos = ('pos', sent[i][1])\n # features.append(pos)\n\n # prop = ('prop', is_proper_case(sent[i][0]))\n # features.append(prop)\n\n return dict(features)", "def load_glove_vec(fname):\n word_vecs = {}\n length = 0\n with open(fname, \"rb\") as f:\n for i, line in enumerate(f):\n L = line.split()\n word = L[0].lower()\n word_vecs[word] = np.array(L[1:], dtype='float32')\n if length == 0:\n length = len(word_vecs[word])\n return word_vecs, length", "def vectorize(vector_space, sentence):\n vector = [0] * len(vector_space)\n for word in sentence[0].split():\n 
vector[vector_space[word]] = 1\n return vector", "def vectorize(self,clean_path):\n \n #load pretrained embedding model (GloVe)\n glove = spacy.load('en_core_web_lg')\n #extract unique words (aka vocabulary)\n unique_words = set()\n for d in self.docs: \n txt = d.text\n doc = glove(txt)\n for word in doc: \n if word.has_vector:\n unique_words.add(word.text)\n #change set to list type\n unique_words = list(unique_words)\n #save vector representation\n word_vectors = np.array([glove(word).vector for word in unique_words if glove(word).has_vector])\n #index vectors by corresponding word \n corpus_vectors = pd.DataFrame(word_vectors, index=unique_words)\n with open(clean_path + 'corpus_vectors.pkl', 'wb') as f:\n pickle.dump(corpus_vectors,f)\n self.vectors = corpus_vectors\n print('Saved embedding vectors.')\n return", "def text_to_vecs(self):\n # convert word strings into word vectors\n sent_vec = []\n for w in self.sentence:\n if w in self.word_vectors.getVocab():\n sent_vec.append( self.word_vectors.getWordVectors()[w] )\n else:\n sent_vec.append( self.word_vectors.getOOVWordVector() )\n \n assert(len(self.sentence) == len(sent_vec)) \n self.sent_vec = sent_vec", "def get_tesauro_feature_vector(self, board):\n main_board = board[1:25]\n jail1, jail2, off1, off2 = board[25], board[26], board[27], board[28]\n features = np.array([])\n\n # naum i feature vector af adalsvaedinu\n for position in main_board:\n vector = np.zeros(4)\n sign = -1 if position < 0 else 1\n for i in range(int(abs(position))):\n if i > 3:\n vector[3] = sign * (abs(position) - 3) / 2\n break\n vector[i] = position/abs(position)\n features = np.append(features, vector)\n\n # jail feature-ar\n jail_features = np.array([jail1, jail2]) * 0.5\n\n # features fyrir hversu margir eru borne off\n off_board_features = np.array([off1, off2]) * (0.066667)\n bias_vector = np.array([1, 1])\n features = np.append(features, [jail_features, off_board_features, bias_vector])\n features = torch.from_numpy(features).float()\n features.requires_grad = True\n return features", "def create_vectors(list_dict, num_words):\n x = [] # list that will hold data \n\n for d in list_dict:\n # initializing numpy vector\n # it contains 5,000 (number of words) zeros\n temp = np.zeros(num_words, dtype=np.float64)\n for key, val in d.items():\n if key < num_words:\n key -= 1 # indexing in data starts at 1\n temp[key] = 1 # adding word and its frequency to vector \n # temp[key] = val\n x.append(temp) # appends vector to x \n\n return x", "def embed(self, sequence):\n words = sequence.split(' ')\n vecs = [self._E[self._w2i[i]] if i in self._w2i else self._E[self._w2i[\"UNK\"]]\n for i in words]\n return vecs", "def word_forms(self, word):\n result = set()\n for dic_name in self.dictionaries.keys():\n for vector in self.dictionaries[dic_name].word_forms(word):\n result.add(tuple(vector))\n return filter(lambda x: len(x), result)", "def create_feature_map(string, features):\n fmap = {}\n vec = create_vector(string)\n\n for ngram in features:\n if ngram in vec:\n fmap[ngram] = vec[ngram]\n\n return fmap", "def toFeatureVector(tokens,index=None):\n\t# Should return a dictionary containing features as keys, and weights as values\n\tadict = {}\n\ttokens = [w for w in tokens if w not in stopwords]\n\t# Q4 Limiting the token list to average/median of all the tokens per reviews\n\tfor i in tokens[:mean_token]: \n\t\tadict[i] = featureDict[i]\n\tif index is not None:\n\t\tfor i in rawData:\n\t\t\tif i[0] == index:\n\t\t\t\tadict['raiting'] = float(int(i[2]) - 
0)/5\n\t\t\t\tadict['verPur'] = 1 if i[3] == 'Y' else 0\n\t\t\t\tadict['avgWordLen'] = sum(len(w) for w in i[1].split())/len(i[1])\n\t\t\t\tadict['stopwords'] = len([w for w in i[1].split() if w in stopwords])\n\t\t\t\t# adict['speacialChar'] = len(re.findall(r'[^A-Z0-9a-z ]+',i[1])) # performace metrics decreases\n\t\t\t\tadict['digits'] = len(re.findall(r'[0-9]+',i[1]))\n\treturn adict", "def get_feature_vec(board, players):\n token_dict = {i: PROBABILITIES[i]*36 for i in PROBABILITIES}\n res_types = [ResourceType.FOREST, ResourceType.ORE, ResourceType.BRICK, ResourceType.SHEEP, ResourceType.WHEAT]\n hexes = board.hexes()\n feature_data = np.zeros(24)\n feature_data[:4] = [player.vp() for player in players]\n for i, player in enumerate(players):\n for node in player.settlement_nodes():\n tiles = board.get_adj_tile_ids_to_node(node)\n for tile in tiles:\n hexnode = hexes[tile]\n if hexnode.resource() in res_types:\n feature_data[i*5 + res_types.index(hexnode.resource())] = token_dict[hexnode.token()]\n return feature_data", "def to_vectors_whd(glyphs: List[Glyph]):\n whd_vector_labels = [to_vector_whd(raw_char) for raw_char in glyphs]\n return [vector[0] for vector in whd_vector_labels], [whd_vector_label[1] for whd_vector_label in whd_vector_labels]", "def _featurize_py_func(text):\n label = np.array(text[-1], dtype=np.int32)\n words = word_tokenize(text[:-2])\n chars = np.zeros([max_sentence_length, max_word_length], dtype=np.int32)\n for i, word in enumerate(words):\n ids = [char_to_int.get(char, -1) for char in word]\n chars[i,:len(ids)] = ids\n return chars", "def get_word_list_features(word_list, word_features):\n document = ' '.join(word_list)\n words = word_tokenize(document)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features", "def get_vect(word, model, method):\n if method == \"model\":\n try:\n return model.wv[word]\n except KeyError:\n return None\n else:\n try:\n return model[word]\n except KeyError:\n return None", "def load_imagined_vectors(self, sentence_entry):\n imagined_vectors = []\n for token, lemma in zip(sentence_entry.tokens, sentence_entry.lemmas):\n # Go through the lookup chain. 
If one of these is found in the vsm,\n # return it, else use the fallback and report oov\n for s in [token, token.lower(), lemma, lemma.lower()]:\n if self.imagined_embeddings.contains_word(s):\n vector = self.imagined_embeddings.word_to_vec(s)\n break\n else:\n vector = self.imagined_embeddings.get_zero_fallback()\n\n imagined_vectors.append(vector)\n return imagined_vectors", "def get_text_features(text, word_features):\n words = word_tokenize(text)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features", "def get_features(docs, max_length):\n docs = list(docs)\n Xs = numpy.zeros((len(docs), max_length), dtype='int32')\n for i, doc in enumerate(docs):\n j = 0\n for token in doc:\n vector_id = token.vocab.vectors.find(key=token.orth)\n if vector_id >= 0:\n Xs[i, j] = vector_id\n else:\n Xs[i, j] = 0\n j += 1\n if j >= max_length:\n break\n return Xs", "def compute_label_feature(text, token_to_idx):\n tokens = list(text.strip().lower())\n feats = [token_to_idx[token] for token in tokens]\n return feats", "def featurize(self, data):\n \n bag_of_words = []\n\n tokens = data.split()\n\n for i in tokens:\n bag_of_words.append((i, True))\n\n return bag_of_words", "def vectorize_text(corpus):\n bag_of_words_model = CountVectorizer()\n\n # performs the above described three tasks on the given data corpus.\n dense_vec_matrix = bag_of_words_model.fit_transform(corpus).todense()\n bag_of_word_df = pd.DataFrame(dense_vec_matrix)\n bag_of_word_df.columns = sorted(bag_of_words_model.vocabulary_)\n return bag_of_word_df", "def feature_values(words, word_features):\r\n freq = nltk.FreqDist(words)\r\n values = []\r\n for wf in word_features:\r\n if wf in freq:\r\n values.append(freq[wf])\r\n else:\r\n values.append(0)\r\n return values", "def getFeatures(featureInput):\n featureList = []\n for defTerm,candidateSent in featureInput:\n tokens = nltk.word_tokenize(candidateSent)\n features = {}\n POScenter,POSleft,POSright = wordPOS(tokens,defTerm)\n features['Pos of first Article'] = posFirstArticle(tokens)\n## features['Num Punct Marks'] = numPunctuation(tokens)\n features['Subj words Predicate'] = subWordPerdicate(candidateSent,defTerm,tokens)\n features['Word before def term'] = wordBeforeDef(tokens,defTerm)\n features['POS centered word'] = POScenter\n features['POS left word'] = POSleft\n## features['POS right word'] = POSright \n featureList.append(features)\n return featureList", "def _get_vowels(sequence: str) -> list:\n vowels = []\n for char in sequence:\n if char in VOWELS:\n vowels.append(char)\n return vowels", "def get_vocabulary(documents):\n cv_model = CountVectorizer(binary=True)\n cv_model.fit(documents)\n\n vocabulary = cv_model.get_feature_names()\n vocabulary = list(map(str, vocabulary))\n\n return vocabulary", "def gen_review_vecs(reviews, model, num_features):\n\n curr_index = 0\n review_feature_vecs = np.zeros((len(reviews), num_features), dtype=\"float32\")\n\n # index2word is a list consisting of all words in the vocabulary\n # Convert list to set for speed\n index2word_set = set(model.wv.index2word)\n for review in reviews:\n\n #if curr_index%1000 == 0.:\n # print \"Vectorizing review %d of %d\" % (curr_index, len(reviews))\n \n review_feature_vecs[curr_index] = review_to_vec(review, model, num_features , index2word_set)\n curr_index += 1\n \n return review_feature_vecs", "def doc2features(self,sent):\n return [self.word2features(sent['tokens'], i) for i in range(len(sent['tokens']))]", "def token2features(sent, i, add_neighs=True):\n \n def 
add_lexicon_feats(tpl, lookupLexiconDict, usedTags):\n if tpl in lookupLexiconDict:\n for cls in lookupLexiconDict[tpl]:\n if cls not in usedTags:\n ftrs.append(cls) #<--------------------\n usedTags[cls]=1\n else:\n usedTags[cls]+=1\n \n \n ftrs = []\n # bias\n ftrs.append(\"BIAS\")\n # position features\n if i == 0:\n ftrs.append(\"SENT_BEGIN\")\n if i == len(sent)-1:\n ftrs.append(\"SENT_END\")\n\n # the word itself\n word = unicode(sent[i])\n ftrs.append(\"WORD=\" + word)\n word_lcase = word.lower()\n ftrs.append(\"LCASE=\" + word_lcase)\n # some features of the word\n if word.isalnum():\n ftrs.append(\"IS_ALNUM\")\n if word.isnumeric():\n ftrs.append(\"IS_NUMERIC\")\n if word.isdigit():\n ftrs.append(\"IS_DIGIT\")\n if word.isupper():\n ftrs.append(\"IS_UPPER\")\n if word.islower():\n ftrs.append(\"IS_LOWER\")\n\n # USE LEXICONS################################################## !\n maxTries=5\n usedTags = {}\n \n #look front up to 5 places \n if type(sent[0])== str: lSent = map(str.lower, sent)\n else: lSent = map(unicode.lower, sent)\n while(maxTries!=0):\n\n if len(lSent)-i>=maxTries:\n tpl = tuple(lSent[i:maxTries+i])\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n maxTries-=1\n \n #also look backwards: lexicons\n if i>=1:\n tpl = tuple(lSent[i-1:i+1]) # size 2\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n if i<len(lSent) : \n tpl = tuple(lSent[i-1:i+2]) # size 3\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n \n #analyze and add bias towards max used classification \n if usedTags:\n usedTags = list(usedTags.iteritems())\n maxused = max(usedTags, key=operator.itemgetter(1))\n minused = min(usedTags, key=operator.itemgetter(1)) \n if minused[1]!=maxused[1]:\n ftrs.append('BIAS='+maxused[0])\n \n\n #R ************************************************\n if len(word) > 15:\n ftrs.append(\"IS_LENGTHY\")\n if word[0].upper():\n ftrs.append(\"IS_FIRST_UPPER\")\n if word.__contains__(\"http\"):\n ftrs.append(\"IS_HYPERLINK\")\n if any(x.isupper() for x in word):\n ftrs.append(\"IS_MIXEDCASE\")\n if word.isupper():\n ftrs.append(\"ALL_UPPERCASE\")\n if word.__contains__(\"@\"):\n ftrs.append(\"IS_TAG\")\n if word.__contains__(\"#\"):\n ftrs.append(\"IS_HASHTAG\")\n if word in stop_words:\n ftrs.append(\"IS_STOPWORD\")\n if word in ['ing','ly','ed','ious','ies','ive','es','s','ment']:\n ftrs.append(\"CONTAINS_SUFFIX\")\n ftrs.append( nltk.pos_tag([word])[0][1] )\n\n # previous/next word feats\n if add_neighs:\n if i > 0:\n for pf in token2features(sent, i-1, add_neighs = False):\n ftrs.append(\"PREV_\" + pf)\n if i < len(sent)-1:\n for pf in token2features(sent, i+1, add_neighs = False):\n ftrs.append(\"NEXT_\" + pf)\n \n \n \n # return it!\n return ftrs", "def create_feature_vector(features, length):\n START_IDX = 0\n END_IDX = 1\n\n output_vector = np.zeros(length)\n\n # negative strand\n for loc in features[-1]:\n output_vector[loc[START_IDX]:loc[END_IDX]] = 1 \n\n # positive strand\n for loc in features[1]:\n output_vector[loc[START_IDX]:loc[END_IDX]] = 2\n\n return output_vector", "def training(string):\n print(\"Training...\")\n vec = create_vector(string)\n print(\"Selecting features...\")\n feature_list = select_features(vec)\n print(\"Done!\")\n return feature_list", "def _words_to_vec(self, sentence):\n return torch.FloatTensor([self._use_embeddings(word) for word in sentence])", "def extract_features(document):\n document_words = set(document)\n features = {}\n global word_features\t\n for word in word_features:\n features['contains(%s)' % word] = (word in 
document_words)\n return features", "def sample_handling(sample, lexicon, classification):\n\n # We have a list of lists [.... [ [0, 2, 1, 0, 0, ..., 0] [1] ] , .... ] with the bag of words and the class\n featureset = []\n\n # Open the sample text and parse through the document and generate feastures.\n with open(sample, 'r') as f:\n contents = f.readlines()\n for l in contents[:hm_lines]:\n current_words = word_tokenize(l.lower())\n current_words = [lemmatizer.lemmatize(i) for i in current_words]\n features = np.zeros(len(lexicon))\n for word in current_words:\n if word.lower() in lexicon:\n index_value = lexicon.index(word.lower())\n features[index_value] = 1\n features = list(features)\n featureset.append([features, classification])\n\n return featureset", "def get_features(words):\n features = {}\n for word in [i for i in words.split() if i not in stopwords.words('english')]:\n features['contains_%s' % word.lower()] = True\n return features", "def replace_vowels(word):\n variants = []\n for c in word:\n if c in vowels:\n for vowel in vowels:\n variants.append(word.replace(c, vowel))\n return variants", "def generate_vector(text, tf=None):\n if not _trained:\n print(\"Make sure to train parameterizer first\")\n exit(1)\n if tf is None:\n tf = term_frequency.generate_vector(text)\n vector = []\n for i in range(len(tf)):\n vector.append(tf[i] * _idfs[i])\n return vector", "def get_word2vec_features(x_train, x_test):\n\n all_words_per_tweet_train = [nltk.word_tokenize(sent) for sent in x_train[\"text\"]]\n all_words_per_tweet_test = [nltk.word_tokenize(sent) for sent in x_test[\"text\"]]\n\n word2vec = Word2Vec(all_words_per_tweet_train, min_count=5)\n word2vec.train(all_words_per_tweet_train, total_examples=word2vec.corpus_count, epochs=15)\n\n max_tweet_len = np.max(\n [np.max([len(t) for t in all_words_per_tweet_train]), np.max([len(t) for t in all_words_per_tweet_test])])\n\n features_train = _get_word2vec_features(x_train, word2vec, all_words_per_tweet_train, max_tweet_len)\n features_test = _get_word2vec_features(x_test, word2vec, all_words_per_tweet_test, max_tweet_len)\n\n return features_train, features_test", "def vocabulary(self) -> np.ndarray:\n return np.array(\n list(set(word for text in self.preprocess_corpus for word in text))\n )", "def get_feature_vectors(self):\n\t\tresult = self.session.query(Image.id, Image.feature_vector).all()\n\n\t\ttransformed_result = list()\n\t\t\n\t\tfor (id, serialized_feature_vector) in result:\n\t\t\tdeserialized_tensor = tf.deserialize_feature_vector(serialized_feature_vector)\n\t\t\ttransformed_result.append((id, deserialized_tensor))\n\n\t\treturn transformed_result", "def infer_vectors(self, reports, labels):\n logger.info('Inferring vectors from Doc2Vec model')\n tagged_docs = self.tag_dataset(reports, labels)\n vecs = [self.model.infer_vector(tag.words) for tag in tagged_docs]\n vecs = np.array(vecs)\n return vecs", "def get_word_vector():\n\n patten = r\"[0-9\\s+\\.\\!\\/_,$%^*()?;;:-【】+\\\"\\']+|[+——!,;:。?、~@#¥%……&*()]+\"\n s1 = input(\"句子1:\").strip()\n s2 = input(\"句子2:\").strip()\n s1 = re.sub(patten, \" \", s1)\n s2 = re.sub(patten, \" \", s2)\n cut1 = jieba.cut(s1)\n cut2 = jieba.cut(s2)\n\n list_word1 = (' '.join(cut1)).split()\n list_word2 = (' '.join(cut2)).split()\n print(list_word1)\n print(list_word2)\n\n key_word = list(set(list_word1 + list_word2)) # 取并集\n print(key_word)\n\n word_vector1 = np.zeros(len(key_word)) # 给定形状和类型的用0填充的矩阵存储向量\n word_vector2 = np.zeros(len(key_word))\n\n for i in range(len(key_word)): # 
依次确定向量的每个位置的值\n for j in range(len(list_word1)): # 遍历key_word中每个词在句子中的出现次数\n if key_word[i] == list_word1[j]:\n word_vector1[i] += 1\n for k in range(len(list_word2)):\n if key_word[i] == list_word2[k]:\n word_vector2[i] += 1\n\n print(word_vector1) # 输出向量\n print(word_vector2)\n return word_vector1, word_vector2", "def tag_word (lx,wd):\n\n resultSet = {tag for (word, tag) in function_words_tags if (word == wd)}\n\n nS = noun_stem(wd)\n vS = verb_stem(wd)\n\n for x in lx.getAll('A'):\n if (x == wd):\n resultSet.add('A')\n\n for x in lx.getAll('P'):\n if (x == wd):\n resultSet.add('P')\n\n for x in lx.getAll('N'):\n if (x == nS):\n resultSet.add('Np')\n elif (x == wd):\n resultSet.add('Ns')\n\n for x in lx.getAll('I'):\n if (x == vS):\n resultSet.add('Ip')\n elif (x == wd):\n resultSet.add('Is')\n\n for x in lx.getAll('T'):\n if (x == vS):\n resultSet.add('Tp')\n elif (x == wd):\n resultSet.add('Ts')\n\n return list(resultSet)", "def load_word2vect(self, file_path):\n self.embeddings = []\n self.word_to_idx = {'<pad>' : 0}\n self.vocab = ['<pad>']\n\n model = w2v.load(file_path)\n self.embedding_size = model.vectors.shape[1]\n pad_embedding = np.zeros(self.embedding_size, \"float32\")\n self.embeddings.append(pad_embedding)\n\n train_words_set = set([word for text in self.train_data for word in\n text[1].split(\" \")])\n\n for w in model.vocab:\n if w in train_words_set:\n self.word_to_idx[w] = len(self.vocab)\n self.vocab.append(w)\n self.embeddings.append(model[w])\n\n del model", "def extract(self, documents):\n\n # Feature vector to return\n features = np.zeros((len(documents), len(self.idx_to_word)))\n\n # Raise an exception if 'extract' is called before 'preprocess'\n if len(self.word_to_idx) == 0 or len(self.idx_to_word) == 0:\n raise Exception(\"Dictionary not initialised.\")\n\n # Iterate over all documents\n for idx, doc in enumerate(documents):\n # Split the doc into a list of words\n words = extract_words(doc)\n\n # For each word\n for w in words:\n # Calculate it's frequency, however, keep in mind\n # that this word may not have been in the training\n # corpus. In that case, ignore the word.\n ''' YOUR CODE HERE '''\n try:\n features[idx][self.word_to_idx[w]] = words.count(w)\n except KeyError:\n pass\n\n ''' END CODE FOR THIS LOOP '''\n\n # Divide the vector by the total number of words in the document to\n # normalize the frequencies.\n ''' YOUR CODE HERE '''\n features[idx] = features[idx]/len(words)\n ''' END CODE FOR THIS LOOP '''\n\n return features", "def getVocabList():\n vocab_list = []\n with open('vocab.txt') as f_obj:\n while True:\n vocab_line = f_obj.readline()\n if not vocab_line:\n break\n word = re.search(r'\\t(\\w+)', vocab_line).group(1)\n vocab_list.append(word)\n return vocab_list", "def get_word_vectors(self, docs):\n return self.tfidf.transform(docs)" ]
[ "0.6246148", "0.62410086", "0.6116035", "0.6115346", "0.5932024", "0.59269047", "0.5920939", "0.5908701", "0.58761644", "0.58152443", "0.5796711", "0.5752887", "0.574728", "0.57368666", "0.5726028", "0.5711519", "0.57040584", "0.570145", "0.568096", "0.5660981", "0.56468755", "0.5622057", "0.5601281", "0.5589679", "0.5582587", "0.5580266", "0.5572363", "0.5546099", "0.554303", "0.55407697", "0.55325204", "0.5521876", "0.55155784", "0.549252", "0.5485818", "0.54825747", "0.54753673", "0.54440993", "0.5437384", "0.5431975", "0.5430501", "0.54198676", "0.54184055", "0.54089737", "0.5395199", "0.53887993", "0.53860563", "0.53653216", "0.5358437", "0.5339024", "0.5335508", "0.53299063", "0.532853", "0.53284717", "0.53263843", "0.5322409", "0.5320369", "0.53106636", "0.5304639", "0.52996", "0.5299029", "0.5294246", "0.52928644", "0.52832186", "0.52767646", "0.52692413", "0.5267719", "0.52666444", "0.5261397", "0.52566034", "0.52550787", "0.52505857", "0.5244734", "0.5243458", "0.5240114", "0.5228172", "0.52237743", "0.5218568", "0.52162015", "0.52118504", "0.52091926", "0.5203718", "0.5194333", "0.5193058", "0.5186916", "0.51707315", "0.5168233", "0.51679385", "0.51591206", "0.5156813", "0.5155407", "0.5147761", "0.51346505", "0.51312053", "0.5125683", "0.51202893", "0.51188165", "0.51184356", "0.51122737", "0.51118195" ]
0.61455387
2
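The record above pairs the query "Return a list of feature vectors, given a Unicode IPA word." with a word_to_vector_list implementation that segments the word (via word_fts) and returns one feature vector per segment, either as +/- feature strings or as numeric values; it resembles the FeatureTable API of the panphon library, although the snippet itself does not name its package. The toy sketch below is a self-contained illustration of the same idea; the three-feature table is invented for the example and stands in for the full IPA segmentation and feature inventory the real code relies on.

    # Toy feature table: each IPA segment maps to (syllabic, consonantal, voiced).
    FEATURE_TABLE = {
        'p': ['-', '+', '-'],
        'a': ['+', '-', '+'],
        'n': ['-', '+', '+'],
    }

    def toy_word_to_vector_list(word, numeric=False):
        # One vector per known segment; numeric=True maps '+'/'-' to 1/-1.
        segs = [ch for ch in word if ch in FEATURE_TABLE]
        if numeric:
            return [[1 if v == '+' else -1 for v in FEATURE_TABLE[s]] for s in segs]
        return [FEATURE_TABLE[s] for s in segs]

    print(toy_word_to_vector_list('pan', numeric=True))
    # -> [[-1, 1, -1], [1, -1, 1], [-1, 1, 1]]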
Keep track of machine activity with time stamps.
def updateMachine(self, machine, raw_cpu, filtered_cpu): stamp = time.time() - self.initTime raw_cpu = float(raw_cpu) filtered_cpu = float(filtered_cpu) if machine in self.activity.keys(): self.activity[machine]["filtered activity"].append(filtered_cpu) self.activity[machine]["raw activity"].append(raw_cpu) self.activity[machine]["time"].append(stamp) else: self.activity[machine] = {"filtered activity" : [filtered_cpu], "raw activity" : [raw_cpu], "time" : [stamp]}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def internal_event(self):\n # log activity\n self.log_activity(LogEntry(\n sys_time=time(),\n logical_time=self.logical_clock,\n action=\"work\"\n ))", "def noteActivity(): \r\n global lastActivity\r\n lastActivity = millis()", "def noteActivity(): \n global lastActivity\n lastActivity = millis()", "def touch(self):\n self._timestamps['last_seen'] = rospy.get_rostime()", "def main():\n\n # check database for tracking options\n # if empty prompt to add subject\n\n # present tracking options\n\n # calculate timedelta\n\n # printing/updating the time", "def insert_machine_time(self, start: int, end: int):\n today = datetime.date.today().strftime(\"%Y/%m/%d\")\n machine_time = '{}#{}'.format(start, end)\n time_used = end - start\n\n try:\n usage_today = usage.Usage.objects.get(machine_id=self.model.id, date=today)\n except DocumentDoesNotExists:\n usage_today = usage.Usage()\n usage_today.date = today\n usage_today.machine_id = self.model.id\n usage_today.name = self.model.name\n usage_today.times = []\n usage_today.total_time = 0\n\n usage_today.times.append(machine_time)\n usage_today.total_time += time_used\n usage_today.save()\n\n if not self.model.open:\n self.model.open = True\n self.model.save()", "def _trigger(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def take(self):\n self.when_taken = datetime.datetime.now().timestamp()", "def wall_time(self):", "def time_automation_listener(now):\n action()", "def record_activity(msg_list):\n self.last_kernel_activity = kernel.last_activity = utcnow()\n\n idents, fed_msg_list = session.feed_identities(msg_list)\n msg = session.deserialize(fed_msg_list)\n\n msg_type = msg['header']['msg_type']\n self.log.debug(\"activity on %s: %s\", kernel_id, msg_type)\n if msg_type == 'status':\n kernel.execution_state = msg['content']['execution_state']", "def Analyze(self):\n \n self._analyzeLogs()\n for user in self._start_times:\n self._result[user] = self._zipTimes(user)", "def test_issue_tracked_times(self):\n pass", "def increment_machine_time(self, people, image_cap_time):\n # Find out if there is at least one person using the machine\n person_inside = False\n for person in people:\n (h_top_x, h_left_y, h_bottom_x, h_right_y) = person\n\n person_inside = self.check_inside((h_top_x, h_left_y, h_bottom_x, h_right_y))\n if person_inside:\n break\n\n # if there is somebody in the machine\n if person_inside:\n self.last_seen_unix = image_cap_time\n if not self.inside:\n self.inside = True\n self.first_detected = image_cap_time\n elif self.inside and not self.using:\n diff = image_cap_time - self.first_detected\n # If they have been using this machine for a set period\n # of time, we can be sure the machine is in use\n if diff > self.time_threshold:\n # Tell all clients this machine is being used now\n if self.model.open:\n self.model.open = False\n self.model.save()\n\n self.using = True\n self.time_elapsed = self.first_detected\n else:\n \"\"\"\n If a person is no longer inside and last time we \n seen someone was past the last seen threshold\n \"\"\"\n last_seen = image_cap_time - self.last_seen_unix\n if last_seen > self.last_seen_threshold:\n # Turn off all flags if they are set\n self.inside = False\n # if machine was being used log that time\n if self.using:\n self.using = False\n self.time_used += image_cap_time - self.first_detected\n logging.info(\"Used for: \" + 
str(image_cap_time - self.first_detected))\n\n # Send to database\n start_time, end_time = self.first_detected, image_cap_time\n db_thread = threading.Thread(target=self.insert_machine_time, args=[start_time, end_time])\n db_thread.start()", "def updateProcess(self, machine, process):\n\n stamp = time.time() - self.initTime\n if machine in self.activity.keys():\n if ((\"processes\" in self.activity[machine].keys()) and \n (process in self.activity[machine][\"processes\"].keys())):\n self.activity[machine][\"processes\"][process].append(stamp)\n else:\n self.activity[machine][\"processes\"] = {process : [stamp]}\n else:\n self.activity[machine] = {\"filtered activity\" : [],\n \"raw activity\" : [],\n \"time\" : [],\n \"processes\" : {process : [stamp]}}", "def _record_current_time(self):\n now = time.time()\n delta = now - self._last_time\n self._last_time = now\n self._timing_recorder.append(delta)", "def audit(self):\n self.ping()", "def _push(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def track_duration(self):\n # raise NotImplementedError\n self.out_schema.append(\"run_duration\")\n self._track_duration = True\n # self.runner = GridExecutor.timer(self.runner)", "def do_timestamp_messages(self, messages):\n timestamp = self.env.now\n self.reception_records[timestamp] = messages\n log.debug(\"{} recorded {}\".format(self, self.reception_records))", "def monitor(self):", "def __start_tracking(self, event_type=None, message=None):\n\n if message:\n logger.info(message)\n\n start_time = time.time()\n id = str(uuid.uuid4())\n if not hasattr(self, 'running_processes'):\n self.running_processes = {}\n self.running_processes[id] = (start_time, event_type)\n return id", "def internal_event (self):\n self.clock_time += 1\n self.log()", "def sync_time(self, event=None):\n if self.collect: return\n time_obj= localtime()\n serial_time = strftime(\"t%Y,%m,%d,%H,%M,%S\", time_obj)\n print(serial_time)\n self.system_timestamp = f\"\\nSystem start time is: {serial_time}\"\n print(serial_time.encode(encoding=\"ascii\"))\n self.ser.write(serial_time.encode(encoding=\"ascii\"))", "def getTimes():", "def getTimes():", "def getTimes():", "def __pass_time(self):\n self.hunger += 1\n self.boredom += 1", "def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. 
time_metrics = %s', build_metrics_times_data(time_metrics))", "def reportBallSeen(self):\r\n self.lastTimeSeen = time.time()", "def get_tracking():\n\n output_lines = run_command(\"chronyc tracking\")\n tdata = {}\n for line in output_lines:\n\t(label, data) = line.split(\":\", 1)\n\tvalue = data.strip().lower()\n\tif value.endswith(\"ppm\") or value.endswith(\"seconds\"):\n\t key = re.sub(r\"[^a-z0-9]\", \"_\", label.strip().lower())\n\t if key == \"update_interval\":\n\t\tcontinue\n\t (value, unit) = value.split(\" \", 1)\n\t if unit not in tdata:\n\t\ttdata[unit] = {}\n\t tdata[unit][key] = {\"label\": label.strip(), \"value\": value}\n return tdata", "def manipulate_activity():\n pass", "def update_time(self):\n pass # Do nothing", "def getSubmitTime():", "def recordStart(self, event_key):\n self.start_times[event_key] = time.time()", "def time(self):\r\n raise NotImplementedError", "def save_current_run_time():\n # path = \"/Users/szou/Downloads/bu/happydogs/analytics_happydogs/last_time_run\" # hard coding this due to CRON, but will remove later\n output_file = open(\"last_time_run\", \"w\")\n current_time_string = datetime.datetime.strftime(\n datetime.datetime.now(), \"%Y-%m-%d %H:%M:%S\"\n )\n output_file.write(current_time_string)\n print(current_time_string)\n output_file.close()", "def setTrackStartTime() :\n s.startTrack()", "def time(self):\n raise NotImplementedError()", "def important_time(self):\n\t\twork_s = self.work_time().seconds\n\t\tbreak_s = self.break_time().seconds\n\t\tif self.status():\n\t\t\tremaining_time_s = tomato(work_s, break_s)\n\t\telse:\n\t\t\tremaining_time_s = potato(work_s, break_s)\n\n\t\timp_time = datetime.now() + timedelta(0, remaining_time_s)\n\t\treturn imp_time", "def report(self):\n self.last_contacted = time.time()", "def time_tracking(self):\n fb = FreshBooks()\n tg = Toggl()\n self.print_splash()\n self.print(\"Tip: You can always enter 'skip' when you want to skip a time entry.\", format='warn')\n days = self.get_interactive_days() # number of days to go back\n self.print(\"OK, I'll run you through the Toggl time entries of the past %i day(s).\" % (days))\n timestamp = self.get_timestamp(days) # unix timestamp including tz\n time_entries = tg.get_time_entries(timestamp)\n if len(time_entries) == 0:\n self.print(\"No Toggl entries in this time span!\", 'warn')\n return False\n time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries\n fb_projects = fb.get_projects()\n # Loop through merged Toggl time entries:\n for entry in time_entries:\n # Get and convert all necessary info:\n client_id = tg.get_client_id(project_id=entry.get('pid'))\n client_name = tg.get_client_name(client_id)\n project = tg.get_project(entry.get('pid'))\n duration = int(entry['duration']) / 60 / 60 # convert duration to hours\n duration = round(duration * 4 ) / 4 # round hours to nearest .25\n description = self.format_description(project['name'], entry['description'])\n date = str(parser.parse(entry['start']).date())\n # Print info in a nice way:\n self.print_divider(30)\n self.print(\"Description: \" + description)\n self.print(\"Date: \" + date)\n self.print(\"Hours spent: \" + str(duration))\n # Skip if Toggl entry is already booked:\n if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n self.print(\"Skipping this entry because it is already in Freshbooks.\", 'cross')\n # Skip if duration is below 0.25:\n elif duration < 0.25:\n self.print(\"Skipping this entry because there are less than 0.25 hours spent.\", 'cross')\n # If billable, 
add to Freshbooks:\n elif entry['billable']:\n # Get FreshBooks project name through interactive search:\n try:\n self.print(\"Project: \\U0001F50D \")\n fb_project_name = self.interactive_search(fb_projects.keys(), client_name)\n # Handle KeyboardInterrupt\n except KeyboardInterrupt:\n answer = input(\"\\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) \")\n if answer.lower() == 's' or answer == '':\n self.clear_lines(1)\n self.print(\"Skipping this entry.\", 'cross')\n continue\n else:\n self.clear_lines(1)\n self.print(\"Ok, stopping time tracking.\", 'cross')\n sys.exit()\n # If user requests so, skip this entry:\n self.clear_lines(1)\n if not fb_project_name:\n self.print(\"Skipping this entry.\", 'cross')\n continue\n # Otherwise, add entry to FreshBooks and tag Toggl entry/entries:\n self.print(\"Project: \" + fb_project_name)\n project_id = fb.get_project_id(fb_project_name)\n fb.add_entry(project_id, duration, description, date)\n tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG)\n # If not billable, skip entry:\n else:\n self.print(\"Skipping this entry because it is not billable.\", 'cross')\n self.print_divider(30)\n answer = input(\"All done! Open FreshBooks in browser to verify? (Y/n) \")\n if answer.lower() == 'y' or answer == '':\n webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])", "def task_scanned(now_task):", "def republish(self):\n self.timestamp = time.time()", "def action(self):\n return self.rowTime.activity", "def record_event(self, description, time=None, additional=None):\n if time is None:\n time = datetime.datetime.now()\n if additional is not None:\n self.history.append((time, (description, additional)))\n else:\n self.history.append((time, description))", "def _log_update_time(self, *_):\n import time\n if not hasattr(self, '_time'):\n setattr(self, '_time', time.time())\n _time = time.time()\n debug('Time since last call: {:.6f}s'.format(_time - getattr(self, '_time')))\n setattr(self, '_time', _time)", "def record(self, time, increment):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def __init__(self, device_connection, current_time):\n self.last_seen = current_time - device_connection.time\n self.name = device_connection.device_name", "def __init__(self):\n self.now = datetime.now()", "def _add_to_recently_called(self, match, reporter):\n if utils.istrcmp(match.player1_tag, reporter):\n other = match.player2_tag\n else:\n other = match.player1_tag\n self.recently_called[other] = time()", "def get_time_info(self):\n\n raise NotImplementedError", "def time_automation_listener(now):\n hass.async_add_job(action, {\n 'trigger': {\n 'platform': 'time',\n 'now': now,\n },\n })", "def realtime(self):", "def set_last_used_on(self):\n self.last_used_on = datetime.now()\n self.save()", "def started_on(self):\n return self.get_time(\"started_on\")", "def save_walltime(self):\n\n walltime = time.time() - self._start_time\n with open(self._walltime_path, 'w') as f:\n f.write(str(walltime) + \"\\n\")", "def save(self):\n\n # make a clone to preserve the original in case it's still needed\n clone = {}\n\n for machine in self.activity.keys():\n data = self.activity[machine].copy()\n data[\"filtered activity\"] = np.array(data[\"filtered activity\"], dtype=np.float)\n data[\"raw activity\"] = np.array(data[\"raw activity\"], dtype=np.float)\n data[\"time\"] = np.array(data[\"time\"], dtype=np.float)\n clone[machine] = data\n\n out = open(self.filename, \"wb\")\n pickle.dump(clone, out)\n 
out.close()", "def tic(self):\n return self._timestamp", "def after_epoch(self):\n line = ' '.join([str(k) + ': ' + str(v) for k, v in self.trainer.status.items()])\n with open(os.path.join(self.root_path, 'log.txt'), 'a+') as fout:\n fout.write(line + '\\n')", "def LingerTime(self) -> int:", "def tic():\n import time\n global startTime_for_tictoc\n startTime_for_tictoc = time.time()", "def sys_time(self):\n timestamp = None\n for i in range(10):\n while timestamp is None:\n timestamp = self.acquire_system_time()\n break\n return timestamp", "def GetCpuTimestamp(self):\n return {'TotalTime': time.time()}", "def update_info(self):\n self.m_canvas.master.m_informations_displayer.set_operations(\n self.m_current_index\n )\n self.m_canvas.master.m_informations_displayer.set_time(\n self.m_history[self.m_current_index].m_passed_time\n )", "def service( self ):\n\n self.alive = time.time()", "def _start_times_to_visit_info(self):\n\n self.visit_plan = {\n 'exp_start_times': self.exp_start_times,\n # for visit trends\n 'orbit_start_index': tools.detect_orbits(self.exp_start_times),\n }", "def LogProcess(self):\n time = datetime.today().strftime('%a %Y%b%d %X')\n# Get user name.\n f = os.popen(\"whoami\",\"r\")\n user = f.read().strip()\n f.close()\n\n entry = '%s\\t%s\\t%s\\t%s\\n' % (time, self.topdir, user, self.version)\n\n if ismounted(c.exams_file):\n# Append info to the exams file.\n try:\n f = open(c.exams_file,'a+')\n f.seek(0, 2)\n f.write(entry)\n f.close()\n except:\n# Not a huge problem if this doesn't work.\n pass", "async def status(self, ctx):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n up_time = time.time() - self.start_time\n m, s = divmod(up_time, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n stime = time.time() - psutil.boot_time()\n m, s = divmod(stime, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n system_uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n mem = psutil.virtual_memory()\n\n pid = os.getpid()\n memory_use = psutil.Process(pid).memory_info()[0]\n\n content = discord.Embed(title=f\"Miso Bot | version {main.version}\")\n content.set_thumbnail(url=self.client.user.avatar_url)\n\n content.add_field(name=\"Bot process uptime\", value=uptime_string)\n content.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent()}%\")\n content.add_field(name=\"System uptime\", value=system_uptime_string)\n\n content.add_field(name=\"System RAM Usage\", value=f\"{mem.percent}%\")\n content.add_field(name=\"Bot memory usage\", value=f\"{memory_use/math.pow(1024, 2):.2f}MB\")\n\n await ctx.send(embed=content)", "def process_current_time(self):\n if self.new_input:\n cuda.Context.synchronize()\n self.activate()\n self.new_input = False\n if self.plastic:\n self.learn()", "def cpu_time(self):", "def start_monitoring(self):\n pass", "def get_status(self):\n # find status\n # search in summary file first\n self.status = \"running\"\n status = self.search_summary(\"status\")\n if status:\n self.status = status.split()[1]\n # define running time\n # search in summary file first\n self.running_time = \"00:00:00\"\n running_time = self.search_summary(\"running-time\")\n if running_time:\n self.running_time = running_time.split()[1]\n # calculate running time\n else:\n now = datetime.datetime.now()\n elapsed_time = (now - self.ctime).seconds\n hours, remainder = divmod(elapsed_time, 3600)\n minutes, seconds = divmod(remainder, 60)\n self.running_time = (\n 
f\"{int(hours):02}:{int(minutes):02}:{int(seconds):02}\"\n )", "def service_time(self):\r\n #print self.node_monitor_address, self.completion_time - self.node_monitor_launch_time\r\n return (self.completion_time - self.node_monitor_launch_time)", "def _track_changes(self):\n if self.untrack is False:\n self._event._track_changes.add('attendees')", "def _RecordVisitTime(self, mr, now=None):\n now = now or int(time.time())\n if not settings.read_only and mr.auth.user_id:\n user_pb = mr.auth.user_pb\n if (user_pb.last_visit_timestamp <\n now - framework_constants.VISIT_RESOLUTION):\n user_pb.last_visit_timestamp = now\n self.services.user.UpdateUser(mr.cnxn, user_pb.user_id, user_pb)", "def setSubmitTime(t):", "def _track_changes(self):\n if self._untrack is False:\n self._event._track_changes.add('attendees')", "def _metadataSaving(self):\n imageCount=0\n\n #Timestamp to flag the beginning of acquisition\n if not self.startAcquisitionTime:\n self.startAcquisitionTime = time()\n print('timestamp got')\n while(imageCount<(self.nbFrames) and self.acqRunning and self.loopRunning):\n if risingEdge(self.labjack, 3): #Labjack, channel, timeout(s)\n startTime = time()\n frameTime = startTime - self.startAcquisitionTime #Taking the off time to be synchronized with metadata\n odourValveSig = readOdourValve(self.labjack, 2)\n respirationSig = readSignal(self.labjack, 0)\n saveMetadata(\tself.textFile,\n\t\t\t\t\t\t\t\tstr(frameTime),\n\t\t\t\t\t\t\t\tstr(self.ledList[imageCount]),\n\t\t\t\t\t\t\t\tstr(imageCount),\n\t\t\t\t\t\t\t\tstr(odourValveSig),\n\t\t\t\t\t\t\t\tstr(respirationSig),\n\t\t\t\t\t\t\t\tstr(0)) #Maybe not the best practice\n imageCount+=1\n\n #close the metadata .txt file\n self.textFile.close()\n print('end of the ledSwitchingThread')\n return imageCount", "def on_log(self):\n monitors = self.monitors\n if self.monitors is None:\n monitors = self.trainer.metrics.keys()\n\n\n hparams = self.hparams\n if self.hparams is None:\n hparams = self.trainer.hparams.keys()\n\n metrics = {name: format_metric(self.trainer.metrics[name])\n for name in monitors\n if name in self.trainer.metrics}\n hparams = {name: format_metric(self.trainer.hparams[name])\n for name in hparams\n if name in self.trainer.hparams}\n\n\n step_bar = self.step_bars[-1]\n step_bar.set_description(\"Epoch {}\".format(self.trainer.epoch+1))\n step_bar.set_postfix(**metrics, **hparams)\n step_bar.update(self.trainer.steps_trained - self.last_step)\n self.last_step = self.trainer.steps_trained", "def time_created(self, time_created):\n self._time_created = time_created", "def time_created(self, time_created):\n self._time_created = time_created", "def time_created(self, time_created):\n self._time_created = time_created", "def time_created(self, time_created):\n self._time_created = time_created", "def on_action_time_changed(self, content):\n time = parse_iso_dt(content['time']).time()\n self.set_guarded(time=time)", "def __init__(self):\n self.tic0=time.time()\n self.tic=self.tic0\n self.record=[]", "def update_activity():\n pass", "def on_episode_begin(self, episode, logs):\n self.episode_start[episode] = timeit.default_timer()\n self.observations[episode] = []\n self.rewards[episode] = []\n self.actions[episode] = []\n self.metrics[episode] = []", "def on_episode_begin(self, episode, logs):\n self.episode_start[episode] = timeit.default_timer()\n self.observations[episode] = []\n self.rewards[episode] = []\n self.actions[episode] = []\n self.metrics[episode] = []", "def mark_started(self):\n self.started = 
datetime.now()\n self.save()", "def do_uptime(self, message):\r\n\t\tup_time = time.time() - self.start_time\r\n\t\tstart_time = datetime.datetime.fromtimestamp(self.start_time)\r\n\t\tself.trace(f'Uptime: {duration(up_time)}; running since {start_time}')", "def save(cls):\n\n cls._set_mode_stopped()\n notes = Notes.get_text()\n timestamp = TimeDisplay.get_timestamp()\n TimeLog.add_to_log(timestamp, *TimeDisplay.get_time(), notes)\n LogView.refresh()\n Notes.clear()\n TimeDisplay.stop_time()\n TimeDisplay.reset_time(erase=True)\n for callback in cls.save_callback:\n callback()", "def now():\r\n return time.time()", "def start(self):\r\n self.start_time = time.time()", "def timeSinceSeen(self):\r\n return time.time() - self.lastTimeSeen\r\n \r\n \t'''定位模块加入了再使用'''", "def timeCheckpoint(start_time, name):\n\n time = clock() - start_time\n print(str.capitalize(name) + ': \\t%.3f' % time)\n return clock()", "def start_time(self):\n pass", "def __time(self):\n return time.time()", "def process_current_time(self):\n if self.new_input:\n self.new_input = False\n\n if self.activation_count == self.mask_init_time:\n cuda.Context.synchronize()\n self.mask.calculate()\n\n if self.tsettle == 0:\n # Special case: behave just like a CFSheet\n cuda.Context.synchronize()\n self.activate()\n self.learn()\n\n elif self.activation_count == self.tsettle:\n # Once we have been activated the required number of times\n # (determined by tsettle), reset various counters, learn\n # if appropriate, and avoid further activation until an\n # external event arrives.\n for f in self.end_of_iteration: f()\n\n self.activation_count = 0\n self.new_iteration = True # used by input_event when it is called\n if (self.plastic and not self.continuous_learning):\n self.learn()\n else:\n cuda.Context.synchronize()\n self.activate()\n self.activation_count += 1\n if (self.plastic and self.continuous_learning):\n self.learn()", "def hourly_stats():\r\n count_total.delay()\r\n count_unique.delay()\r\n count_tags.delay()" ]
[ "0.69313186", "0.63031006", "0.62680084", "0.6137144", "0.6080439", "0.58535415", "0.58349663", "0.580419", "0.57930464", "0.57846797", "0.57673466", "0.57643175", "0.57618624", "0.57599396", "0.57484883", "0.5720372", "0.5714929", "0.57034856", "0.5640119", "0.5619169", "0.5600874", "0.5551675", "0.55425686", "0.55075747", "0.5486525", "0.5486525", "0.5486525", "0.5447992", "0.5430837", "0.5429526", "0.5417997", "0.5403844", "0.5398501", "0.53912467", "0.5386414", "0.53852564", "0.5385058", "0.5361391", "0.5352222", "0.5337062", "0.5310936", "0.5308349", "0.5300359", "0.5297581", "0.52676535", "0.5267599", "0.5263842", "0.52610475", "0.52488714", "0.5239724", "0.522401", "0.52019566", "0.51968616", "0.51920325", "0.51887864", "0.5183037", "0.5170623", "0.51603943", "0.5160353", "0.5160309", "0.5157505", "0.5150604", "0.51468325", "0.5145274", "0.514443", "0.5140572", "0.51404756", "0.5132618", "0.51243097", "0.5116489", "0.5113122", "0.5109168", "0.5100943", "0.5088493", "0.5088174", "0.5088131", "0.5086031", "0.5079544", "0.5076005", "0.50727785", "0.50711596", "0.50711596", "0.50711596", "0.50711596", "0.5059158", "0.50578004", "0.50562006", "0.50477326", "0.50477326", "0.50443476", "0.5038182", "0.5033557", "0.5029844", "0.5028635", "0.5025789", "0.50216657", "0.5016898", "0.5011815", "0.5008756", "0.50033724" ]
0.5073809
79
Record start time of a process as well as its host machine.
def updateProcess(self, machine, process):
        stamp = time.time() - self.initTime
        if machine in self.activity.keys():
            if (("processes" in self.activity[machine].keys()) and
                    (process in self.activity[machine]["processes"].keys())):
                self.activity[machine]["processes"][process].append(stamp)
            else:
                self.activity[machine]["processes"] = {process : [stamp]}
        else:
            self.activity[machine] = {"filtered activity" : [],
                                      "raw activity" : [],
                                      "time" : [],
                                      "processes" : {process : [stamp]}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_time(self):\n pass", "def start_time(self) -> float:\r\n ...", "def getStartTime(self):\n assert not self.isWaitingToStart(), \"Too early to tell: %s\" % self\n return \"%s\" % self.__rawInfo.startTime", "def start_time(self):\n return self.__start", "def set_start_time():\n __start = current_time_milli()", "def start_time(self):\n return self._meta['start_time']", "def recordStart(self, event_key):\n self.start_times[event_key] = time.time()", "def startTime(self):\n return self._startTime", "def startTime(self):\n return self._startTime", "def startTime(self):\n return self._startTime", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start_time(self):\n return self._start_time", "def start(self):\r\n self.start_time = time.time()", "def start_time(self):\n return self._get(\"start_time\")", "def start_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"start_time\")", "def start_time():\n t = [time.clock(), time.time()]\n return t", "def GetCpuTimestamp(self):\n return {'TotalTime': time.time()}", "def time_start(self):\n return self._time_start", "def proc_start():\n\n fd = open(\"/proc/self/stat\")\n start_clk = int(fd.readline().split()[21])\n start_sec = start_clk // os.sysconf(\"SC_CLK_TCK\")\n fd.close()\n\n fd = open(\"/proc/stat\")\n boot_sec = None\n for line in fd:\n if line.startswith(\"btime\"):\n boot_sec = int(line.split()[1])\n assert boot_sec is not None\n fd.close()\n\n return boot_sec + start_sec", "def start_time(self) -> str:\n return self._start_time", "def __get_starting_time(self):\n return self.__starting_time", "def getStartTime(self):\n raise NotImplementedError", "def start_time(self) -> str:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> str:\n return pulumi.get(self, \"start_time\")", "def init_time():\r\n\tstarttime = time.time()\r\n\treturn starttime # return the start time\r", "def started_on(self):\n return self.get_time(\"started_on\")", "def start_time(self) -> datetime:\n return self.root_hartree.start_time", "def get_attempt_start_time():\n pass", "def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):\n if pgrep_full:\n cmd = 'pgrep -o -f {}'.format(service)\n else:\n cmd = 'pgrep -o {}'.format(service)\n proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())\n return self._get_dir_mtime(sentry_unit, proc_dir)", "def track_start_time(self, test_class, test_name, start_time):\n if test_class is None or test_name is None:\n return\n\n test_key = \"{}.{}\".format(test_class, test_name)\n self.start_time_by_test[test_key] = start_time", "def starts(self):\n return self.time_start", "def start_time(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"start_time\")", "def get_start_time(self):\n # Searching through qstat and grabbing only the start time. 
Lot of weeding out.\n qstat = subprocess.getoutput(\"qstat -j {0}\".format(self.id))\n qstat = qstat[qstat.find(\"start_time\"):]\n qstat = qstat[:qstat.find('\\n')]\n return qstat[28:]", "def timestart(self, timestamp=0):\n self._p('[timestart] {}'.format(timestamp))", "def start_time(self) -> float:\n return self._start_time", "def getStartTime(self):\n assert not self.isWaitingToStart(), \"Too early to tell: %s\" % self\n return \"%s\" % self.__jobInfo.startTime", "def start_time(self):\n return self.time_parser.start_time", "def _start_clock(self):\n self._start = time.time()", "def start(self):\n# if self._start_time is not None:\n self._start_time = time.perf_counter()", "def get_start_time(self):\n return str(self._start_time)", "def start_time_string(self):\r\n return time.strftime('%H:%M:%S', time.localtime(self.start_time))", "def start(self):\n self.start_time = time.time()", "def __init__(self, start_time=None):\n if start_time is None:\n self.started = time.time()\n else:\n self.started = start_time", "def _getStartTime(self):\n return self._startTime.strftime(\"%Y-%m-%d %H:%M:%S\")", "def get_exec_time(self):\n return self._exec_time", "def __start_tracking(self, event_type=None, message=None):\n\n if message:\n logger.info(message)\n\n start_time = time.time()\n id = str(uuid.uuid4())\n if not hasattr(self, 'running_processes'):\n self.running_processes = {}\n self.running_processes[id] = (start_time, event_type)\n return id", "def start_time_entry(self, userID):\n start = datetime.datetime.now()\n\n sql = u'INSERT INTO job_time_log_TBL (' \\\n u'job_ID_year, job_ID_number, person_ID, start_time) ' \\\n u'VALUES (%s, %s, %s, %s)'\n data = (self.job_number_sql[0], self.job_number_sql[1], userID, start)\n\n c, conn = connection(self.company_schema)\n try:\n c.execute(sql, data)\n finally:\n conn_close(c, conn)\n\n return start", "def start_time(self, value):\n self._start_time = value", "def start_time(self):\n # TODO: use pd.Timestamp instead\n return self.time[0].to_pydatetime()", "def start_time(self, start_time):\n self._start_time = start_time", "def start_time(self, start_time):\n self._start_time = start_time", "def service_time(self):\r\n #print self.node_monitor_address, self.completion_time - self.node_monitor_launch_time\r\n return (self.completion_time - self.node_monitor_launch_time)", "def get_start_time(self):\n start = datetime.strptime(\n self.get_handler().SOURCE_START_DATE.split('.')[0],\n '%Y%m%d%H%M%S'\n )\n return start", "def test_startProcess(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIsInstance(self.pm.protocols[\"foo\"], LoggingProtocol)\r\n self.assertIn(\"foo\", self.pm.timeStarted.keys())", "def start_time(self) -> Optional[str]:\n return pulumi.get(self, \"start_time\")", "def time(self):\n return self._begin", "def start_time(self) -> float:\n return float(self.get_from_redis(\"start_time\"))", "def _start_launch_time(self, launched_event):\n if launched_event:\n interval = Interval(0, launched_event.timestamp)\n return self._trace.cpu.task_intervals(task=launched_event.task,\n interval = interval)[0].interval.start", "def timeCheckpoint(start_time, name):\n\n time = clock() - start_time\n print(str.capitalize(name) + ': \\t%.3f' % time)\n return clock()", "def get_start_time(self, file_path) -> datetime | None:\n if file_path in self._processors:\n return self._processors[file_path].start_time\n return None", "def start_time(self, start_time):\n\n self._start_time = start_time", "def 
start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_time(self, start_time):\n\n self._start_time = start_time", "def start_timer(self):\n self.start_time = datetime.now()", "def start_time(self, value):\n if value is not None:\n value = value.freeze()\n self.cache['start_time'] = value", "def pc_work_time(self):\n return _spacegrant_swig.udp_debug_sptr_pc_work_time(self)", "def start_timestamp(self):\n return self._start_timestamp", "def get_start_time(self):\n return min([m.get_start_time() for m in self._mappers])", "def pc_work_time(self):\n return _spacegrant_swig.message_debug_sptr_pc_work_time(self)", "def start_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"start_time\")", "def time():\n _check_init()\n return _pypm.Time()", "def start_timer(self):\n self.start_time = time.time()", "def cpu_time(self):", "def StartTimer(self):\n self._start_time = time.time()", "def time(self, start_time):\n \n TIME_LIST.append((time.time() - start_time))\n print(\"--- %s seconds ---\" % (time.time() - start_time))", "def set_startTime(self, startTime):\n self.startTime = mktime(startTime)", "def start(self) -> datetime:\n return self._start", "def service_time(self):\r\n return (self.completion_time - self.node_monitor_launch_time)", "def start_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"start_time\")", "def on_phase_start(\n self, task: \"tasks.ClassyTask\", local_variables: Dict[str, Any]\n ) -> None:\n self.start_time = time.time()\n local_variables[\"perf_stats\"] = PerfStats()", "def pc_work_time_var(self):\n return _spacegrant_swig.udp_debug_sptr_pc_work_time_var(self)", "def start(self):\n # type: () -> datetime\n return self._start", "def set_start_time(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_start_time(self, *args, **kwargs)", "def get_starttime(self):\n filetime = datetime.datetime.strptime(self.filenametime,\n \"%Y%m%d_%H%M%S\")\n if self.ldat_type != 'acc':\n starttime = filetime\n else:\n starttime = filetime - datetime.timedelta(seconds=512)\n return starttime", "async def track_start(self):\n await self.wait_until_ready()\n self.start_time = datetime.datetime.utcnow()", "def get_start_time(settings: SimulationSettingsModel):\n return settings.project.start_time", "def _ParseStartTime(output: str) -> float:\n hosts = output['sysstat']['hosts']\n date = hosts[0]['date']\n time = hosts[0]['statistics'][0]['timestamp']\n # TODO(user): handle malformed json output from mpstat\n start_datetime_string = ' '.join([date, time])\n # As a sysstat utility, this is printed in UTC by default\n start_datetime = datetime.datetime.strptime(\n start_datetime_string,\n '%Y-%m-%d %H:%M:%S').replace(tzinfo=datetime.timezone.utc)\n return start_datetime.timestamp()" ]
[ "0.65806735", "0.6416755", "0.6403931", "0.6389469", "0.6377402", "0.6365346", "0.62822706", "0.6281774", "0.6281774", "0.6281774", "0.623706", "0.623706", "0.623706", "0.623706", "0.623706", "0.623706", "0.623706", "0.623706", "0.62240183", "0.62178797", "0.61842185", "0.6182558", "0.61816776", "0.6177564", "0.6172094", "0.6169025", "0.6167443", "0.6155781", "0.61260813", "0.61260813", "0.6117514", "0.6114716", "0.6114096", "0.6104896", "0.6091661", "0.6070227", "0.60396534", "0.60379845", "0.6017881", "0.60164034", "0.6006879", "0.6006563", "0.59340686", "0.5919823", "0.5904885", "0.5896552", "0.5833521", "0.5831422", "0.58288246", "0.58283937", "0.58209425", "0.58032185", "0.58006114", "0.578877", "0.5785487", "0.57748383", "0.57748383", "0.5771541", "0.57433957", "0.57407683", "0.57298267", "0.5712508", "0.5705296", "0.5698683", "0.56697947", "0.5643666", "0.5638142", "0.5638142", "0.5638142", "0.5638142", "0.5638142", "0.56339335", "0.56309164", "0.5629769", "0.56248254", "0.5620538", "0.5616273", "0.56119365", "0.56119365", "0.5607375", "0.5598538", "0.55929744", "0.55840665", "0.55830514", "0.5574149", "0.5562801", "0.5553907", "0.5551101", "0.5551101", "0.5551101", "0.5551101", "0.5551101", "0.5551101", "0.55465704", "0.55394626", "0.55384415", "0.55293024", "0.55282575", "0.5491088", "0.54874915", "0.54822326" ]
0.0
-1
Convert to numpy arrays and pickle to the specified file.
def save(self):
        # make a clone to preserve the original in case it's still needed
        clone = {}
        for machine in self.activity.keys():
            data = self.activity[machine].copy()
            data["filtered activity"] = np.array(data["filtered activity"], dtype=np.float)
            data["raw activity"] = np.array(data["raw activity"], dtype=np.float)
            data["time"] = np.array(data["time"], dtype=np.float)
            clone[machine] = data
        out = open(self.filename, "wb")
        pickle.dump(clone, out)
        out.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pickle(array, file):\r\n\timport cPickle\r\n\tfo = open(file,'wb')\r\n\tcPickle.dump(array,fo)\r\n\tfo.close()", "def save(file, arr, allow_pickle=True, fix_imports=True):\n\n return numpy.save(file, array_create.array(arr, bohrium=False), allow_pickle, fix_imports)", "def save_to_file(samps, filename, save_as_numpy):\n with open(filename, 'wb') as out_file:\n if save_as_numpy:\n np.save(out_file, samps, allow_pickle=False, fix_imports=False)\n else:\n samps.tofile(out_file)", "def dump_npy(filename: str, obj, **kwargs):\n return np.save(filename, obj)", "def load_pickles_to_arrays(k_out, sphere_radius):\n cwd = os.getcwd()\n current = generate_file_ending(k_out, sphere_radius)\n with open(cwd + '/T_ns_' + current + \".pkl\", 'rb') as f:\n T_ns = pickle.load(f)\n with open(cwd + '/Energy_' + current + \".pkl\", 'rb') as f:\n E = pickle.load(f)\n with open(cwd + '/Distance-e-to-e_' + current + \".pkl\", 'rb') as f:\n D = pickle.load(f)\n with open(cwd + '/chain_on_iterations_' + current + \".pkl\", 'rb') as f:\n chains_on_iterations = pickle.load(f)\n #chains_on_iterations = back_to_XYZs(chains_on_iterations)\n return T_ns, E, D, chains_on_iterations", "def save_to_array(x, y):\n\n with open(settings.data(\"x.npy\"), \"wb\") as file:\n np.save(file, x)\n\n with open(settings.data(\"y.npy\"), \"wb\") as file:\n np.save(file, y)", "def save_array(array, filename):\n np.save(filename, array)", "def save2file(lis, path):\r\n np.save(path, np.array(lis))", "def save2file(self):\n ids_input = []\n labels_input = []\n ids_path = os.path.join(self.path, 'ids')\n if not os.path.exists(ids_path):\n os.makedirs(ids_path)\n labels_path = os.path.join(self.path, 'labels')\n if not os.path.exists(labels_path):\n os.makedirs(labels_path)\n ids_total = len(self.test)\n for i in range(ids_total):\n ids_input = self.test[i][0]\n labels_input = self.test[i][1]\n file_name = \"ids/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(ids_input, dtype=np.int32).tofile(file_path)\n file_name = \"labels/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(labels_input, dtype=np.int32).tofile(file_path)\n print(\"\\n ****** Success! 
******\\n \")", "def save_dataset_as_numpy(dataloader, file_path, key=\"original\", message=\"\"):\n\n print(message)\n array_to_save = compose_array_from_dataloader(dataloader, key=key)\n with open(file_path, \"wb\") as file:\n np.save(file, array_to_save)", "def quick_save_array(data, file_name, delimiter=',', ):\n data.tofile(file_name, sep=delimiter)", "def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)", "def toFile(self,fid):\n stack = []\n for w,b in self.stack:\n w.copy_to_host()\n b.copy_to_host()\n stack.append([w.numpy_array,b.numpy_array])\n\tpickle.dump(stack,fid)", "def store(obj, filename, suffix = ''):\n # It is a numpy array\n if type(obj) == np.ndarray:\n path,f = writefile(filename, obj_id='numpy_objs', suffix=suffix)\n json.dump(obj, fp=f, cls=NumpyEncoder,\n separators=(',', ':'), sort_keys=True, indent=4)\n print '> saved with JSON to {}'.format(path)\n else:\n path, f = writefile(filename, obj_id='other_objs', suffix=suffix)\n pickle.dump(obj, file=f)\n print '> saved with dill (pickled) to {}'.format(path)\n return path", "def to_las(self, fpath):\n with laspy.file.File(fpath, mode='w', header=self.header,\n vlrs=[laspy.header.VLR(**_VLR_DEFAULT)]) as f:\n f.x, f.y, f.z = self._arr", "def _save_numpy(self, folderpath: str):\n\n if not os.path.exists(folderpath):\n raise EnvironmentError('Unable to save numpy data to {}, does not exist'.format(folderpath))\n da.to_npy_stack(folderpath + '/data', self.data)\n da.to_npy_stack(folderpath + '/node_data', self.node_data)", "def save_bin(data, file_path):\n np.save(file_path, data)", "def to_files(self, gen, filenames=None):\n\n if filenames:\n self.filenames = filenames\n\n for f, arr in zip(self.pathgen, gen):\n np.save(f, arr)", "def test_load_numpy_file(save_npz) -> None:\n filename, data = save_npz\n result = loader.load_numpy_file(filename)\n\n for k, v in data.items():\n assert np.array_equal(v, result[k])", "def save_numpy_array(self):\n np.save(\"smallest_particles.npy\", self.smallest_particles)\n np.save(\"number_counts.npy\", self.number_counts)\n np.save(\"number_counts_2.npy\", self.number_counts_2)", "def write_file(_data, _label, _clinical, _contour, _type):\n pickle.dump(np.array(_data), open(_type + '_data.pxl', 'wb'))\n pickle.dump(np.array(_label), open(_type + '_label.pxl', 'wb'))\n pickle.dump(np.array(_clinical), open(_type + '_clinical.pxl', 'wb'))\n pickle.dump(np.array(_contour), open(_type + '_contour.pxl', 'wb'))", "def _serialize_array(self, array):\n buffer = io.BytesIO()\n np.save(buffer, array)\n return buffer.getvalue()", "def save_to_array(arr_name, arr_object):\n return np.save(arr_name, arr_object)", "def save_data(data, filename, save_path):\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n np.save(os.path.join(save_path, filename), data)", "def save(self, timestamp: int, data_path: str, file_base: str):\n file_name = os.path.join(data_path,\n '{}-{}.pkl'.format(file_base, timestamp))\n pickle.dump(self.as_numpy_array(),\n open(file_name, 'wb'),\n protocol=pickle.HIGHEST_PROTOCOL)", "def save_ml_output(arrays, out_fname, force):\n if not force:\n if os.path.isfile(out_fname):\n return\n try:\n os.makedirs(os.path.dirname(out_fname))\n except FileExistsError:\n pass\n np.save(out_fname, arrays, allow_pickle=False)", "def _save(self, data: np.ndarray) -> None:\n ...", "def save(self, filename = 'array_zest', path = '/home/eric/dev/insitu/data/zs_recovery/'):\n filename = filename# + '_Lx_' + str(self.Lx) + 'm_Ly_' + str(self.Ly) + 
'm'\n self.path_filename = path + filename + '.pkl'\n f = open(self.path_filename, 'wb')\n pickle.dump(self.__dict__, f, 2)\n f.close()", "def save(fname, data):\n from ..numpy import ndarray as np_ndarray\n if isinstance(data, NDArray):\n data = [data]\n handles = c_array(NDArrayHandle, [])\n if isinstance(data, dict):\n str_keys = data.keys()\n nd_vals = data.values()\n if any(not isinstance(k, string_types) for k in str_keys) or \\\n any(not isinstance(v, NDArray) for v in nd_vals):\n raise TypeError('save only accept dict str->NDArray or list of NDArray')\n if any(isinstance(v, np_ndarray) for v in nd_vals):\n raise TypeError('cannot save mxnet.numpy.ndarray using mxnet.ndarray.save;'\n ' use mxnet.numpy.save instead.')\n keys = c_str_array(str_keys)\n handles = c_handle_array(nd_vals)\n elif isinstance(data, list):\n if any(not isinstance(v, NDArray) for v in data):\n raise TypeError('save only accept dict str->NDArray or list of NDArray')\n if any(isinstance(v, np_ndarray) for v in data):\n raise TypeError('cannot save mxnet.numpy.ndarray using mxnet.ndarray.save;'\n ' use mxnet.numpy.save instead.')\n keys = None\n handles = c_handle_array(data)\n else:\n raise ValueError(\"data needs to either be a NDArray, dict of str, NDArray pairs \"\n \"or a list of NDarrays.\")\n check_call(_LIB.MXNDArraySave(c_str(fname),\n mx_uint(len(handles)),\n handles,\n keys))", "def save_as_numpy(self, filename, compressed=False):\n logger.warn(\n 'Saving in npz format loses timestamp and ROI information.')\n logger.warn('Consider saving in FITS or HDF5 formats instead.')\n save_func = np.savez_compressed if compressed else np.savez\n save_func(filename, *self.to_list())", "def save_arrays_to_npz(data: Union[dict, list], file_path: str):\n arrays = list(data.values()) if isinstance(data, dict) else data\n if not all([isinstance(arrays[i], np.ndarray) for i in range(len(arrays))]):\n raise ValueError(\"Incorrect data arrays\")\n\n if os.path.dirname(file_path):\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n file_path += \".npz\" if \".npz\" != file_path[-4:] else \"\"\n\n if isinstance(data, dict):\n np.savez(file=file_path, **data)\n else:\n args = {str(i): arrays[i] for i in range(len(arrays))}\n np.savez(file=file_path, **args)", "def convert_and_save(filename, output_dir, is_train_data):\n mat_contents = sio.loadmat(filename)\n if 'X' in mat_contents:\n data = mat_contents['X']\n # data = data[:-1, :]\n if is_train_data:\n # data = data[:, :5000]\n # Finding the normalization statistics from the train data\n global mu, sig2 \n mu = np.sum(data, axis=1) / data.shape[1]\n sig2 = np.sum(data**2, axis=1) / data.shape[1] \n # Normalizing feature data\n data -= mu.reshape((data.shape[0], 1))\n data /= sig2.reshape((data.shape[0], 1))\n\n if 'Y' in mat_contents:\n data = mat_contents['Y']\n if is_train_data:\n # data = data[:, :5000]\n None\n\n print(\"Input filename is \" + filename)\n save_name = filename.split(str(os.sep))[-1]\n save_name = save_name.split('.')[0]\n np.save(os.path.normpath(os.path.join(output_dir, save_name)), data.T)", "def pickle(self,data,filename):\n pickle.dump(data, open(filename, 'wb'))", "def save_big_data_array(folder, start, n_realization, prefix='real'):\n file_name = prefix + '_' + str(start) + \".pkl\"\n file_address = os.path.join(folder, file_name)\n with open(file_address, 'rb') as input:\n dataHolder = pickle.load(input)\n x_big = dataHolder.x_array\n t_big = dataHolder.t_array\n y_big = dataHolder.y_array\n print 'making large array from realizations...'\n 
for i in range(start + 1, start + n_realization):\n if not i%100:\n print 'relization number: ', i\n file_name = prefix + \"_\" + str(i) + \".pkl\"\n file_address = os.path.join(folder, file_name)\n with open(file_address, 'rb') as input:\n dataHolder = pickle.load(input)\n if not dataHolder.x_array.shape[0]:\n print 'empty realization, skipping...'\n continue\n x_big = np.vstack((x_big, dataHolder.x_array))\n y_big = np.vstack((y_big, dataHolder.y_array))\n t_big = np.vstack((t_big, dataHolder.t_array))\n print 'saving big arrays'\n np.save(os.path.join(folder, 'big_x'), x_big)\n np.save(os.path.join(folder, 'big_y'), y_big)\n np.save(os.path.join(folder, 'big_t'), t_big)", "def deserialize(arr):\n return pickle.loads(arr.astype(np.uint8).tobytes())", "def deserialize(arr):\n return pickle.loads(arr.astype(np.uint8).tobytes())", "def pickle_data(self):\n if 'data_sets.pckl' in self.expected_pickles:\n to_file(\n self.data_sets,\n os.path.join(self.logdir, 'data_sets.pckl')\n )\n if 'all_params.pckl' in self.expected_pickles:\n to_file(\n self.all_params,\n os.path.join(self.logdir, 'all_params.pckl')\n )\n if 'labels.pckl' in self.expected_pickles:\n to_file(\n self.labels,\n os.path.join(self.logdir, 'labels.pckl')\n )\n if 'minimiser_info.pckl' in self.expected_pickles:\n to_file(\n self.minimiser_info,\n os.path.join(self.logdir, 'minimiser_info.pckl')\n )", "def load_data_from_npy(filename):\n return np.load(filename)", "def numpy_2_file(narray, file, path=OUTPUT_PATH, sep=',' ):\n file_path = path + file\n narrayc = numpy.copy(narray)\n numpy.place(narrayc,numpy.logical_or(narrayc==-1,narrayc==-2), 2)\n dataset = numpy.copy(narrayc).astype(str)\n numpy.place(dataset,dataset=='2', '*')\n d=numpy.atleast_2d(dataset)\n numpy.savetxt(file_path, d, delimiter=sep, fmt='%s')\n return", "def save_pickles(filename, *args):\n with gzip.open(filename, 'wb') as outfile:\n for thing in args:\n pickle.dump(thing, outfile)", "def save(self, obj):\r\n if self.np is not None and type(obj) in (self.np.ndarray,\r\n self.np.matrix, self.np.memmap):\r\n size = obj.size * obj.itemsize\r\n if self.compress and size < self.cache_size * _MEGA:\r\n # When compressing, as we are not writing directly to the\r\n # disk, it is more efficient to use standard pickling\r\n if type(obj) is self.np.memmap:\r\n # Pickling doesn't work with memmaped arrays\r\n obj = self.np.asarray(obj)\r\n return Pickler.save(self, obj)\r\n self._npy_counter += 1\r\n try:\r\n filename = '%s_%02i.npy' % (self._filename,\r\n self._npy_counter)\r\n # This converts the array in a container\r\n obj, filename = self._write_array(obj, filename)\r\n self._filenames.append(filename)\r\n except:\r\n self._npy_counter -= 1\r\n # XXX: We should have a logging mechanism\r\n print('Failed to save %s to .npy file:\\n%s' % (\r\n type(obj),\r\n traceback.format_exc()))\r\n return Pickler.save(self, obj)", "def array_to_file(filename, a):\n a = normalize_array(a)\n i = Image.fromarray(a.astype('uint8'))\n return i.save(filename)", "def numpify_and_store(X, y, X_name, y_name, outdatapath, shuffle=False):\n X = np.array(X)\n y = np.array(y)\n # Shuffle the train set\n if shuffle is True:\n np.random.seed(123)\n neworder = np.random.permutation(X.shape[0])\n X = X[neworder, :, :]\n y = y[neworder, :]\n # Save binary file\n xpath = os.path.join(outdatapath, X_name)\n ypath = os.path.join(outdatapath, y_name)\n np.save(xpath, X)\n np.save(ypath, y)\n print('Stored ' + xpath, y_name)", "def save_npy(self, array: np.ndarray, name: str):\n 
tf.gfile.MkDir(str(self.info.npy_path))\n file_name = name + \".npy\"\n np.save(str(self.info.npy_path / file_name), array)", "def save_data(data: Any, file_name: str) -> None:\n with open(file_name, \"wb\") as output:\n pickle.dump(data, output)", "def save(self, path, name):\n if not self._frozen:\n raise Exception(\"Dataset must be frozen\")\n # create directory\n pathlib.Path(os.path.join(path,name)).mkdir(parents=True, exist_ok=True)\n self._raw_data.to_hdf(os.path.join(path,name,\"dataset.h5\"), key=\"raw_data\")\n self._proc_data.to_hdf(os.path.join(path,name,\"dataset.h5\"), key=\"proc_data\")\n np.save(os.path.join(path,name,\"_X_train.npy\"), self._X_train)\n np.save(os.path.join(path,name,\"_X_test.npy\"), self._X_test)\n np.save(os.path.join(path,name,\"_y_train.npy\"), self._y_train)\n np.save(os.path.join(path,name,\"_y_test.npy\"), self._y_test)\n \n np.save(os.path.join(path,name,\"_X_mean.npy\"), self._X_mean)\n np.save(os.path.join(path,name,\"_X_std.npy\"), self._X_std)\n np.save(os.path.join(path,name,\"_y_mean.npy\"), self._y_mean)\n np.save(os.path.join(path,name,\"_y_std.npy\"), self._y_std)\n \n with open(os.path.join(path,name,\"_seed.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._seed, fp)\n with open(os.path.join(path,name,\"_train_part.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._train_part, fp)\n with open(os.path.join(path,name,\"_test_part.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._test_part, fp)\n with open(os.path.join(path,name,\"_columns.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._columns, fp)", "def loadFromNpy(filename):\n return np.load(filename, allow_pickle = True)[()]", "def save_data_to_disk(self):\n Omega_M = self.theta_fid[0]\n for key in self.data.keys():\n np.save(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy', self.data[key])", "def pickle_data(file_name, data):\n outfile = open(file_name, \"wb\")\n pickle.dump(data, outfile)\n outfile.close()", "def storePickle(filename, fit_diff_matrix):\n store_filename = filename + \".pickle\"\n with open(store_filename, \"wb\") as f:\n pickle.dump(fit_diff_matrix, f)", "def save_any_to_npy(save_dict={}, name='any.npy'):\n np.save(name, save_dict)", "def save_mat(ndarray, path):\n io.savemat(path, dict(ndarray=ndarray))", "def save_mat(ndarray, path):\n io.savemat(path, dict(ndarray=ndarray))", "def load_npy_to_any(path='', name='any.npy'):\n npz = np.load(path+name).item()\n return npz", "def serialize_numpy(self, buff, numpy):\n try:\n pass\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n pass\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def savez(file, *args, **kwds):\n\n ary_list = []\n for a in args:\n ary_list.append(array_create.array(a, bohrium=False))\n return numpy.savez(file, *ary_list, **kwds)", "def dataConvertToNumpy( self ):\n self.featureNumpy = np.asarray( self.feature )\n self.ClassNumpy = np.asarray( self.Class )", "def load_data():\n\n dump_path = dump_base + '/micro_poi/mpoi_info/'\n\n assert os.path.exists(dump_path)\n\n dpath = dump_path + 'shortest_path.pickle'\n paths = joblib.load(dpath)\n\n dpath = dump_path + 'path_list.pickle'\n path_list = joblib.load(dpath)\n\n dpath = dump_path + 
'gain.pickle'\n gain = joblib.load(dpath)\n\n dpath = dump_path + 'stay.pickle'\n stay_time = joblib.load(dpath)\n\n dpath = dump_path + 'reach.pickle'\n reach_time = joblib.load(dpath)\n\n spath = dump_base + '/micro_poi/model_params.list'\n model_params = np.loadtxt(spath)\n\n return np.array(paths), path_list, gain, stay_time, reach_time, model_params", "def convert_all(data_file, dest_npy, class_names):\n class_names_to_ids = dict(zip(class_names, range(len(class_names))))\n data = [[] for _ in range(len(class_names))]\n with open(data_file) as fp:\n for i, line in enumerate(fp):\n row = line.strip().split('\\t')\n if len(row) == 4:\n key, url, class_name, feature_str = row\n else:\n key, class_name, feature_str = row\n feature = [float(x) for x in feature_str.strip().split(\",\")]\n class_id = class_names_to_ids[class_name]\n data[class_id].append(feature)\n sys.stdout.write('\\r>> load %d samples without verification label' % (i+1))\n sys.stdout.write('\\n')\n sys.stdout.flush()\n data = np.array(data)\n np.save(dest_npy, data)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_7q.pack(_x.s_x, _x.s_y, _x.f_x, _x.f_y, _x.step_size, _x.bias_param, _x.max_iteration))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def test_save_npy(temp_dir):\n data = np.array([[1, 2, 3], [4, 5, 6]])\n save_npy(temp_dir, data, step=1)\n\n assert os.path.exists(os.path.join(temp_dir, \"npy\", \"1.npy\"))", "def save_datasets(self):\n if self.processed_extension == '.csv':\n # Save to csv\n logger.info(f'Saving sets to csv:')\n \n # TRAIN\n logger.info(f'train: {self.train_path}')\n \n # Concatenate X and y\n train_data = self.train_data[0]\n train_data['TARGET'] = self.train_data[1]\n \n # Save as csv\n train_data.to_csv(self.train_path, index = False)\n \n \n # VAL\n logger.info(f'val: {self.val_path}')\n \n # Concatenate X and y\n val_data = self.val_data[0]\n val_data['TARGET'] = self.val_data[1]\n \n # Save as csv\n val_data.to_csv(self.val_path, index = False)\n \n # TEST\n logger.info(f'test: {self.test_path}')\n \n # Concatenate X and y\n test_data = self.test_data[0]\n test_data['TARGET'] = self.test_data[1]\n \n # Save as csv\n self.test_data.to_csv(self.test_path, index = False)\n \n elif self.processed_extension == '.npz':\n # Convert y to numpy array\n if isinstance(self.train_data[1], pd.Series):\n self.train_data[1] = self.train_data[1].to_numpy()\n if isinstance(self.val_data[1], pd.Series):\n self.val_data[1] = self.val_data[1].to_numpy()\n if isinstance(self.test_data[1], pd.Series):\n self.test_data[1] = self.test_data[1].to_numpy()\n \n # Save to npz (scipy sparse)\n logger.info(f'Saving sets to npz:')\n\n logger.info(f'train: {self.train_path}')\n train_data = [self.train_data[0], np.reshape(self.train_data[1], (-1,1))]\n sparse.save_npz(self.train_path, sparse.hstack(train_data))\n \n logger.info(f'val: {self.val_path}')\n val_data = [self.val_data[0], np.reshape(self.val_data[1], (-1,1))]\n sparse.save_npz(self.val_path, sparse.hstack(val_data))\n\n logger.info(f'test: {self.test_path}')\n test_data = [self.test_data[0], np.reshape(self.test_data[1], (-1,1))]\n sparse.save_npz(self.test_path, sparse.hstack(test_data))\n\n else:\n raise AttributeError(f'Wrong extension: {self.processed_extension}')\n \n 
self.input_size = self.train_data[0].shape[1]\n logger.info(f'Saved datasets.')", "def load(filename):\n return np.load(filename)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_12d2f3d().pack(_x.position.x, _x.position.y, _x.position.z, _x.approach.x, _x.approach.y, _x.approach.z, _x.binormal.x, _x.binormal.y, _x.binormal.z, _x.axis.x, _x.axis.y, _x.axis.z, _x.width.data, _x.score.data, _x.sample.x, _x.sample.y, _x.sample.z))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.tsp_turtles\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.conveyor_turtle\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.catch_turtle\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', chip_ids=self.chip_ids, core_ids=self.core_ids, cx_ids=self.cx_ids)", "def save_obsarray_to_pickle(self, obs_array, dest_dir):\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n dump_array = obs_array\n pkl_name = 'batch' + str(len(os.listdir(dest_dir)))\n dump_path = os.path.join(dest_dir, pkl_name)\n self.pickledump(dump_array, dump_path)\n return pkl_name", "def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', core_ids=self.core_ids, cx_ids=self.cx_ids)", "def outputPulses(self,filename):\n np.save(filename,self.getData())\n return", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_b7d().pack(_x.decision, _x.distance, _x.oriX, _x.oriY, _x.oriZ, _x.placX, _x.placY, _x.placZ))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(_get_struct_i().pack(self.numberOfTSPTurtles))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def generate(inputFilename, outputFilename = defaultFileName, \n sizeOfReducedSample = DEFSIZEOFREDUCEDSAMPLE, \n centerEta = DEFCENTERETA, centerPhi = DEFCENTERPHI): \n listOfSignals = convert(inputFilename)\n arrayOfSignals = np.array(listOfSignals)\n arrayOfSignals.shape\n np.save(outputFilename, arrayOfSignals, allow_pickle=False)\n print(\"npy array name: \",outputFilename)", "def 
serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_6d2I2iB().pack(_x.x, _x.y, _x.z, _x.yaw, _x.v_des, _x.a_des, _x.t_start.secs, _x.t_start.nsecs, _x.duration.secs, _x.duration.nsecs, _x.relative))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_2ib6d12B().pack(_x.Timestamp_sec, _x.Timestamp_nsec, _x.IdModulo, _x.InputVolA, _x.InputVolB, _x.InputCorrA, _x.InputCorrB, _x.OutputAnlg1, _x.OutputAnlg2, _x.InputDig1, _x.InputDig2, _x.InputDig3, _x.InputDig4, _x.OutputDig1, _x.OutputDig2, _x.OutputDig3, _x.OutputDig4, _x.OutputDig5, _x.OutputDig6, _x.OutputDig7, _x.OutputDig8))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def load_data(file_to_read):\n\n data = np.recfromtxt(file_to_read)\n data = np.asarray(data)\n\n return data", "def test_convert_numpy_to_libsvm(self):\n\n file = tempfile.NamedTemporaryFile(delete=False)\n\n # write to temporary files\n write_data_to_xlearn_format(self.X, self.y, file.name)\n\n # load data back and compare if they are the same as original data\n X_true, y_true = load_svmlight_file(file.name)\n file.close()\n if os.path.exists(file.name):\n os.remove(file.name)\n\n assert np.all(np.isclose(self.X, X_true.todense()))\n assert np.all(self.y.ravel() == y_true.ravel())", "def save(self, directory):\n for field in self.save_fields:\n np.save(pjoin(directory, field+'.npy'), self.__dict__[field])", "def dumpToNpy(dt, filename):\n np.save(filename, dt)", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(self.thumb.tostring())\n buff.write(self.index.tostring())\n buff.write(self.middle.tostring())\n buff.write(self.ring.tostring())\n buff.write(self.little.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def store (input, filename) :\n cout = open (filename, 'w')\n pickle.dump (input, cout)\n cout.close ()", "def serialize_numpy(self, buff, numpy):\n try:\n pass\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def save_data(features, labels, mask, file_name):\n label = labels[mask]\n label = label.reshape((len(label), 1))\n data = np.concatenate((features[mask, :], label), axis = 1)\n np.save(file_name, data)", "def read_array(self, filename):\n extension = filename.split('.')[-1] # Get file extension\n if extension == 'mat':\n array = sci.loadmat(filename)\n elif extension == 'npy':\n array = np.load(filename)\n else:\n print('Error!!! 
Unrecognised file type for read_array()')\n array = None\n return array", "def save(self,filepath):\n d = self.X.tocoo(copy=False)\n v = self.col_view.tocoo(copy=False)\n np.savez(filepath,row=d.row,col=d.col,data=d.data,shape=d.shape,\n v_row=v.row,v_col=v.col,v_data=v.data,v_shape=v.shape)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_h16fh8f().pack(_x.id, _x.age, _x.velocidad_relativa_x, _x.velocidad_relativa_y, _x.velocidad_absoluta_x, _x.velocidad_absoluta_y, _x.velocidad_absoluta_sigma_x, _x.velocidad_absoluta_sigma_y, _x.bounding_box_centro_x, _x.bounding_box_centro_y, _x.bounding_box_largo, _x.bounding_box_ancho, _x.object_box_centro_x, _x.object_box_centro_y, _x.object_box_orientacion, _x.object_box_size_x, _x.object_box_size_y, _x.clasificacion, _x.clasificacion_age, _x.clasificacion_certeza, _x.punto_cercano_x, _x.punto_cercano_y, _x.punto_referencia_x, _x.punto_referencia_y, _x.punto_referencia_sigma_x, _x.punto_referencia_sigma_y))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def save_data_pickle(self, save_full=False):\n self.train.to_pickle('../input/train_mod.pkl')\n self.test.to_pickle('../input/test_mod.pkl')\n if save_full:\n self.train_full.to_pickle('../input/train_full_mod.pkl')", "def to_numpy(self, **kwargs):\n pass", "def pickle_dump_files():\n with open('data/' + dataset_name + '_' + model_name + '_' + 'predictions', 'wb') as f:\n pickle.dump(predictions, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'state_sentences', 'wb') as f:\n pickle.dump(final_state_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'decoded_sentences', 'wb') as f:\n pickle.dump(final_decoded_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'ids', 'wb') as f:\n pickle.dump(idx, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'exemplars', 'wb') as f:\n pickle.dump(exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'counter_exemplars', 'wb') as f:\n pickle.dump(counter_exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_exemplar_words', 'wb') as f:\n pickle.dump(top_exemplar_words, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_counter_exemplar_words', 'wb') as f:\n pickle.dump(top_counter_exemplar_words, f)", "def generate_LAS(self, filename):\n try:\n filename = filename.split('.')[0] + '.LAS'\n headerobj = self.__make_header__()\n fileobj = lasfile.File(filename, mode='w', header=headerobj)\n fileobj.X = self.flat_array[self.x_idx, :]\n fileobj.Y = self.flat_array[self.y_idx, :]\n fileobj.Z = self.flat_array[self.z_idx, :]\n fileobj.Intensity = self.flat_array[self.temp_idx, :]\n fileobj.close()\n if isinstance(self.mess_inst, MessagesGUI):\n self.mess_inst.message('.LAS file saved: {}'.format(filename))\n else:\n print('.LAS file saved: {}'.format(filename))\n\n except AttributeError as err:\n if isinstance(self.mess_inst, MessagesGUI):\n self.mess_inst.message('AttributeError [{}] when attempting to generate .LAS file'.format(err))\n else:\n print('AttributeError [{}] when attempting to generate .LAS file'.format(err))\n except TypeError as err:\n if isinstance(self.mess_inst, MessagesGUI):\n self.mess_inst.message('TypeError [{}] when 
attempting to generate .LAS file'.format(err))\n else:\n print('TypeError [{}] when attempting to generate .LAS file'.format(err))", "def to_txt(self, fpath):\n np.savetxt(fpath, self._arr.T)", "def test_convert_numpy_to_libffm(self):\n file = tempfile.NamedTemporaryFile(delete=False)\n\n # write data to libffm format\n write_data_to_xlearn_format(self.X, self.y, file.name, fields=self.fields)\n\n # read back data from file\n X_true, y_true, field_true = self._read_libffm_file(file.name)\n file.close()\n if os.path.exists(file.name):\n os.remove(file.name)\n\n assert np.all(np.isclose(self.X, X_true))\n assert np.all(self.y.ravel() == y_true.ravel())\n assert np.all(self.fields.ravel() == field_true.ravel())", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_2q3d3q().pack(_x.max_features, _x.window_size, _x.quality, _x.min_distance, _x.harris, _x.size_block, _x.pyramid_lvl, _x.mask_border))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def save(self, filename):\n with open(filename, 'w') as f:\n pickle.dump((self.components, self.mean), f)", "def load(self, directory):\n for field in self.save_fields:\n self.__dict__[field] = np.load(pjoin(directory, field)+'.npy')[()]", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_ihih3i3d2i2d().pack(_x.originId, _x.originType, _x.destinationId, _x.destinationType, _x.range, _x.ts, _x.seq, _x.rxPower, _x.channel, _x.datarate, _x.prf, _x.preambleLength, _x.txGain, _x.angle))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def to_pickle(self, filename, **kwargs):\n\n self._check_is_fitted()\n\n return dump(self, filename, **kwargs)", "def save(self, path_to_save):\n for item in self.data_array:\n item.save(path_to_save+item.file_name)", "def images_to_numpy(imageList,saveAs):\n saveAs=os.path.abspath(saveAs)\n if os.path.exists(saveAs):\n print(\"loading data from %s ...\"%(os.path.basename(saveAs)))\n data=np.load(saveAs)\n if len(data)==len(imageList):\n print(\" loaded %.02f MB array\"%(data.nbytes/2**20),data.shape)\n return data\n else:\n print(\"... but I see %d images! 
Starting over.\"%(len(imageList)))\n # we have to create the data file\n for n,fname in enumerate(imageList):\n imageData=mpimg.imread(fname)\n if n==0:\n sizeY,sizeX=imageData.shape\n data=np.empty((len(imageList),sizeY,sizeX))\n print(\"creating data for %s ...\"%(os.path.basename(saveAs)))\n elif n%20==0:\n print(\" %.02f%% ...\"%(100*n/len(imageList)))\n data[n]=imageData\n print(\"saving %.02f MB array to disk ...\"%(data.nbytes/1024/1024))\n np.save(saveAs,data)\n print(\"created:\",saveAs)\n return data", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_2I2d4fh2B.pack(_x.date, _x.time, _x.longitude_RTK, _x.latitude_RTK, _x.height_above_sea_RTK, _x.velocity_north, _x.velocity_east, _x.velocity_ground, _x.yaw, _x.position_flag, _x.yaw_flag))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def save_data(self, speaker_name, data):\n if not os.path.exists(self.data_dir):\n os.makedirs(self.data_dir)\n \n path = os.path.join(self.data_dir, speaker_name + \".pkl\")\n np.array(data).dump(path)" ]
[ "0.69759345", "0.69321483", "0.6812905", "0.67736834", "0.6720767", "0.66823816", "0.6668173", "0.66250736", "0.66020155", "0.6568645", "0.65596145", "0.6404797", "0.6398163", "0.6341175", "0.6324566", "0.6324464", "0.626959", "0.623348", "0.6216238", "0.6199442", "0.61983526", "0.6197138", "0.6180286", "0.6160731", "0.613573", "0.6113995", "0.61070246", "0.60663086", "0.6061822", "0.60615796", "0.6043909", "0.6038343", "0.5996795", "0.59645367", "0.5959656", "0.5959656", "0.59592456", "0.5955196", "0.59534186", "0.5947535", "0.59273", "0.59248096", "0.5895777", "0.5892755", "0.58867526", "0.58718497", "0.58683944", "0.5867411", "0.586541", "0.58496773", "0.5845797", "0.58319825", "0.58319825", "0.5826698", "0.58237886", "0.58137983", "0.5809714", "0.5803135", "0.5797934", "0.5793539", "0.579006", "0.578283", "0.57780874", "0.57701033", "0.5769423", "0.57678944", "0.57650393", "0.5759373", "0.5759148", "0.5732539", "0.5728565", "0.57272154", "0.5717288", "0.5716825", "0.571499", "0.5708091", "0.57063615", "0.5705033", "0.57039714", "0.5702143", "0.56993735", "0.56974363", "0.5696711", "0.5683216", "0.5682857", "0.5682614", "0.5679175", "0.5678298", "0.56747025", "0.5672977", "0.5670124", "0.5664134", "0.56588906", "0.56588084", "0.56553036", "0.56544006", "0.564148", "0.56400067", "0.56277114", "0.56276697", "0.56260926" ]
0.0
-1
Return a list containing the predicted output for each frame image from the retrained CNN model
def predict_on_frames(frames):
    frame_predictions = []
    print("Total Number of Frames ",len(frames))
    count = 0
    #for i, frame in tqdm(enumerate(frames)):
    for frame in tqdm(frames):
        filename = frame[0]
        label = frame[1]
        frameCount = frame[2]
        if(count%200 == 0):
            print(count)
        prediction = label_image.get_prediction(filename)
        frame_predictions.append([prediction, label, frameCount])
        count = count + 1
    return frame_predictions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(model, images):\n return model.predict_classes(images)", "def predict_batch(model, images):\n if images is not None:\n y_predicted = model.predict(images)\n predicted_classes = np.argmax(y_predicted, axis=1)\n return predicted_classes.tolist()\n else:\n return []", "def extract_detections(self):\n self.rescue_model.setInput(self.human_blob)\n self.predictions = self.rescue_model.forward()", "def predict(self, images):\n\t\t#testing_dataset = tf.data.Dataset.from_tensor_slices(images)\n\t\ttf.keras.backend.set_learning_phase(0)\n\t\ttesting_dataset = tf.data.Dataset.from_tensor_slices(np.asarray(images)).map(lambda x: tf.image.resize(x, [self.image_size, self.image_size]) / 255.0)\n\t\t#testing_dataset_shape = tf.data.Dataset.from_tensor_slices(np.full((len(images), 2), 500, dtype=np.int32))\n\t\ttesting_iterator_X = tf.data.Dataset.zip((testing_dataset, )).batch(self.batch_size).make_initializable_iterator()\n\n\t\tself.sess.run(testing_iterator_X.initializer)\n\t\ttesting_handle_X = self.sess.run(testing_iterator_X.string_handle())\n\n\t\tfinal_output = np.zeros([len(images), 500, 500, num_classes])\n\t\tj = 0\n\t\tcount = 0\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\t[test_output] = self.sess.run(\n\t\t\t\t\t[self.output],\n\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\tself.is_training: False,\n\t\t\t\t\t\t\tself.handle_X: testing_handle_X,\n\t\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t\tthis_len = len(test_output)\n\t\t\t\tfor z in range(len(test_output)):\n\t\t\t\t\tfor dim in range(num_classes):\n\t\t\t\t\t\tfinal_output[count+z:count+z+1, :, :, dim] = scipy.misc.imresize(test_output[z, :, :, dim], [500, 500])\n\n\t\t\t\t#final_output[count:count+this_len, :, :, :] = test_output\n\t\t\t\tto = final_output[count:count+this_len, :, :, :].argmax(axis=-1)\n\t\t\t\t'''\n\t\t\t\tpdb.set_trace()\n\t\t\t\tfor z in range(this_len):\n\t\t\t\t\tplt.matshow(to[z])\n\t\t\t\t\tplt.colorbar()\n\t\t\t\t\tplt.show()\n\t\t\t\t'''\n\t\t\t\tcount += this_len\n\t\t\t\tprint(f'Batch: {j}')\n\t\t\t\tj += 1\n\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\tbreak\n\t\treturn final_output", "def predict_for_frame(model, cv_img):\n faces = crop_faces([cv_img], only_one=False, using_bundled_library=True)[0]\n\n if len(faces) == 0:\n return []\n\n pre_processing = transforms.Compose([\n transforms.Grayscale(num_output_channels=1),\n transforms.Resize(tuple(config[\"resolution\"])),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5], std=[0.5]),\n ])\n\n pre_processed_faces = []\n faces_coords = []\n for face in faces:\n (x, y, w, h) = face\n face_cv = crop_cv_img(cv_img, x, y, w, h)\n face_pil = pre_processing(pl.Image.fromarray(face_cv))\n pre_processed_faces.append(face_pil)\n faces_coords.append((x, y, w, h))\n\n x = torch.stack(pre_processed_faces)\n predictions = torch.nn.Softmax(dim=1)(model.forward(x))\n\n output = []\n\n for prediction, coords in zip(predictions, faces_coords):\n output.append({\n \"prediction\": prediction,\n \"position\": coords\n })\n\n return output", "def predict_all(self, imgs):\n return self._predict(imgs)", "def predict(self, images, batch_size):\n pass", "def forward(self, img):\n c2_outputs = self.run_all_layers(img)[-1]\n c2_outputs = torch.cat(\n [c2_out[:, None, :] for c2_out in c2_outputs], 1)\n return c2_outputs", "def get_classification_predictions(self):\n predictions = []\n for i, test_batch in enumerate(tqdm.tqdm(self.loader)):\n if self.tta_fn is not None:\n pred_out = self.tta_fn(batch=test_batch[0].cuda())\n else:\n # (batch_size, n_classes)\n pred_out = 
apply_nonlin(self.model(test_batch[0].cuda()))\n # for each prediction (1,) in pred_out (n, 4): post process\n for pred in pred_out:\n # (4, )\n probability = pred.cpu().detach().numpy()\n for prob_i in probability:\n # (1,)\n predictions.append(prob_i)\n return predictions", "def predict(self,url):\n\n # get image\n response = requests.get(url)\n \n img = Image.open(BytesIO(response.content))\n\n transform = transforms.Compose([transforms.Grayscale(),\n transforms.Resize((128,128)),\n transforms.ToTensor()])\n\n img = transform(img).unsqueeze(0)\n\n if torch.cuda.is_available(): \n img = img.cuda() \n\n out = self.model(img)\n\n classes = ['Jazzmaster','Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster','Telecaster']\n\n if torch.cuda.is_available():\n\n logs = out.cpu().data.numpy()\n \n else:\n\n logs = out.data.numpy()\n \n return [classes[logs.argmax()]]", "def _predict(self, frames, transform):\n clip = torch.from_numpy(np.array(frames))\n # Transform frames and append batch dim\n sample = torch.unsqueeze(transform(clip), 0)\n sample = sample.to(torch_device())\n output = self.model(sample)\n scores = nn.functional.softmax(output, dim=1).data.cpu().numpy()[0]\n return scores", "def get_mc_pred(output,eng,frame,nImg):\n preds_2d = [];\n preds_3d = [];\n \n c,s = ut.calc_cent_scale(frame);\n \n center = matlab.double([list(c)],(1,2));\n scale = matlab.double([s],(1,1));\n \n for i in tqdm(range(0,nImg)):\n preds_2d.append(eng.transformMPII(output[\"W_final\"][2*i:2*(i+1)],center,scale,matlab.double([64,64],(1,2)),1));\n preds_3d.append(output[\"S_final\"][3*i:3*i + 3]);\n \n print(\"Converting estimates to Python format...\");\n preds_2d = np.array(preds_2d);\n preds_3d = np.array(preds_3d);\n \n preds_2d = preds_2d.swapaxes(1, 2);\n preds_3d = preds_3d.swapaxes(1, 2);\n \n return preds_2d, preds_3d;", "def class_imgs(list_img):\n numberimg = len(list_img)\n resize(net, numberimg, cursize)\n i = 0\n for img in list_img:\n image = caffe.io.load_image(img)\n transformed_image = transformer.preprocess('data', image)\n net.blobs['data'].data[i] = transformed_image\n i = i + 1\n\n output = net.forward()\n\n results = []\n for n in range(0, numberimg):\n themax = output['prob'][n].argmax()\n results.append({'filename':list_img[n], 'class': themax, 'prob': output['prob'][n].tolist()})\n\n return results", "def predict_data(img): \n return gennet.predict_data(img, 'Resnet50')", "def predict(self, img_path):\n\n img = cv2.imread(img_path)\n img0 = img.copy()\n \n #This happens inside datasets\n # Convert\n img = letterbox(img, new_shape=self.img_size)[0]\n\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n \n #this happens on detect\n img = torch.from_numpy(img).to(self.device)\n img = img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n pred = self.model(img)[0]\n\n # Apply NMS\n pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=self.agnostic_nms)\n \n # Process detections\n for i, det in enumerate(pred): # detections per image\n if det is not None and len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()\n\n pred = [d.cpu().detach().numpy() for d in pred if d is not None]\n pred = pred[0] if len(pred) else pred\n \n pred = [[[x1, y1, x2, y2],conf] for x1, y1, x2, y2, conf, clss in pred]\n\n return pred", "def 
get_batch_predictions_MFE(rnn, X, target):\n\n out = rnn.forward(X, mean_field_inference=True)\n arr_preds = nn.functional.softmax(out, dim=-1).data.cpu().numpy()\n arr_target = target.detach().cpu().numpy()\n\n return arr_preds, arr_target", "def get_batch_predictions(rnn, X, target):\n\n out = rnn.forward(X)\n arr_preds = nn.functional.softmax(out, dim=-1).data.cpu().numpy()\n arr_target = target.detach().cpu().numpy()\n\n return arr_preds, arr_target", "def classify_images():\n\n # Load the desired image\n img_path = 'dataset/colorize_images/n02085782_919.jpg'\n img = image.load_img(img_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n model = InceptionV3(weights=\"imagenet\")\n preds = model.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # (one such list for each sample in the batch)\n print('Predicted:', decode_predictions(preds, top=3)[0])", "def predict(self, images, batch_size=1):\n predictions = []\n \n for image in images.astype(\"float\"):\n filtered_image = self.apply_filter(image)\n _, pred = cv2.threshold(filtered_image.astype('uint8'), 0, 1, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n predictions.append(pred)\n \n return np.reshape(predictions, images.shape)", "def warmup_predict(model, imgs, Npred):\n H = augmented_state_matrix(model[:-1], imgs, 0)\n h0 = H[-2]\n y0 = imgs[-1]\n return predict(model, y0, h0, Npred)", "def predict_charac(img):\n model_ft = models.resnet34(pretrained=True)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 27)\n\n optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.001)\n folder = os.path.dirname(__file__)\n checkpoint_path_res34 = os.path.join(folder,\"res34_albu_26.pt\")\n model, _, _, _ = load_ckp(checkpoint_path_res34, model_ft, optimizer_ft, DEVICE)\n model = model.to(DEVICE)\n model.eval()\n\n img = preprocess(img)\n img = img.to(DEVICE)\n all_preds = []\n with torch.no_grad():\n outputs = model(img)\n _, preds = torch.max(outputs, 1)\n\n return preds", "def model_predict(img, model, preprocess_func):\n img = img.resize((224, 224)) # Each model expects shape: (224, 224, 3)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n\n x = preprocess_func(x)\n preds = model.predict(x)\n return preds", "def model_predict(classifier, X_test:list) -> list:\n y_predict = classifier.predict(X_test)\n return y_predict", "def predict(self, inputs, oversample=True):\r\n # Scale to standardize input dimensions.\r\n input_ = np.zeros((len(inputs),\r\n self.image_dims[0],\r\n self.image_dims[1],\r\n inputs[0].shape[2]),\r\n dtype=np.float32)\r\n print inputs[0].shape\r\n print input_.shape\r\n for ix, in_ in enumerate(inputs):\r\n input_[ix] = caffe.io.resize_image(in_, self.image_dims)\r\n\r\n # if oversample:\r\n # # Generate center, corner, and mirrored crops.\r\n # input_ = caffe.io.oversample(input_, self.crop_dims)\r\n # else:\r\n # # Take center crop.\r\n # center = np.array(self.image_dims) / 2.0\r\n # crop = np.tile(center, (1, 2))[0] + np.concatenate([\r\n # -self.crop_dims / 2.0,\r\n # self.crop_dims / 2.0\r\n # ])\r\n # crop = crop.astype(int)\r\n # input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]\r\n\r\n # Classify\r\n caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],\r\n dtype=np.float32)\r\n for ix, in_ in enumerate(input_):\r\n caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)\r\n out = self.forward_all(**{self.inputs[0]: caffe_in})\r\n predictions = 
out[self.outputs[0]]\r\n\r\n # # For oversampling, average predictions across crops.\r\n # if oversample:\r\n # predictions = predictions.reshape((len(predictions) / 10, 10, -1))\r\n # predictions = predictions.mean(1)\r\n\r\n return predictions", "def predict_car():\n img = open_image(request.files['image'])\n pred_class, pred_idx, outputs = learn.predict(img)\n return str(pred_class)", "def predict_all_images():\n #Read config\n config = read_config()\n\n #read model\n model = read_model(config[\"model_path\"], config)\n tifs = glob.glob(os.path.join(\"data\",\"**\",\"*.tif\"))\n for tif in tifs:\n print(tif)\n prediction = predict_image(model, tif, score_threshold = 0.1, max_detections= 200,return_plot=False)\n\n #reshape and save to csv\n df = pd.DataFrame(prediction)\n df.columns = [\"xmin\",\"ymin\",\"xmax\",\"ymax\"]\n\n #save boxes\n file_path = os.path.splitext(tif)[0] + \".csv\"\n df.to_csv(file_path)", "def prediction():\r\n\r\n loaded_model = load_model('imageTrainedModel.h5')\r\n print(loaded_model.summary())\r\n\r\n # retrieve history also:\r\n f = open('history.pckl', 'rb')\r\n history = pickle.load(f)\r\n f.close()\r\n\r\n print(history.keys())\r\n print(history)\r\n\r\n epochs = len(history['loss']) # length of the list stored at 'loss'\r\n # Plot losses for train and validation\r\n plt.figure()\r\n plt.title('Loss as training progresses')\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Loss')\r\n plt.plot(history['loss'], label='Train Error')\r\n plt.plot(history['val_loss'], label='Val Error')\r\n plt.legend()\r\n plt.show()\r\n\r\n # Plot metrics\r\n plt.plot(history['acc']) # use same metric that was used for training. 'history' is a dictionary.\r\n plt.title('Accuracy as training progresses')\r\n plt.ylabel('Accuracy (%)')\r\n plt.xlabel('Epoch')\r\n ymax = max(history['acc'])\r\n xpos = history['acc'].index(ymax)\r\n xmax = xpos\r\n plt.annotate('Maximum accuracy: %s' % round(ymax, 3),\r\n xy=(xmax, ymax), xycoords='data',\r\n xytext=(0.5, 0.5), textcoords='axes fraction',\r\n fontsize=12)\r\n plt.show()\r\n\r\n # make predictions using x_test\r\n test_y_predictions = loaded_model.predict(x_test, batch_size=None, verbose=1, steps=None)\r\n test_y_predictions = np.around(test_y_predictions, decimals=0) # round to whole integers\r\n true_false_array = np.equal(y_test, test_y_predictions) # test of equality.\r\n true_count = np.sum(true_false_array) # number of correctly categorised images\r\n false_count = true_false_array.shape[0] - true_count # number of images not correctly categorised\r\n\r\n # Plot predicted and actual image categories\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111)\r\n plt.title('Classification of Image Categories')\r\n plt.ylabel('Number of Images')\r\n plt.xlabel('Image Classification')\r\n label = ['Correct', 'Incorrect']\r\n index = np.arange(len(label))\r\n plt.xticks(index, label, fontsize=10, rotation=0)\r\n ax1.bar(index, [true_count, false_count])\r\n plt.show()", "def make_predictions(file_list, model, is_game=False):\n temp_list = []\n for wav_file in glob.glob(file_list):\n temp_list.append(reshape_and_predict(filepath=wav_file, saved_model=model, is_game=is_game))\n\n return temp_list", "def eval(self): \n inputs,enc_input_weights, outputs, dec_input_weights = self.get_batch()\n predicted_ids = self.model.step(self.sess, inputs, enc_input_weights) \n print(\"=\"*20)\n for i in range(FLAGS.batch_size):\n print(\"* %dth sample target: %s\" % (i,str(outputs[i,1:]-2)))\n for predict in predicted_ids[i]:\n print(\"prediction: 
\"+str(predict)) \n print(\"=\"*20)", "def get_features(model, image_filename, images_folder_path):\n\n img = image.load_img(images_folder_path + image_filename,\n target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n return model.predict(x).reshape(-1)", "def model_predict(img_path):\n img = open_image(img_path)\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n return pred_class", "def predict(self, n):\n X = self.X[n:n+1]\n outputs, hidden = self.feed_forward(X)\n pca_reconstruction = self.pca.inverse_transform(X)\n hidden_expected = dot(self._inverse_activation(outputs), pinv(self.W_output))[:,:-1]\n hidden_reconstruction = self.pca.inverse_transform(dot(self._inverse_activation(hidden_expected), pinv(self.W_hidden))[:,:-1])\n return (argmax(outputs),\n pca_reconstruction.reshape(self.dataset.images.shape[1:]),\n hidden_reconstruction.reshape(self.dataset.images.shape[1:]))", "def predict(model, img, target_size, top_n=3):\r\n print('img.size=',img.size)\r\n if img.size != target_size:\r\n img = img.resize(target_size)\r\n \r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = preprocess_input(x)\r\n preds = model.predict(x)\r\n return decode_predictions(preds,top=top_n)[0]", "def batch_predict(filenames, net):\n N, C, H, W = net.blobs[net.inputs[0]].data.shape\n F = net.blobs[net.outputs[0]].data.shape[1]\n Nf = len(filenames)\n allftrs = np.zeros((Nf, F))\n #allpreds = []\n for i in range(0, Nf, N):\n tic = time.time()\n in_data = np.zeros((N, C, H, W), dtype=np.float32)\n\n batch_range = range(i, min(i+N, Nf))\n batch_filenames = [filenames[j] for j in batch_range]\n Nb = len(batch_range)\n\n batch_images = np.zeros((Nb, 3, H, W))\n for j,fname in enumerate(batch_filenames):\n im = np.array(Image.open(fname))\n \n if len(im.shape) == 2:\n im = np.tile(im[:,:,np.newaxis], (1,1,3))\n # RGB -> BGR\n im = im[:,:,(2,1,0)]\n # mean subtraction\n im = im - np.array([103.939, 116.779, 123.68])\n # resize\n im = imresize(im, (H, W))\n # get channel in correct dimension\n im = np.transpose(im, (2, 0, 1))\n batch_images[j,:,:,:] = im\n\n # insert into correct place\n in_data[0:len(batch_range), :, :, :] = batch_images\n \n # predict features\n ftrs = predict(in_data, net)\n toc = time.time()\n \n for j in range(len(batch_range)):\n allftrs[i+j,:] = ftrs[j,:]\n\n return allftrs", "def predict(frame):\n cv_net = cv2.dnn.readNetFromTensorflow(PATH_TO_MODEL_WEIGHTS, PATH_TO_GRAPH)\n labels = coco_label_reader(PATH_TO_LABELS)\n\n rows, cols, _ = frame.shape\n blob = cv2.dnn.blobFromImage(frame, size=(rows, cols), swapRB=True, crop=False)\n cv_net.setInput(blob)\n cv_out = cv_net.forward()\n boxes = []\n classes = []\n for detection in cv_out[0, 0, :, :]:\n score = float(detection[2])\n if score > 0.3:\n left = detection[3] * cols\n top = detection[4] * rows\n right = detection[5] * cols\n bottom = detection[6] * rows\n class_ = int(detection[1])\n if left > right:\n left, right = right, left\n if top > bottom:\n top, bottom = bottom, top\n boxes.append([left, top, right, bottom])\n classes.append(labels[class_])\n return non_max_suppression(np.asarray(boxes), np.asarray(classes))", "def prediction_on_a_image(self, input, output,model_saved_path):\n\n # load the saved model\n if os.path.isfile(model_saved_path) is False:\n raise IOError('trained model: %s not exist' % model_saved_path)\n\n clf = joblib.load(model_saved_path)\n\n # split a large image to many small ones\n patch_w = 500 # 
parameters.get_digit_parameters(\"\", \"train_patch_width\", None, 'int')\n patch_h = 500 # parameters.get_digit_parameters(\"\", \"train_patch_height\", None, 'int')\n overlay_x = 0 # parameters.get_digit_parameters(\"\", \"train_pixel_overlay_x\", None, 'int')\n overlay_y = 0 # parameters.get_digit_parameters(\"\", \"train_pixel_overlay_y\", None, 'int')\n\n img_folder = os.path.dirname(input)\n img_name = os.path.basename(input)\n inf_list_txt = 'inf_image_list.txt'\n with open(inf_list_txt, 'w') as txt_obj:\n txt_obj.writelines(img_name + '\\n')\n\n img_patches = build_RS_data.make_dataset(img_folder, inf_list_txt, patch_w, patch_h, overlay_x, overlay_y,\n train=False)\n\n for img_idx, aImg_patches in enumerate(img_patches):\n inf_output_dir = 'inf_results' #os.path.splitext(img_name)[0]\n os.system('mkdir -p '+inf_output_dir)\n os.system('rm '+inf_output_dir+'/*')\n\n ## parallel inference patches\n # but it turns out not work due to the Pickle.PicklingError\n # not working due to mulitple parameters. Jan 9, 2019, hlc\n # use multiple thread\n num_cores = multiprocessing.cpu_count()\n print('number of thread %d' % num_cores)\n # theadPool = mp.Pool(num_cores) # multi threads, can not utilize all the CPUs? not sure hlc 2018-4-19\n theadPool = Pool(num_cores) # multi processes\n\n # inference_one_patch_svm(img_idx, image_count, p_idx, patch_count, inf_output_dir, img_patch, scaler,clf)\n\n parameters_list = [\n (img_idx, len(img_patches), idx, len(aImg_patches), inf_output_dir, img_patch, self._scaler, clf)\n for (idx, img_patch) in enumerate(aImg_patches)]\n # results = theadPool.map(inference_one_patch_svm, parameters_list) # not working\n results = theadPool.starmap(inference_one_patch_svm, parameters_list) # need python3\n print('result_list', results)\n\n # for p_idx, img_patch in enumerate(aImg_patches):\n # # read images\n # patch_data = build_RS_data.read_patch(img_patch) # read_whole_x_pixels(input)\n #\n # nbands, height, width = patch_data.shape\n #\n # X_predit = patch_data.reshape(nbands, -1)\n # X_predit = np.transpose(X_predit, (1, 0))\n #\n # if os.path.isfile(scaler_saved_path) and self._scaler is None:\n # self._scaler = joblib.load(scaler_saved_path)\n # result = self._scaler.transform(X_predit)\n # X = result.tolist()\n # elif self._scaler is not None:\n # result = self._scaler.transform(X_predit)\n # X = result.tolist()\n # else:\n # X = X_predit\n # basic.outputlogMessage('warning, no pre-processing of data before prediction')\n #\n # # more method on prediction can be foudn in :\n # # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\n # pre_result = clf.predict(X)\n # result_img = pre_result.reshape((height, width))\n #\n # # save results\n # print('Save patch:%d/%d on Image:%d/%d , shape:(%d,%d)' %\n # (p_idx,len(aImg_patches), img_idx,len(img_patches), result_img.shape[0], result_img.shape[1]))\n #\n # # short the file name to avoid error of \" Argument list too long\", hlc 2018-Oct-29\n # file_name = \"I%d_%d\" % (img_idx, p_idx)\n #\n # save_path = os.path.join(inf_output_dir, file_name + '.tif')\n # build_RS_data.save_patch_oneband_8bit(img_patch,result_img.astype(np.uint8),save_path)\n #\n # with rasterio.open(input) as src_obj:\n # # Set spatial characteristics of the output object to mirror the input\n # kwargs = src_obj.meta\n # kwargs.update(\n # dtype=rasterio.uint8,\n # count=1)\n # # Create the file\n # with rasterio.open(output, 'w', **kwargs) as dst:\n # dst.write_band(1, 
result_img.astype(rasterio.uint8))\n # basic.outputlogMessage(\"save to %s\" % output)\n\n return True", "def predict(self, sess, img_data):\n\n with sess.as_default():\n new_image = self.preprocess(img_data, self.input_shape)\n input_feed = self.create_input_feed(sess, new_image, img_data)\n output_fetch = self.create_output_fetch(sess)\n all_classes, all_scores, all_bboxes = sess.run(output_fetch, input_feed)\n\n return all_classes, all_scores, all_bboxes", "def predict(self, X):\n\n pred = []\n for x_i in X:\n tmp = x_i\n p0 = self.model.predict(tmp.reshape(1,128,128,3))\n p1 = self.model.predict(np.fliplr(tmp).reshape(1,128,128,3))\n# p2 = self.model.predict(np.flipud(tmp).reshape(1,128,128,1))\n# p3 = self.model.predict(np.fliplr(np.flipud(tmp)).reshape(1,128,128,1))\n p = (p0[0] +\n np.fliplr(p1[0]) #+\n# np.flipud(p2[0]) +\n# np.fliplr(np.flipud(p3[0]))\n ) / 2#4\n pred.append(p)\n return np.array(pred)", "def predict(model, img, target_size=(229, 229)): #fixed size for InceptionV3 architecture\r\n if img.size != target_size:\r\n img = img.resize(target_size)\r\n\r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = preprocess_input(x)\r\n preds = model.predict(x)\r\n return preds[0]", "def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)", "def predict(model, img):\n\tx = image.img_to_array(img)\n\tx = np.expand_dims(x, axis=0)\n\tx = preprocess_input(x)\n\tpreds = model.predict(x)\n\treturn preds[0]", "def predict(self, X):\n y_prediction = []\n\n ###########################################################################\n # Implement this function. 
thats VERY easy to do\n for i, x in enumerate(X):\n l1 = np.matmul(x, self.p_net['W1']) + self.p_net['b1']\n l1 = np.array([self.relu(s) for s in l1])\n l2 = np.matmul(l1, self.p_net['W2']) + self.p_net['b2']\n l2 = np.array([self.relu(s) for s in l2])\n y_prediction.append(np.argmax(l2))\n ###########################################################################\n pass\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return y_prediction", "def predict(self, X):\n\n pred = []\n for x_i in X:\n tmp = x_i\n p0 = self.model.predict(tmp.reshape(1,128,128,1))\n p1 = self.model.predict(np.fliplr(tmp).reshape(1,128,128,1))\n# p2 = self.model.predict(np.flipud(tmp).reshape(1,128,128,1))\n# p3 = self.model.predict(np.fliplr(np.flipud(tmp)).reshape(1,128,128,1))\n p = (p0[0] +\n np.fliplr(p1[0]) #+\n# np.flipud(p2[0]) +\n# np.fliplr(np.flipud(p3[0]))\n ) / 2#4\n pred.append(p)\n return np.array(pred)", "def predict() -> Any:\n threshold = request.form.get(\"threshold\", type=float)\n source_size = request.form.get(\"source_size\", type=bool)\n images = request.files.getlist(\"images\")\n result = {}\n for image in images:\n input_image = prepare_input(image)\n if input_image is not None:\n output_image = model.predict(input_image, threshold, source_size)\n if output_image is not None:\n result[image.filename] = prepare_output(output_image)\n else:\n result[image.filename] = None\n else:\n result[image.filename] = None\n return result", "def forward(self, images, captions):\n\t\timages_features = self.encoder(images)\n\t\timages_features = images_features.permute(1,0,2)\n\t\tpredictions = self.decoder.forward(images_features, captions)\n\t\treturn predictions", "def get_predictors(self):\n\t\treturn self.predictors", "def predict_dataset(filenames, path, model, model_preprocess_function):\n y_predicted = []\n batch_size = 32\n batch = []\n for filename in filenames:\n batch.append(preprocess(path+filename, model_preprocess_function))\n if len(batch) >= batch_size:\n y_predicted = y_predicted + model.predict(np.array(batch)).tolist()\n batch = []\n y_predicted = y_predicted + model.predict(np.array(batch)).tolist()\n return y_predicted", "def forward(self,images):\n with torch.no_grad(): \n features = self.resnet(images)\n features = features.view(features.size(0),-1)\n features = self.embed(features)\n features = self.bn(features)\n return features", "def prediction(self, X):\n images = self.preprocess_images(X)\n return self.model.predict(images)", "def predict(input):\n pf = process_input(input)\n # Reshape data to be [samples][pixels][width][height]\n pf = pf.reshape(pf.shape[0], 1, 28, 28).astype('float32')\n # Normalize inputs from 0-255 to 0-1\n pf = pf / 255\n pr = classifier.predict_classes(pf)\n # Cast the numpy array predicted values as a list.\n return list(map(lambda x: int(x), pr))", "def predict(image):\n with tf.Session(graph=graph) as session:\n saver = tf.train.Saver()\n saver.restore(session, \"saved_models/model12.ckpt\")\n print(\"Model restored.\")\n feed_dict = {tf_sample_dataset : image}\n predictions = session.run(train_prediction, feed_dict=feed_dict)\n # Prints an array of softmax probabilities for each digit in the number\n print str(predictions)\n return np.argmax(predictions, 2)", "def predict(self, preprocessed_dict):\r\n preprocessed_images_fg = preprocessed_dict.get('images_fg')\r\n\r\n net_image = slim.conv2d(preprocessed_images_fg, 
num_outputs=5, kernel_size=3,\r\n padding='SAME', scope='psp_conv1')\r\n net_image = slim.batch_norm(net_image, is_training=self._is_training)\r\n net_image = slim.conv2d(net_image, num_outputs=4, kernel_size=3,\r\n padding='SAME', scope='psp_conv2')\r\n net_image = slim.batch_norm(net_image, is_training=self._is_training)\r\n # pyramid scence pooling\r\n pool1 = self.pyramid_pooling(net_image, (60, 60), 64, scope='pyramid_pooling1')\r\n pool2 = self.pyramid_pooling(net_image, (30, 30), 64, scope='pyramid_pooling2')\r\n pool3 = self.pyramid_pooling(net_image, (20, 20), 64, scope='pyramid_pooling3')\r\n pool4 = self.pyramid_pooling(net_image, (10, 10), 64, scope='pyramid_pooling4')\r\n\r\n net_image = tf.concat(values=[net_image, pool1, pool2, pool3, pool4], axis=3)\r\n net_image = slim.batch_norm(net_image, is_training=self._is_training)\r\n pred_trimap = slim.conv2d(net_image, num_outputs=3, kernel_size=3,\r\n padding='SAME', scope='psp_conv3')\r\n pred_trimap_soft = tf.nn.softmax(pred_trimap, axis=3)\r\n # background_trimap = tf.slice(pred_trimap, [0, 0, 0, 0], [-1, -1, -1, 1])\r\n # foreground_trimap = tf.slice(pred_trimap, [0, 0, 0, 1], [-1, -1, -1, 1])\r\n # unsure_trimap = tf.slice(pred_trimap, [0, 0, 0, 2], [-1, -1, -1, 1])\r\n background = tf.slice(pred_trimap, [0, 0, 0, 0], [-1, -1, -1, 1])\r\n background_trimap = tf.slice(pred_trimap_soft, [0, 0, 0, 0], [-1, -1, -1, 1])\r\n foreground = tf.slice(pred_trimap, [0, 0, 0, 1], [-1, -1, -1, 1])\r\n foreground_trimap = tf.slice(pred_trimap_soft, [0, 0, 0, 1], [-1, -1, -1, 1])\r\n unsure = tf.slice(pred_trimap, [0, 0, 0, 2], [-1, -1, -1, 1])\r\n unsure_trimap = tf.slice(pred_trimap_soft, [0, 0, 0, 2], [-1, -1, -1, 1])\r\n\r\n # net_image_trimap = tf.concat(values=[preprocessed_images_fg, pred_trimap], axis=3)\r\n # VGG-16\r\n _, endpoints = nets.vgg.vgg_16(preprocessed_images_fg,\r\n num_classes=1,\r\n spatial_squeeze=False,\r\n is_training=self._is_training)\r\n # Note: The `padding` method of fc6 of VGG-16 in tf.contrib.slim is\r\n # `VALID`, but the expected value is `SAME`, so we must replace it.\r\n net_image = endpoints.get('vgg_16/pool5')\r\n net_image = slim.batch_norm(net_image, is_training=self._is_training)\r\n # net_image = slim.conv2d(net_image, num_outputs=4096, kernel_size=7,\r\n # padding='SAME', scope='fc6_')\r\n\r\n # VGG-16 for alpha channel\r\n net_alpha = slim.repeat(pred_trimap, 2, slim.conv2d, 64,\r\n [3, 3], scope='conv1_alpha')\r\n net_alpha = slim.max_pool2d(net_alpha, [2, 2], scope='pool1_alpha')\r\n net_alpha = slim.batch_norm(net_alpha, is_training=self._is_training)\r\n net_alpha = slim.repeat(net_alpha, 2, slim.conv2d, 128, [3, 3],\r\n scope='conv2_alpha')\r\n net_alpha = slim.max_pool2d(net_alpha, [2, 2], scope='pool2_alpha')\r\n net_alpha = slim.batch_norm(net_alpha, is_training=self._is_training)\r\n net_alpha = slim.repeat(net_alpha, 2, slim.conv2d, 256, [3, 3],\r\n scope='conv3_alpha')\r\n net_alpha = slim.max_pool2d(net_alpha, [2, 2], scope='pool3_alpha')\r\n net_alpha = slim.batch_norm(net_alpha, is_training=self._is_training)\r\n net_alpha = slim.repeat(net_alpha, 2, slim.conv2d, 512, [3, 3],\r\n scope='conv4_alpha')\r\n net_alpha = slim.max_pool2d(net_alpha, [2, 2], scope='pool4_alpha')\r\n net_alpha = slim.repeat(net_alpha, 2, slim.conv2d, 512, [3, 3],\r\n scope='conv5_alpha')\r\n net_alpha = slim.batch_norm(net_alpha, is_training=self._is_training)\r\n net_alpha = slim.max_pool2d(net_alpha, [2, 2], scope='pool5_alpha')\r\n # net_alpha = slim.conv2d(net_alpha, 4096, [7, 7], padding='SAME',\r\n # 
scope='fc6_alpha')\r\n net_alpha = slim.batch_norm(net_alpha, is_training=self._is_training)\r\n\r\n # Concate the first stage prediction\r\n net = tf.concat(values=[net_image, net_alpha], axis=3)\r\n net.set_shape([None, self._default_image_size // 32,\r\n self._default_image_size // 32, 1024])\r\n\r\n # Deconvlution\r\n with slim.arg_scope([slim.conv2d_transpose], stride=2, kernel_size=5):\r\n # Deconv6\r\n net = slim.conv2d_transpose(net, num_outputs=512, kernel_size=1, scope='deconv6')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n # Deconv5\r\n net = slim.conv2d_transpose(net, num_outputs=512, scope='deconv5')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n # Deconv4\r\n net = slim.conv2d_transpose(net, num_outputs=256, scope='deconv4')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n # Deconv3\r\n net = slim.conv2d_transpose(net, num_outputs=128, scope='deconv3')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n # Deconv2\r\n net = slim.conv2d_transpose(net, num_outputs=64, scope='deconv2')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n # Deconv1\r\n net = slim.conv2d_transpose(net, num_outputs=64, stride=1, scope='deconv1')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n\r\n # Predict alpha matte\r\n alpha_matte_r = slim.conv2d(net, num_outputs=1, kernel_size=[5, 5],\r\n activation_fn=tf.nn.sigmoid,\r\n scope='AlphaMatte')\r\n\r\n alpha_matte_p = foreground_trimap + tf.multiply(unsure_trimap, alpha_matte_r)\r\n prediction_dict = {'alpha_matte_r': alpha_matte_r,\r\n 'alpha_matte_p': alpha_matte_p,\r\n 'pred_trimap': pred_trimap,\r\n 'background': background,\r\n 'foreground': foreground,\r\n 'background_trimap': background_trimap,\r\n 'foreground_trimap': foreground_trimap,\r\n 'unsure_trimap': unsure_trimap,\r\n }\r\n return prediction_dict", "def prediction(input_path=INPUT_DIR,\n output_path=OUTPUT_DIR,\n model_path=MODEL_PATH,\n test=False):\n\n X = tf.placeholder(shape=[None, chunk_size, chunk_size], dtype=tf.float32, name='input_area')\n y_inter = deepcn.deepcn(X, chunk_size, False)\n y_pred = tf.cast(tf.argmax(tf.squeeze(y_inter), -1), tf.uint8)\n\n img_ids = []\n for name in os.listdir(input_path):\n if os.path.isdir(os.path.join(input_path, name)):\n img_ids.append(name)\n all_preds = np.zeros((len(img_ids), 256, 256))\n print('num of images: ', len(img_ids))\n\n loader = tf.train.Saver()\n\n with tf.Session() as sess:\n print(\"Import model from: %s\" %model_path)\n loader.restore(sess, model_path)\n # sess.run(tf.global_variables_initializer())\n\n batch_start_pos = 0\n while batch_start_pos < len(img_ids):\n batch_size = 100\n batch_end_pos = min(batch_start_pos + batch_size, len(img_ids))\n print('predict from %s, to %s' % (batch_start_pos, batch_end_pos))\n batch = img_ids[batch_start_pos:batch_end_pos]\n pw = predict_data_wrapper.PredictWrapper(path=input_path,\n resize_size=chunk_size,\n img_ids=batch)\n input_arr = pw.ResizedTestData()\n print(\"input_arr.shape: \", input_arr.shape)\n # input test_data_batch, output prediction of shape batch_size * 256 * 256\n pred_arr = sess.run(y_pred, feed_dict={X: input_arr})\n print(\"pred_arr.shape: \", pred_arr.shape)\n all_preds[batch_start_pos:batch_end_pos] = pred_arr\n pw.OutputPrediction(pred_arr*100, path=output_path)\n batch_start_pos = batch_end_pos\n\n # Use all img_ids and all_preds to generate single cell split csv file\n pw = predict_data_wrapper.PredictWrapper(path=input_path,\n resize_size=chunk_size,\n 
img_ids=img_ids)\n pw.GenerateSubmit(all_preds, output_path, cutoff=0.5)", "def predict(self, X):\n predictions = []\n for p in X:\n predictions.append(self.feedForward(p))\n return predictions", "def pre_analyse():\n t = transform()\n model = modified_resnet50()\n model.load_state_dict(\n torch.load(\n \"model.pth.tar\",\n map_location=torch.device(\"cpu\"),\n )[\"state_dict\"]\n )\n model.eval()\n\n def get_preds(img_path):\n \"\"\"\n Gives labelds and probabilities for a single image\n This is were we preprocess the image, using a function defined in the model class\n \"\"\"\n # load image\n img = Image.open(img_path).convert(\"RGB\")\n # process it\n x = t(img)\n # get in in the right format\n x = Variable(x).unsqueeze(0)\n # predictions\n output = model(x)\n # decode\n output = decode(output.cpu().data.numpy()[0])\n\n # filter\n # return pred, proba\n return output\n\n return get_preds(\"image.jpg\")", "def predict_kaggle(test_path, file_list): \n return gennet.predict_kaggle(test_path, file_list, 'Resnet50')", "def predict(self, X):\n pred = []\n \n for x in X:\n y_pred = self.__feed_forward(x)\n pred.append(np.argmax(y_pred, axis=0))\n \n return np.asarray(pred)", "def predict(self, X):\n probs = []\n with tf.Session(graph=self.graph) as sess:\n sess.run(tf.global_variables_initializer())\n for x in X:\n probs.append(sess.run(self.output, feed_dict=self.feed(x)))\n return probs", "def get_predictions(self, img):\n \n predictions = self.tf_model.predict_proba(img)\n prediction = np.argmax(predictions, axis=-1)\n \n return prediction", "def predict(self, x_test, y_test, model_path):\n tf.reset_default_graph()\n with tf.compat.v1.Session() as sess:\n saver = tf.compat.v1.train.import_meta_graph(model_path + \".meta\")\n saver.restore(sess, model_path)\n graph = tf.compat.v1.get_default_graph()\n x = graph.get_operation_by_name(\"x_input\").outputs[0]\n y = tf.compat.v1.get_collection(\"network_architecture\")[0]\n no_samples = x_test.shape[0]\n predictions = []\n n_iteration = no_samples // self.batch_size\n for step in range(n_iteration):\n x_batch, y_batch = get_batch_data(x_test, y_test, iter_step=step, batch_size=self.batch_size)\n preds = sess.run(y, feed_dict={x: x_batch})\n predictions.append(preds)\n return predictions", "def predicts(self,X):\n return [self.predict(x) for x in X]", "def predict(self, x):\n assert isinstance(x, np.ndarray)\n \n output = x\n for layer in self._layers:\n output = layer.feed_forward(output) \n return output", "def predict_ch3(net, test_iter, n=6): #@save\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(d2l.argmax(net(X), axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(d2l.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])", "def generate_predictions_on_folder(folder_path, unet, img_size):\n \n testing_dir = folder_path\n\n testing_img_paths = [os.path.join(testing_dir, fname) \n for fname in os.listdir(testing_dir)\n if (fname.endswith(\".png\") or fname.endswith(\".jpg\"))]\n\n x = np.zeros((len(testing_img_paths),) + img_size + (3,), dtype=\"float32\")\n\n for j, path in enumerate(testing_img_paths):\n img = load_img(path)\n # cropping images from 900x720 to 512x512\n img = img.crop(box=(313,99,825,611))\n # resizing image from 512x512 to 256x256\n img = img.resize(img_size)\n x[j] = img\n\n testing_preds = unet.model.predict(x)\n\n def display_mask(i):\n \"\"\"Quick utility to display a model's prediction.\"\"\"\n ### To display 
binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)\n \n def display_cropped_img(i):\n \"\"\" Utility to display the original image. \"\"\"\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)\n\n # displaying all predictions for images in a folder\n for i in range(0,len(testing_img_paths)):\n # Display input image\n display_cropped_img(i)\n # Display mask predicted by our model\n display_mask(i)", "def read_cv_predictions(output):\n with open(os.path.join(output, \"classifier-fold-predictions.txt\"), 'r') as in_file:\n lines = in_file.readlines()\n\n lines = ''.join(lines)\n # there is always a new line at the end of the file. strip it\n fold_predictions = lines.split('\\n\\n')[:-1]\n\n # parse the lines of the file\n names, preds, all_labels, all_prob0s, all_prob1s = [], [], [], [], []\n for fold_prediction in fold_predictions:\n fold_prediction = fold_prediction.split('\\n')\n\n rec_names, predictions, labels, prob0s, prob1s = [], [], [], [], []\n for line in fold_prediction[2:]:\n [name, prediction, label, prob0, prob1] = re.findall('[\\w_./]+', line)\n\n rec_names.append(name)\n predictions.append(int(prediction))\n labels.append(int(label))\n prob0s.append(float(prob0))\n prob1s.append(float(prob1))\n\n names.append(rec_names)\n preds.append(predictions)\n all_labels.append(labels)\n all_prob0s.append(prob0s)\n all_prob1s.append(prob1s)\n\n return np.asarray(names), np.asarray(preds), np.asarray(all_labels), np.asarray(all_prob0s), np.asarray(all_prob1s)", "def predict(model, session_batch):\n predicted = np.zeros((len(session_batch), 4))\n for i, session in enumerate(session_batch): \n legal_moves = session.possible_moves(session.current_player())\n move_preds = get_move_predictions(model, legal_moves, session)\n\n chosen_move_index = move_preds[:, 0].argmax()\n predicted[i, :] = move_preds[chosen_move_index, :]\n return predicted", "def predict(image_path):\n img = image.load_img(image_path, target_size=image_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n predictions = model.predict(x)\n plt.imshow(img)\n print('Predicted:', decode_predictions(predictions, top=1)[0])\n return decode_predictions(predictions, top=1)[0]", "def predict(self):\n self.get_test_data()\n predicted_labels = []\n for row in self.test_data:\n predicted_labels.append(DecisionTree.predict_row(self.classifier, row))\n return predicted_labels", "def predict_individual(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,nn-1] #subtract 1 since it is zero based\n\n ypred.append(self.ytrain[neigh_ind])\n\n self.ypred = ypred\n\n return ypred", "def predict_image(self, image_paths):\n predictions = list()\n for image_path in image_paths:\n img = ImageHelper.get_image_by_path(image_path, self.target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = self.preprocess_input(x)\n\n with self.graph.as_default():\n features = self.model_base.predict(x)\n preds = self.model_top.predict(features)\n label, probability = self.decode_prediction(preds)\n\n predictions.append({\"image_path\": image_path,\n \"label\": 
label,\n \"probability\": probability})\n return predictions", "def extract_feats(file_path, filenames, frame_num, batch_size, save_path):\n\t#net = inceptionresnetv2(num_classes=1001, pretrained='imagenet+background', load_path='./pretrained_models/inceptionresnetv2-520b38e4.pth')\n\tnet = resnet101(pretrained=True)\n\tnet.eval()\n\tnet.cuda()\n\ttransform = trn.Compose([trn.ToPILImage(),\n\t\ttrn.Resize((224, 224)), # 299 for IRV2\n\t\ttrn.ToTensor(),\n\t\ttrn.Normalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])])#trn.Normalize(net.mean, net.std)])\n\t\t\n\tprint(\"res101 Network loaded\")\n\t#Read videos and extract features in batches\n\tfor fname in filenames:\n\t\tfeat_file = os.path.join(save_path, fname[:-4]+'.npy')\n\t\tif os.path.exists(feat_file):\n\t\t\tcontinue\n\t\tvid = imageio.get_reader(os.path.join(file_path, fname), 'ffmpeg')\n\t\tcurr_frames = []\n\t\tfor frame in vid:\n\t\t\tif len(frame.shape)<3:\n\t\t\t\tframe = np.repeat(frame,3)\n\t\t\tcurr_frames.append(transform(frame).unsqueeze(0))\n\t\tcurr_frames = torch.cat(curr_frames, dim=0)\n\t\tprint(\"Shape of frames: {0}\".format(curr_frames.shape))\n\t\tidx = np.linspace(0, len(curr_frames)-1, frame_num).astype(int)\n\t\tcurr_frames = curr_frames[idx,:,:,:].cuda()\n\t\tprint(\"Captured {} frames: {}\".format(frame_num, curr_frames.shape))\n\t\t\n\t\tcurr_feats = []\n\t\tfor i in range(0, frame_num, batch_size):\n\t\t\tcurr_batch = curr_frames[i:i+batch_size,:,:,:]\n\t\t\tout = net(curr_batch)\n\t\t\tcurr_feats.append(out.detach().cpu())\n\t\t\tprint(\"Appended {} features {}\".format(i+1,out.shape))\n\t\tcurr_feats = torch.cat(curr_feats, 0)\n\t\tdel out\n\t\tnp.save(feat_file,curr_feats.numpy())\n\t\tprint(\"Saved file {}\\nExiting\".format(fname[:-4] + '.npy'))", "def predict_individual(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,nn-1]# subtract 1 since it is zero based\n\n ypred.append(self.ytrain[neigh_ind])\n\n self.ypred = ypred\n\n return ypred", "def predict(self, x):\n pred_labels = np.zeros((x.shape[0], 10))\n\n N = len(self.NET)\n for i in range(N):\n\n inputs = self.apply_dct_permutation(x.copy(), self.permutation[i])\n pred_labels += self.NET[i].model.predict(inputs)\n\n return pred_labels", "def model(cp_list,cp_now):\r\n global record\r\n distance_threshold = 0.5\r\n model_path = \"trained_knn_model.clf\"\r\n \r\n # Load a trained KNN model (if one was passed in)\r\n with open(model_path, 'rb') as f:\r\n knn_clf = pickle.load(f)\r\n\r\n for X_img in cp_list:\r\n # Load image file and find face locations\r\n X_face_locations = face_recognition.face_locations(X_img)\r\n\r\n # If no faces are found in the image, return an empty result.\r\n if len(X_face_locations) == 0:\r\n return []\r\n\r\n # Find encodings for faces in the test iamge\r\n faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)\r\n\r\n # Use the KNN model to find the best matches for the test face\r\n closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\r\n are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]\r\n # Predict classes and remove classifications that aren't within the threshold\r\n for pred, rec in zip(knn_clf.predict(faces_encodings), are_matches):\r\n if rec:\r\n name = pred\r\n else:\r\n name = \"unknown\"\r\n if name not in record:\r\n end = datetime.now()\r\n record.append(name)\r\n 
record_time.append(end-cp_now)", "def predict(self, samples):\n output = []\n samples, _ = u.to_augmented_array(samples)\n for sample in samples:\n output.append(self._feedforward(sample))\n return output", "def class_predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 3:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return class_predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return class_predict_3(trained_model, X_test, y_test, image_name)", "def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = map(np.argmax, Y_hat_test) # classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, column=2, padx=5, pady=5)", "def cnn_pred(self):\n \n # Construct model\n pred = self.conv_net()\n \n # Evaluate model\n correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(self.y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n \n return (pred, correct_pred, accuracy)", "def predict(self): \n return self.model.predict(self.test_x)", "def predictions(self):\n return self._pred", "def predict(self, x):\n \n\n return predictions", "def detect_fn(image) :\n image, shapes = detection_model.preprocess(image)\n prediction_dict = detection_model.predict(image, shapes)\n detections = detection_model.postprocess(prediction_dict, shapes)\n\n return detections, prediction_dict, tf.reshape(shapes, [-1])", "def get_outputs(self, input_reviews):\r\n split_list = [1] * self.item_pad_num\r\n splitted_review_wordId_intputs = tf.split(input_reviews, split_list, 1)\r\n cnn_outputs = []\r\n for i in range(self.item_pad_num):\r\n input_review = tf.squeeze(splitted_review_wordId_intputs[i], [1])\r\n cnn_output = self.get_single_output(input_review=input_review, index=i)\r\n cnn_outputs.append(cnn_output)\r\n\r\n return cnn_outputs", "def inference(self, input_batch):\r\n inferences = []\r\n # Handling inference for token_classification.\r\n batch_size = len(input_batch)\r\n\r\n num_rows = batch_size\r\n for i in range(num_rows):\r\n inferences.append({'entity':input_batch[i]})\r\n logger.info(\"Model predicted: '%s'\", input_batch)\r\n\r\n return inferences", "def predict(self, x):\n if self.training:\n self.eval()\n\n with torch.no_grad():\n output = self.forward(x)\n\n if self.classes > 1:\n probs = torch.softmax(output, dim=1)\n else:\n probs = torch.sigmoid(output)\n\n probs = probs.squeeze(0)\n tf = transforms.Compose(\n [\n transforms.ToPILImage(),\n transforms.Resize(x.size[1]),\n 
transforms.ToTensor()\n ]\n )\n full_mask = tf(probs.cpu()) \n\n return full_mask", "def predict_next(self):\n return [layer._get_mean() for layer in self.layers]", "def predict(self, img):\n logger.info(\"predict() for %s\" %threading.current_thread())\n\n #detect face from the image\n face, rect = self.detect_face(img)\n\n if face is None or rect is None:\n #print(\"No face found for img \", type(img))\n return None, None, None, None\n\n if self.redis_server_password is None:\n # No training data available. Just perform detection and return\n # an error message in the subject value.\n warning = \"Training data not available. Redis password not set.\"\n subject = \"No Training Password\" # This will be displayed with the face\n confidence = 0\n logger.warning(\"%s\" %warning)\n return None, subject, confidence, rect\n\n #predict the image using our face recognizer\n label, confidence = self.face_recognizer.predict(face)\n #get name of respective label returned by face recognizer\n label_text = self.face_recognizer.getLabelInfo(label)\n logger.info(\"label=%s label_text=%s\" %(label, label_text))\n\n # print(label_text, confidence, rect)\n return img, label_text, confidence, rect", "def makepredictions(self):\n data, sampling_rate = librosa.load(self.file)\n mfccs = np.mean(librosa.feature.mfcc(y=data, sr=sampling_rate, n_mfcc=40).T, axis=0)\n x = np.expand_dims(mfccs, axis=1)\n x = np.expand_dims(x, axis=0)\n predictions = self.loaded_model.predict_classes(x)\n predict = self.convertclasstoemotion(predictions)\n print(\"Prediction is\", \" \", self.convertclasstoemotion(predictions))\n return predict", "def image_model_predict(input_ms_image_filename, input_pan_image_filename, pan_img_height_size, pan_img_width_size, \r\n fitted_model, write, output_filename):\r\n \r\n with rasterio.open(input_ms_image_filename) as f:\r\n metadata = f.profile\r\n ms_img = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(input_pan_image_filename) as g:\r\n metadata_pan = g.profile\r\n pan_img = g.read(1)\r\n \r\n pan_img = np.expand_dims(pan_img, axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n class_layer = np.zeros((pan_img.shape[0], pan_img.shape[1]))\r\n \r\n img_pan_holder = []\r\n img_ms_holder = []\r\n \r\n for i in range(0, pan_img.shape[0] - pan_img_height_size, int(ms_to_pan_ratio)):\r\n for j in range(0, pan_img.shape[1] - pan_img_width_size, int(ms_to_pan_ratio)):\r\n img_pan_iter = pan_img[i : i + pan_img_height_size, j : j + pan_img_width_size, 0]\r\n img_pan_holder.append(img_pan_iter)\r\n \r\n for i in range(0, int(ms_img.shape[0] - (pan_img_height_size / ms_to_pan_ratio)), int(ms_to_pan_ratio)):\r\n for j in range(0, int(pan_img.shape[1] - (pan_img_width_size / ms_to_pan_ratio)), int(ms_to_pan_ratio)):\r\n img_ms_iter = ms_img[i : int(i + (pan_img_height_size / ms_to_pan_ratio)), \r\n j : int(j + (pan_img_width_size / ms_to_pan_ratio)), \r\n 0 : metadata['count']]\r\n img_ms_holder.append(img_ms_iter)\r\n \r\n img_pan_array = np.concatenate(img_pan_holder, axis = 0)\r\n img_ms_array = np.concatenate(img_ms_holder, axis = 0)\r\n \r\n pred_array = np.argmax(fitted_model.predict([img_ms_array, img_pan_array]), axis = 1)\r\n \r\n n = 0 \r\n for i in range(int(pan_img_height_size / 2), pan_img.shape[0] - int(pan_img_height_size / 2), int(ms_to_pan_ratio)):\r\n for j in range(int(pan_img_width_size / 2), pan_img.shape[1] - int(pan_img_width_size / 2), int(ms_to_pan_ratio)):\r\n class_layer[i, 
j] = pred_array[n]\r\n n += 1\r\n \r\n if write:\r\n with rasterio.open(output_filename, 'w', **metadata_pan) as dst:\r\n dst.write(class_layer)\r\n \r\n return class_layer", "def predict(trainer, img_path, patch_size, device='cpu'):\n\n img = imread(img_path)\n patches = divide_image_to_patches(img, patch_size)\n predictions = []\n\n for patch in patches:\n input_ = TF.to_tensor(Image.fromarray(patch)).to(device).unsqueeze(0)\n prediction = trainer.postprocess(trainer.model(input_))\n prediction = prediction.detach().cpu().numpy()\n predictions.append(prediction[..., np.newaxis])\n\n predictions = np.concatenate(predictions)\n\n return combine_patches_to_image(predictions, img.shape[0], img.shape[1])", "def get_predict_data(self) -> List[PredictSample]:\n predict_data = self.__get_season_data(self.__predict_season, self.__predict_episode, False)\n if not predict_data:\n return []\n\n predict_input = np.array([ExamDropEncoder.extract_features(sample, self.__predict_episode) for sample in predict_data])\n predict_input = self.__discretizer.transform(predict_input)\n predict_input = self.__add_answered_on_feature(predict_data, predict_input)\n predict_input = self.__anova_f_filter.transform(predict_input)\n predict_input = self.__pca.transform(predict_input)\n\n predict_samples = []\n weights = self.__get_train_weights(predict_data)\n for data, in_features, out_features, weight in zip(predict_data[::2], predict_input[1::2], predict_input[::2], weights):\n in_answer = data.answer\n out_answer = set(data.exam_episode.players).difference(data.answer)\n predict_samples.append(PredictSample(in_answer, out_answer, in_features, out_features, weight))\n return predict_samples", "def predictions(self):\n\n return self._predictions", "def forward(self, images):\n with torch.no_grad():\n features = self.resnet(images) \n features = features.reshape(features.size(0), -1)\n features = self.bn(self.linear(features))\n return features", "def predicted(self):\n return np.squeeze(self._predicted)", "def input_output_network(self, X):\n layer_wise_output = [[] for i in range(len(self.layers) + 1)]\n # if X contains only one data\n if len(X.shape) == 1:\n X = X.reshape(1, -1)\n # for every data in X\n for x in X:\n for l in range(len(self.layers)):\n p_h = self.layers[l].input_output(x)\n layer_wise_output[l].append(p_h)\n x = (np.random.random_sample(self.layers[l].output_size) <\n p_h) * 1\n # last layer --> softmax\n layer_wise_output[-1].append(self.softmax(self.classif_RBM, x))\n\n # just reshape\n layer_wise_output = [[\n layer_wise_output[i][j].reshape(1, -1)\n for j in range(len(layer_wise_output[i]))\n ] for i in range(len(layer_wise_output))]\n # concat results for each layers for each sample\n layer_wise_output = [\n np.concatenate(layer_wise_output[i])\n for i in range(len(layer_wise_output))\n ]\n return layer_wise_output", "def predict_data(*args):\n\n print(\"predict_data(*args) - args: %s\" % (args)) if debug_model else ''\n\n files = []\n files_original = []\n\n for arg in args:\n file_objs = arg['files']\n for f in file_objs:\n files.append(f.filename)\n files_original.append(f.original_filename)\n if debug_model:\n print(\"file_obj: name: {}, filename: {}, content_type: {}\".format(\n f.name,\n f.filename,\n f.content_type))\n print(\"File for prediction is at: {} \\t Size: {}\".format(\n f.filename,\n os.path.getsize(f.filename)))\n trained_graph = arg['trained_graph']\n\n results = []\n try:\n idx = 0\n for imgfile in files:\n imgfile_original = files_original[idx]\n pred = {\n 
\"original_filename\": imgfile_original,\n \"prediction\" : str(predict_file(imgfile, trained_graph))\n }\n idx+=1\n results.append(pred)\n print(\"image: {} (tmp: {})\".format(imgfile_original, imgfile))\n except Exception as e:\n raise e\n finally:\n for imgfile in files:\n os.remove(imgfile)\n\n return results", "def predict(self, request):\r\n f = request.files['image']\r\n \r\n img = Image.open(f)\r\n \r\n image = img.convert('RGB')\r\n \r\n image_np = load_image_into_numpy_array(image)\r\n output_dict = run_inference_for_single_image(model, image_np)\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image_np,\r\n output_dict['detection_boxes'],\r\n output_dict['detection_classes'],\r\n output_dict['detection_scores'],\r\n category_index,\r\n instance_masks=output_dict.get('detection_masks_reframed', None),\r\n use_normalized_coordinates=True,\r\n line_thickness=2, \r\n min_score_thresh=0.45, \r\n skip_scores=True)\r\n \r\n result_image = Image.fromarray(image_np)\r\n \r\n raw_bytes = BytesIO()\r\n result_image.save(raw_bytes, \"PNG\")\r\n \r\n return base64.b64encode(raw_bytes.getvalue()).decode(\"utf-8\")", "def batch_forward(model, images, max_batch=MAX_BATCH):\n N = images.shape[0]\n nbatchs = ceil(N / max_batch)\n pred_list = []\n\n with torch.no_grad():\n for i in range(nbatchs):\n pred_list.append(model(images[i * max_batch: (i + 1) * max_batch]))\n return torch.cat(pred_list, dim=0)", "def predict_live(image, session):\n\n feed_dict = {tf_sample_dataset : image}\n predictions = session.run(train_prediction, feed_dict=feed_dict)\n\n return np.argmax(predictions, 2)", "def image_model_fn(self, features, labels, mode):\n col_count, row_count = self.metadata_.get_matrix_size(0)\n sequence_size = self.metadata_.get_sequence_size()\n output_dim = self.metadata_.get_output_size()\n\n # Input Layer\n input_layer = features[\"x\"]\n # Transpose X to 4-D tensor: [batch_size, row_count, col_count, sequence_size]\n # Normally the last axis should be channels instead of time axis, but they\n # are both equal to 1 for images\n hidden_layer = tf.transpose(input_layer, [0, 2, 3, 1])\n # At begining number of filters = 32\n num_filters = 32\n while True:\n hidden_layer = tf.layers.conv2d(\n inputs=hidden_layer,\n filters=num_filters,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n hidden_layer = tf.layers.max_pooling2d(inputs=hidden_layer, pool_size=[2, 2], strides=2)\n num_rows = hidden_layer.shape[1]\n num_columns = hidden_layer.shape[2]\n num_filters *= 2 # Double number of filters each time\n if num_rows == 1 or num_columns == 1:\n break\n hidden_layer = tf.layers.flatten(hidden_layer)\n hidden_layer = tf.layers.dense(inputs=hidden_layer, units=1024, activation=tf.nn.relu)\n hidden_layer = tf.layers.dropout(\n inputs=hidden_layer, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)\n logits = tf.layers.dense(inputs=hidden_layer, units=output_dim)\n sigmoid_tensor = tf.nn.sigmoid(logits, name=\"sigmoid_tensor\")\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `sigmoid_tensor` to the graph. 
It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": sigmoid_tensor\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n # For multi-label classification, a correct loss is sigmoid cross entropy\n loss = sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.AdamOptimizer()\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)" ]
[ "0.6863064", "0.6837647", "0.67098725", "0.66824293", "0.658928", "0.65756834", "0.65547824", "0.6550675", "0.65452987", "0.6542417", "0.6496417", "0.6496279", "0.64936125", "0.64826065", "0.6478494", "0.6471331", "0.6459123", "0.64511555", "0.64502", "0.6362195", "0.6349084", "0.63439745", "0.6334361", "0.6333736", "0.6328022", "0.632228", "0.6294826", "0.6260715", "0.62517583", "0.6247306", "0.6246829", "0.6228531", "0.62203234", "0.62090176", "0.62033814", "0.619558", "0.617307", "0.6170278", "0.6169877", "0.61688316", "0.6157217", "0.6155289", "0.6151263", "0.61496776", "0.61446863", "0.6143371", "0.61389756", "0.6133933", "0.6129187", "0.6122128", "0.6117279", "0.6115188", "0.61116296", "0.6103954", "0.6099333", "0.60981077", "0.6090815", "0.6087852", "0.60817665", "0.606917", "0.60684234", "0.6065748", "0.6055381", "0.60326964", "0.60269135", "0.602507", "0.6004733", "0.60035634", "0.6002229", "0.60003126", "0.5999655", "0.5994908", "0.5989006", "0.5986285", "0.5984714", "0.59832203", "0.59799564", "0.59628594", "0.5960332", "0.59599197", "0.5958146", "0.5956999", "0.5956446", "0.5956331", "0.59491163", "0.5946953", "0.5942649", "0.5941568", "0.59385544", "0.59384906", "0.5935952", "0.59340715", "0.5932394", "0.5923847", "0.59225154", "0.5922359", "0.5917355", "0.59157616", "0.59026706", "0.5901104" ]
0.64555985
17
Returns list of elements with length "num" that are found to be equally spaced in the sequence provided
def takespread(sequence, num): length = float(len(sequence)) for i in range(num): yield sequence[int(ceil(i * length / num))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(np.ceil(i * length / num))]", "def makespread(sequence, num):\n length = float(len(sequence))\n seq = np.array(sequence)\n return seq[np.ceil(np.arange(num) * length / num).astype(int)]", "def find_long_runs(num_sequence, l):\n chunked = [(k, list(g)) for k, g in itertools.groupby(num_sequence)]\n retval = [(i, len(g)) for i, (k, g) in enumerate(chunked) if k and len(g) > l]\n return retval", "def select_evenly_spread(seq, num_items):\n if len(seq) <= num_items:\n return seq\n if num_items == 1:\n return [seq[0]]\n step_size = (len(seq) - 1) / float(num_items - 1)\n float_indices = [i * step_size for i in range(num_items)]\n return [seq[int(round(index))] for index in float_indices]", "def split_seq(seq,size):\n for i in range(0,len(seq),size):\n if i+size<len(seq) and seq[i+size] - seq[i] == size:\n yield seq[i:i+size]", "def chunk_it(seq, num):\n avg = len(seq) / float(num)\n out = []\n last = 0.0\n\n while last < len(seq):\n out.append(seq[int(last):int(last + avg)])\n last += avg\n\n return out", "def seq_sqrt(xs):\n num_list = []\n for xs_split in xs:\n print(xs)\n xs_num = int(xs_split)\n print(xs_num)\n xs_squrt = math.sqrt(xs_num)\n print(xs_squrt)\n num_list.append(xs_squrt)\n return num_list", "def slices(numstr, count):\n if count > len(numstr) or not count:\n raise ValueError(\"Slice can't be smaller than input!\")\n return [ map(int, list(numstr[i:i+count]))\n for i in xrange(len(numstr)-count+1) ]", "def geomspace(start, ratio=None, stop=False, num=50):\n if not ratio and stop != False:\n ratio = (stop/start)**(1/(num-1))\n seq = []\n seq.append(start)\n if stop == False:\n for j in range(1, num):\n seq.append(seq[j-1]*ratio)\n return seq\n else:\n val, j = start, 1\n while val <= stop or np.allclose(val, stop, ):\n val = seq[j-1]*ratio\n seq.append(val)\n j+=1\n return seq[:-1]", "def squares(amount, start, stop, truncated, sequence):\n for x in range(start, amount):\n y = x * x\n if truncated and y >= stop:\n sequence.append(stop)\n else:\n sequence.append(y)\n return sequence", "def squares():\n return [i for i in xrange(11, 89) if 1 <= (i % 10) <= 8]", "def slices(series, num):\n\tif num > len(series) or num < 1:\n\t\traise ValueError\n\tif not series.isdigit():\n\t\traise TypeError(\"Input string must consist only of digits!\")\n\treturn [[int(x) for x in series[index:index+num]] for index in xrange(len(series) + 1 - num)]", "def numbers(num):\n r = []\n for i in range(num):\n d = len(r)\n r = [1 if i == 0 or i == d else r[i-1]+r[i] for i in range(d+1)]\n yield r", "def splitevery(s, n):\n\treturn [s[x:x+n] for x in range(0,len(s), n)]", "def pair_equal(amount=100, start=0, stop=100, truncated=True):\n sequence = []\n amount = amount + start\n\n\n for x in range(start, amount):\n if truncated and x >= stop:\n sequence.append(stop)\n else:\n sequence.append(x)\n\n return sequence", "def problem_52():\n\n for number in xrange(1, 123456789):\n sorted_num = ''.join(sorted(str(number)))\n if len([value for value in xrange(2, 7)\n if ''.join(sorted(str((value * number)))) == sorted_num]) == 5:\n return number", "def maiores(lista, n):\n numeros = [lista for lista in lista if lista > n]\n return numeros", "def groups_of(seq, n):\n for i in range(0, len(seq), n):\n yield seq[i : (i + n)]", "def chunk_it(seq, num):\n # find average chunk size\n avg = len(seq) / float(num)\n out = []\n last = 0.0\n\n # until the end of sequence\n while last < len(seq):\n # append the value 
to a bin\n out.append(seq[int(last):int(last + avg)])\n last += avg\n\n return out", "def medium_words(a_list):\n\n medium_list = [word for word in a_list if len(word) in range(6,9)]\n return medium_list", "def question_27(list_num: int) -> int:\n return [abs(list_num[i+1] - list_num[i]) for i,v in enumerate(list_num) if\n i <= len(list_num) - 2]", "def double_nums(num_list):", "def split(a, N):\n\n integ = int(len(a) / N)\n remain = int(len(a) % N)\n\n splitted = [a[i * integ + min(i, remain):(i + 1) * integ +\n min(i + 1, remain)] for i in range(N)]\n\n return splitted", "def at_least_n_ver(seq, n):\r\n num_AB = 0\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AB):\r\n if num_AB == n-1:\r\n return Quantifier.T\r\n else:\r\n num_AB += 1\r\n return Quantifier.F", "def findMultiples(M, N):\n\tnumbers = []\n\n\tfor i in range(N):\n\t\tif(i + 1 == N):\n\t\t\tbreak\n\t\tif(((i + 1) % M) == 0):\n\t\t\tnumbers.append(i+1)\n\n\treturn numbers", "def number2multisize_patten(number, min_length, max_length):\n lengths = np.arange(min_length, max_length + 2) # +2 Include last interval\n offsets = np.cumsum(4**lengths)\n\n try:\n index = np.where((offsets - number) > 0)[0][0]\n org_length = lengths[index]\n number -= np.concatenate(([0], offsets))[index]\n return number2patten(number, org_length)\n except IndexError:\n raise ValueError('Provided number (%d) do not match ' % number +\n 'list of provided lengths %s nt.' % lengths)", "def number_of_distances(number_of_sequences):\n return math.factorial(number_of_sequences)/(math.factorial(2)*math.factorial(number_of_sequences-2))", "def big_selections(lst: List[int], n: int) -> List[List[int]]:\n if not lst:\n return [[]]\n else:\n holder = [lst.copy()]\n for i in range(len(lst)):\n l2 = lst.copy()\n l2.pop(i)\n for item in selections(l2):\n if item not in holder and sum(item) >= n:\n holder.append(item)\n return holder", "def compute_sequence(starting_numbers: List[int], n_elems: int) -> int:\n elems = list(reversed(starting_numbers))\n\n for _ in range(len(starting_numbers), n_elems):\n try:\n idx_before = elems.index(elems[0], 1)\n except ValueError:\n elems = [0] + elems\n else:\n elems = [idx_before] + elems\n\n return elems[0]", "def runs(sequence, predicate, minlength=2):\n inrun = False\n for i,v in enumerate(sequence):\n if not inrun and predicate(v):\n inrun = True\n start = i\n elif inrun and not predicate(v):\n inrun = False\n stop = i - 1\n if stop - start >= minlength:\n yield start, stop\n\n if predicate(v) and inrun:\n stop = i\n if stop - start >= minlength:\n yield start, stop", "def split_seq(seq,size):\n return [seq[i:i+size] for i in range(0, len(seq), size)]", "def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(next(seq))\n except StopIteration:\n pass\n return result", "def create_wild_lists(amount,length):\r\n box = []\r\n\r\n k = 0\r\n while k < amount:\r\n sublist = []\r\n j = 0\r\n while j < length:\r\n num = random.randint(1, 100)\r\n sublist.append(num)\r\n j += 1\r\n box.append(sublist)\r\n k += 1\r\n\r\n if amount == 1:\r\n return sublist\r\n\r\n return box", "def __get_min_indexes(num_list, number):\n result = []\n\n num_list = np.array(num_list)\n result = np.argpartition(num_list, number)[:number]\n\n return result", "def _split_into_legal_volume(\n self, oclc_numbers: List[str] = [], n: int = 50\n ) -> List[str]:\n\n for i in range(0, len(oclc_numbers), n):\n yield \",\".join(oclc_numbers[i : i + n])", "def filter_long_words(list,n):\n numberlist=[]#set up a new 
list\n for i in range(0,len(list)):\n if len(list[i]) > n:#pick up the word that is longer than n\n numberlist.append(list[i])#count the length of each word\n else:\n continue\n return numberlist", "def get_splits(ntot, nper):\n beglist = numpy.arange(0,ntot,nper)\n endlist = numpy.arange(0,ntot,nper) + nper - 1\n\n if (ntot % nper) != 0:\n endlist[-1] = ntot-1\n return beglist, endlist", "def Split(self, k):\n n = len(self)\n start = range(0, n, ceil(n / k))\n end = list(start[1:]) + [n]\n return [range(first, last) for first, last in zip(start, end)]", "def get_number(number_seqeunce):\n number_sequence.append(None)\n return [i for i in number_seqeunce if number_seqeunce.count(i) % 2 != 0][0]", "def slices(digits, size):\n if not 0 <= size <= len(digits):\n raise ValueError\n elif digits == '':\n return [[1]]\n\n slice_list = []\n\n for i in range(len(digits) - size + 1):\n slice_list.append([int(d) for d in digits[i:i+size]])\n return slice_list", "def chunks(lst, n):\n for i in range(0, len(lst), 1):\n if len(lst[i:i + n])==n:\n yield lst[i:i + n]", "def get_kmers_from_sequence(sequence, kmin, kmax):\n limits = range(kmin, kmax + 1)\n seq_range = len(sequence) - kmax + 1\n for i in range(0, seq_range):\n for j in limits:\n yield sequence[i:i + j]", "def pickingNumbers(a):\n a = sorted(a)[::-1]\n len_a = len(a)\n counts = []\n for ind, ele in enumerate(a):\n count = 0\n for i in range(ind, len_a):\n if ele - a[i] < 2:\n count += 1\n else:\n break\n counts.append(count)\n return max(counts)", "def chunks(l, n):\n for i in range(0, len(l) // n * n + n - 1, n):\n if len(l[i:i + n]):\n yield l[i:i + n]", "def split_len(seq, length):\n return [seq[i:i+length] for i in range(0, len(seq), length)]", "def three_times_nums(num_list):", "def chunks(A, N):\n for i in range(0, len(A)):\n r = A[i:i+N]\n if len(r) == N:\n yield r", "def get_numbers(sequence):\r\n\r\n new_list = []\r\n for element in sequence:\r\n if isinstance(element, numbers.Number) == True:\r\n new_list.append(element)\r\n\r\n return new_list", "def slices(digits, length):\n if len(digits) < length or length < 1:\n raise ValueError(\"Slice length %d is too long\" % length)\n\n digit_array = [int(c) for c in digits]\n for i in range(len(digits) - length + 1):\n yield digit_array[i:i+length]", "def get_chunks(sequence, ck_size):\n \n list_chunk = []\n i=1\n l = len(sequence)\n if l < 4*ck_size:\n raise ValueError(\"Chunk size should be of 4 at least \")\n for i in range(1, l):\n if i*ck_size < l:\n list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #while(i*ck_size < l):\n #list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #i += 1\n return list_chunk", "def genslices(n):\n def range_with_none():\n yield None\n yield from range(-n, n+1)\n\n for t in product(range_with_none(), range_with_none(), range_with_none()):\n s = slice(*t)\n if s.step != 0:\n yield s", "def subdim(number):\n res = []\n for i in [2, 3, 4, 5, 6, 7, 8, 9, 10]:\n res.append(number % i)\n if number % i == 0:\n n = i\n m = number // i\n return n, m\n if not 0 in res:\n return subdim(number + 1)", "def exactly_n_ver(seq, n):\r\n num_AB = 0\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AB):\r\n num_AB += 1\r\n return Quantifier.T if num_AB == n else Quantifier.F", "def find_powers(n):\n # find_powers(6) --> [1, 2, 3, 4]\n return list(takewhile(lambda x: len(str(n**x)) == x, count(1)))", "def split_by_n( seq, n ):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def genslices(n):\n return product(range(-n, n+1), range(-n, n+1), range(-n, 
n+1))", "def genslices(n):\n return product(range(-n, n + 1), range(-n, n + 1), range(-n, n + 1))", "def lessthan_5(num_list):", "def cntfrac_sqrt(num):\r\n a = limit = int(sqrt(num))\r\n if limit * limit == num:\r\n return []\r\n b, period = 1, 0\r\n lst = [limit]\r\n while b != 1 or period == 0:\r\n b = (num - a * a) / b\r\n q = (limit + a) / b\r\n a = q * b - a\r\n lst.append(q)\r\n period += 1\r\n return lst", "def split(a, n):\n k, m = divmod(len(a), n)\n ret = [a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)]\n return ret", "def subtraction_of(number_list):", "def batches(l, n):\n for i in range(0, l, n):\n yield range(i,min(l,i+n))", "def mots_Nlettre(L:list, n)->list:\n lst= []\n mot = 0\n for i in range(len(L)):\n mot = L[i] \n cpt = 0\n for e in mot:\n cpt += 1\n if cpt == n:\n lst.append(mot)\n return lst", "def question_20(list_num: int) -> int:\n return [i for i in list_num if i % 2 == 1]", "def window(seq, size=2, stride=1):\n it = iter(seq)\n result = []\n for elem in it:\n result.append(elem)\n if len(result) == size:\n yield result\n result = result[stride:]", "def _batch_by_length(\n seqs: Sequence[Any], max_words: int, get_length=len\n) -> List[List[Any]]:\n # Use negative index so we can get sort by position ascending.\n lengths_indices = [(get_length(seq), i) for i, seq in enumerate(seqs)]\n lengths_indices.sort()\n batches = []\n batch: List[int] = []\n for length, i in lengths_indices:\n if not batch:\n batch.append(i)\n elif length * (len(batch) + 1) <= max_words:\n batch.append(i)\n else:\n batches.append(batch)\n batch = [i]\n if batch:\n batches.append(batch)\n # Check lengths match\n assert sum(len(b) for b in batches) == len(seqs)\n batches = [list(sorted(batch)) for batch in batches]\n batches.reverse()\n return batches", "def easy_words(a_list):\n\n easy_list = [word for word in a_list if len(word) in range(4,7)]\n return easy_list", "def split_by_n(seq, n):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def square_nums(number_list):", "def iterslices(iterable, n, pad_last=False, pad_value=None):\n current = []\n for a in iterable:\n current.append(a)\n if len(current) == n:\n yield current\n current = []\n if current:\n if pad_last:\n current += [pad_value] * (n-len(current))\n yield current", "def find_dimensions(seq):\n sizes = []\n if _is_sequence(seq):\n sizes.append(len(seq))\n sizes += find_dimensions(seq[0])\n return sizes", "def sequence(side_length):\r\n index = side_length\r\n numbers = []\r\n tmp1 = (index -1 ) / 2\r\n #numbers.append([index, 3, 5, 7, 9])\r\n for i in range(tmp1):\r\n if i == 0:\r\n numbers.append([3, 3, 5, 7, 9])\r\n else:\r\n diff = (3+i*2) - 1\r\n tmp2 = numbers[i-1][4] + diff\r\n numbers.append([3+i*2, tmp2, tmp2+diff, tmp2+diff*2, tmp2+diff*3])\r\n return numbers", "def generate_possible_slices(L, H):\n n_min = 2 * L\n n_max = H\n\n slices = []\n for he in range(1, n_max+1):\n for wi in range(max(1, n_min // he), n_max + 1):\n if he * wi > n_max:\n break\n slices.append((wi, he))\n\n return slices", "def count_to(count):\n numbers = [\"one\", \"two\", \"three\", \"four\", \"five\"]\n for number in numbers[:count]:\n yield number", "def grAList() -> list:\n return [2, 5, 6, 9, 10, 11, 13, 17, 18, 30]", "def distinct(length, digits=DIGITS):\n return (int(''.join(p)) for p in permutations(digits, length))", "def same_length(words, pattern):\r\n new_list = list()\r\n for i in words:\r\n if len(pattern) == len(i):\r\n new_list.append(i)\r\n return new_list", "def window(seq, n):\n seq_it = iter(seq)\n result = 
tuple(it.islice(seq_it, n))\n if len(result) == n:\n yield result \n for elem in seq_it:\n result = result[1:] + (elem,)\n yield result", "def n_wise(x: List[Any], size: Optional[int] = 2) -> Iterable:\n\n iterator = iter(x)\n\n return iter(lambda: tuple(islice(iterator, size)), ())", "def split_seq(seq, size):\n return [seq[ii:ii + size] for ii in range(0, len(seq), size)]", "def evenquerychunks(l, n):\n\n l = list(l)\n \n import math\n n = int(math.floor(len(l)/float(n))) + 1\n print len(l)\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n results = []\n for i in xrange(0, len(l), n):\n results.append( l[i:i+n])\n \n return results", "def num_array(lower_limit = 0, upper_limit = 5, increment = 1):\n numbers = []\n while lower_limit < upper_limit:\n numbers.append(lower_limit)\n lower_limit += increment\n return numbers", "def _get_sharded_ranges(\n begin,\n end,\n max_length,\n):\n if max_length <= 0:\n raise ValueError(\"max_length <= 0.\")\n length = end - begin\n if length <= max_length:\n return [(begin, end)]\n pivot = begin + length // 2\n return (_get_sharded_ranges(begin, pivot, max_length) +\n _get_sharded_ranges(pivot, end, max_length))", "def split(a, n):\n n = min(n, len(a))\n k, m = divmod(len(a), n)\n return [a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)]", "def get_kmers(seq,k=2):\n pair_list = []\n for i in range(0,len(seq),k):\n pair_list.append(str(seq)[i:i+k])\n return pair_list", "def calc_run_lengths(sequence: List[int]) -> List[Run]:\n return [Run(object=g[0], length=len(list(g[1])))\n for g in itertools.groupby(sequence)]", "def split_into_n(s, n):\n return [s[k:k + n] for k in range(0, len(s), n)]", "def n_long_words(words, n):\n words_longer_than_n = []\n for word in words:\n if len(word) > n:\n words_longer_than_n.append(word)\n\n return words_longer_than_n", "def seperate(self, array: List[int], digit: int) -> None:\n\n digit = 10 ** digit\n output = []\n counter = 0\n minimum = list(str(min(array)))\n for i in range(len(minimum[1:])):\n minimum[i + 1] = \"0\"\n\n minimum = int(\"\".join(minimum))\n\n for i in range(minimum + digit, max(array) + digit + 1, digit):\n beg = counter\n\n while counter <= len(array):\n if counter == len(array) or array[counter] >= i:\n self.render(array, cur=(counter, beg, counter - beg))\n if counter - beg > 0:\n output.append(array[beg:counter])\n break\n counter += 1\n\n array[:] = output", "def take(num, iterable):\n for i, e in enumerate(iterable):\n if i >= num:\n break\n yield e", "def _lists_of_n(self, myList, n):\n if len(myList) <= 0:\n return []\n \n if len(myList) <= n:\n return [ myList ]\n\n ret = []\n currentList = []\n count = 0\n for item in myList:\n count = count + 1\n currentList.append(item)\n if count % n == 0:\n ret.append(currentList)\n currentList = []\n if len(currentList) > 0:\n ret.append(currentList)\n return ret", "def lindivQ(sample, quantity, criteria=len):\n sizes = [criteria(dna) for dna in sample]\n return [size*quantity/sum(sizes) for size in sizes]", "def nths(x, n):\n return [l[n] for l in x]", "def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return near_split(x, num_bins=int(np.ceil(x / size_bins)))", "def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return 
near_split(x, num_bins=int(np.ceil(x / size_bins)))", "def get_divisores(num):\n divisores = [] #uso una lista para guardar los divisores\n for i in range(1, num):\n if num%i == 0:\n divisores.append(i)\n return divisores", "def _getAlignmentsFromAlignmentNumbers(self, domSeq, subdomSeqs, numbers):\n\t\tlongestSubLen = self._getLongestLength(subdomSeqs)\n\t\tseqs = \t[]\n\t\tdom = [\" \"] * (2*longestSubLen + len(domSeq) - 2)\n\t\tdom[longestSubLen-1:longestSubLen+len(domSeq)-1] = domSeq\n\t\tseqs.append(''.join(dom))\n\t\tnumStartDashes = longestSubLen - 1\n\t\tnumEndDashes = longestSubLen - 1\n\t\tfor i in range(len(subdomSeqs)):\n\t\t\ts = [\" \"] * (2*longestSubLen + len(domSeq) - 2)\n\t\t\ts[longestSubLen-len(subdomSeqs[i])+numbers[i]:longestSubLen+numbers[i]] = subdomSeqs[i]\n\t\t\tseqs.append(''.join(s))\n\t\t\tif(longestSubLen-len(subdomSeqs[i])+numbers[i] < numStartDashes):\n\t\t\t\tnumStartDashes = longestSubLen-len(subdomSeqs[i])+numbers[i]\n\t\t\tif((longestSubLen+len(domSeq)-2-numbers[i]) < numEndDashes):\n\t\t\t\tnumEndDashes = longestSubLen+len(domSeq)-2-numbers[i]\n\t\treturn [s[numStartDashes:len(s)-numEndDashes] for s in seqs]", "def GetNiceExtentsBySpacing(minval,maxval,spacing,tolerance):\n pass", "def chunks(l, n):\n lists = []\n for i in range(n):\n list1 = np.arange( i*l/n+1 , (i+1)*l/n+1 )\n lists.append(list1)\n return lists", "def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries" ]
[ "0.6423416", "0.61214864", "0.6046932", "0.5743309", "0.5673493", "0.55953926", "0.54691815", "0.54635596", "0.54383546", "0.5431269", "0.5416143", "0.5397969", "0.53908265", "0.53346854", "0.53308976", "0.5270804", "0.5267939", "0.5258385", "0.5253757", "0.5242062", "0.52373016", "0.52173054", "0.52172107", "0.5206816", "0.5199322", "0.518269", "0.5176938", "0.51730376", "0.5167217", "0.51625323", "0.51396406", "0.51394665", "0.5134108", "0.5132546", "0.51287013", "0.5114847", "0.51146483", "0.51132965", "0.5111429", "0.51093453", "0.5108752", "0.5096833", "0.509041", "0.5085764", "0.5077153", "0.50716335", "0.50586885", "0.50492305", "0.5042711", "0.5042075", "0.50412565", "0.50260085", "0.50237626", "0.5017582", "0.5014385", "0.5012469", "0.5012172", "0.5009476", "0.50068754", "0.50055975", "0.500447", "0.5001081", "0.49863455", "0.49721572", "0.49710408", "0.49687096", "0.49657154", "0.4964859", "0.4964404", "0.49637923", "0.49565616", "0.49552098", "0.49543676", "0.49494445", "0.49478737", "0.49462202", "0.4944666", "0.49437654", "0.4932959", "0.49321085", "0.49302483", "0.49290717", "0.49283028", "0.49267504", "0.49210265", "0.49131462", "0.49129072", "0.49004817", "0.48999238", "0.4898546", "0.48976365", "0.48946366", "0.4884398", "0.4881343", "0.4881343", "0.48776183", "0.48772728", "0.4873979", "0.48694393", "0.48666418" ]
0.63344747
1
Reads in the labelled frames and saves output from CNN model in pickle file
def main(input_file_name,output_file_name,video_length): length_of_video = video_length with open(input_file_name + '.pkl', 'rb') as fin: frames = pickle.load(fin) sorted_frames = list(list(x[1]) for x in itertools.groupby(frames, operator.itemgetter(1))) final_dict = dict() for element in sorted_frames: for f in element: name = f[0] video_name = name[name.rindex("/")+1:name.rindex("frame")-1] if video_name not in final_dict: final_dict[video_name] = [] final_dict[video_name].append(f) new_frames = [] for key in final_dict: #elements = takespread(final_dict[key],length_of_video) new_frames.extend(final_dict[key]) print("size:", len(new_frames)) predictions = predict_on_frames(new_frames) with open(output_file_name + '.pkl', 'wb') as fout: pickle.dump(predictions, fout)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]", "def extract_labels(nlabels,filename, one_hot=False):\n print('Extracting', filename,'bbbccicicicicib')\n\n labels=numpy.loadtxt(filename,dtype='int64')\n \n if one_hot:\n print(\"LABELS ONE HOT\")\n print(labels.shape)\n XXX=dense_to_one_hot(labels,nlabels)\n print(XXX.shape)\n return dense_to_one_hot(labels,nlabels)\n print(\"LABELS\")\n print(labels.shape)\n return labels", "def _extract_labels(self, filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = self._read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return self._dense_to_one_hot(labels)\n return labels", "def save_all_features(nb_samples, source=\"./datasets/D1/images/\", dest=\"./datasets/D1/features/\", input_size=(416, 416), batch_size=16):\n\n # check if the directory exists, and if not make it\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n # define image height and width\n (img_height, img_width) = input_size\n\n # build the VGG16 network and extract features after every MaxPool layer\n model = VGG16(weights='imagenet', include_top=False)\n\n c1 = model.layers[-16].output\n c1 = GlobalAveragePooling2D()(c1)\n\n c2 = model.layers[-13].output\n c2 = GlobalAveragePooling2D()(c2)\n\n c3 = model.layers[-9].output\n c3 = GlobalAveragePooling2D()(c3)\n\n c4 = model.layers[-5].output\n c4 = GlobalAveragePooling2D()(c4)\n\n c5 = model.layers[-1].output\n c5 = GlobalAveragePooling2D()(c5)\n\n\n model = Model(inputs=model.input, outputs=(c1, c2, c3, c4, c5))\n\n # always save your weights after training or during training\n model.save_weights('first_try.h5')\n model.save('model_save')\n\n # define image generator without augmentation\n datagen = ImageDataGenerator(rescale=1. 
/ 255.)\n\n generator = datagen.flow_from_directory(\n source,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode=\"sparse\",\n shuffle=False)\n\n # generate and save features, labels and respective filenames\n steps = nb_samples / batch_size + 1\n X = model.predict_generator(generator, steps)\n Y = np.concatenate([generator.next()[1] for i in range(0, generator.samples, batch_size)])\n names = generator.filenames\n\n for n, i in enumerate(X):\n print(\"Saving \" + n + \" and \" + i)\n with open(dest + \"X-\" + str(img_height) + \"-c\" + str(n + 1) + \"-AVG.npy\", 'w') as f:\n np.save(f.name, i)\n\n if not os.path.exists(dest + \"Y.npy\"):\n with open(dest + \"Y.npy\", 'w') as f:\n np.save(f.name, Y)\n\n if not os.path.exists(dest + \"filenames.npy\"):\n with open(dest + \"filenames.npy\", 'w') as f:\n np.save(f.name, names)", "def read_dataset(image_dir: str = IMAGE_DIR, dump: bool = True, **kwargs):\n global TRAIN_X, TRAIN_Y\n logdir = \"logs/scalars/\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tensorboard_callback = TensorBoard(log_dir=logdir)\n\n base_model = InceptionV3(include_top=False,\n weights='imagenet',\n input_shape=(WIDHT, HEIGHT, 3))\n for layer in base_model.layers:\n layer.trainable = False\n\n model = Sequential()\n model.add(base_model)\n model.add(GlobalAveragePooling2D())\n # model.add(Dense(512, activation='relu'))\n model.add(Dense(LABEL_SIZE, activation='softmax'))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'],\n )\n\n def define_label(parent_name):\n return \"-\".join(parent_name.split('-')[1:])\n\n for subdir, dirs, files in os.walk(image_dir):\n for file in files:\n path = pathlib.Path(subdir).absolute() / file\n image_label = define_label(path.parent.name)\n TRAIN_Y.append(image_label)\n\n label_encoder = LabelEncoder()\n TRAIN_Y = label_encoder.fit_transform(TRAIN_Y)\n TRAIN_Y = np.array(to_categorical(TRAIN_Y, num_classes=LABEL_SIZE))\n\n count = 0\n current_length_train_x = 0\n\n for subdir, dirs, files in os.walk(image_dir):\n print(f'PATH: {subdir} is processing')\n count += 1\n for file in files:\n path = pathlib.Path(subdir).absolute() / file\n image = load_img(str(path), target_size=WH)\n TRAIN_X.append(np.array(image))\n\n if count % 40 == 0:\n slice_left = current_length_train_x\n slice_right = slice_left + len(TRAIN_X)\n current_length_train_x = slice_right\n # convert to binary matrix (120 labels at all) 2^10 = 128\n # normalize image\n # split image\n\n # TODO: make active on resume iterations\n # if count == 40:\n # # make empty\n # TRAIN_X = []\n # model = load_model(f'{model_name}_iter_40.dump')\n # continue\n\n x_train, x_test, y_train, y_test = train_test_split(\n np.array(TRAIN_X),\n TRAIN_Y[slice_left:slice_right],\n test_size=0.2,\n random_state=69,\n )\n\n # make empty\n TRAIN_X = []\n\n augs_gen.fit(x_train)\n model.fit_generator(\n augs_gen.flow(x_train, y_train, batch_size=25),\n validation_data=(x_test, y_test),\n validation_steps=1000,\n steps_per_epoch=1000,\n epochs=20,\n verbose=1,\n callbacks=[tensorboard_callback],\n )\n del x_train, x_test, y_train, y_test\n model.save(f'{model_name}_iter_{count}.dump')\n\n print(f'Executed {count} / 121')\n print('Prepare to write data on the disk')\n # if dump:\n # with open(DATA_DIR / 'xes.dump', 'wb') as file_x:\n # pickle.dump(TRAIN_X, file_x)\n # with open(DATA_DIR / 'ykes.dump', 'wb') as file_y:\n # pickle.dump(TRAIN_Y, file_y)\n\n # print('Dumped on the disk')\n # time.sleep(5)", "def 
load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()", "def load_data(datafile, num_class, save=False, save_path='dataset.pkl'):\n train_list = open(datafile, 'r')\n labels = []\n images = []\n for line in train_list:\n tmp = line.strip().split(' ')\n filepath = tmp[0]\n print(filepath)\n img = Image.open(filepath)\n img = prep.resize_image(img, 224, 224)\n np_img = prep.pil_to_nparray(img)\n images.append(np_img)\n\n # one-hot encoder\n index = int(tmp[1])\n label = np.zeros(num_class)\n label[index] = 1\n labels.append(label)\n if save:\n pickle.dump((images, labels), open(save_path, 'wb'))\n return images, labels", "def load_model_label(model_path, label_bin_path):\n # load the model and label binarizer\n print(\"[INFO] loading network and label binarizer...\")\n model = load_model(model_path)\n lb = pickle.loads(open(label_bin_path, \"rb\").read())\n return model, lb", "def extract_labels(filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)[0]\n #print('check', magic, num_items)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels)\n return labels", "def convert_labels() -> None:\n data_folder = 'images'\n validation_split = 0.10\n\n # Convert annotations and split into validation and train set\n number_images = int(len(os.listdir(data_folder)) / 2)\n train_size = int(number_images * (1 - validation_split))\n val_size = number_images - train_size\n\n print(f'Training dataset size: {train_size}')\n print(f'Validation dataset size: {val_size}')\n\n with open('train.txt', 'w') as train_file, open('val.txt', 'w') as val_file:\n files = os.listdir(data_folder)\n print(len(files))\n # shuffle otherwise validation is from the same session\n random.shuffle(files)\n processed = 0\n for file_name in files:\n if file_name.split('.')[1] == 'jpg':\n # if image has no labels\n write = False\n if processed < train_size:\n file_to_write = train_file\n else:\n file_to_write = val_file\n\n with open(f'{data_folder}/{file_name}'.split('.')[0] + '.txt') as label_file:\n labels = []\n for line in label_file:\n line = line.split(' ')\n line[-1] = line[-1].rstrip()\n\n img = cv2.imread(f'{data_folder}/{file_name}')\n img_height = img.shape[0]\n img_width = img.shape[1]\n \n x = float(line[1]) * img_width\n y = float(line[2]) * img_height\n w = float(line[3]) * img_width\n h = float(line[4]) * img_height\n\n xmin = int(x - w/2)\n ymin = int(y - h/2)\n xmax = int(x + w/2)\n ymax = int(y + h/2)\n\n labels.append(f' {xmin},{ymin},{xmax},{ymax},{line[0]}')\n if len(labels) > 0:\n write = True\n file_to_write.write(f'{data_folder}/{file_name}')\n for label in labels:\n file_to_write.write(label)\n if write:\n file_to_write.write('\\n') \n processed += 1\n print(f'[{processed}/{number_images}] Processed {file_name}')", "def extract_labels(filename, one_hot=False):\n\tprint('Extracting', filename)\n\twith gzip.open(filename) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2049:\n\t\t\traise ValueError('Invalid magic number %d in MNIST label file: %s' %(magic, filename))\n\t\tnum_items = _read32(bytestream)\n\t\tbuf = 
bytestream.read(num_items)\n\t\tlabels = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tif one_hot:\n\t\t\treturn dense_to_one_hot(labels)\n\t\treturn labels", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n return dense_to_one_hot(labels)", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = pickle.load(f, encoding='latin1')\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\r\n Y = np.array(Y)\r\n return X, Y", "def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = load_pickle(f)\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000,3072)\r\n Y = np.array(Y)\r\n return X, Y", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = load_pickle(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000,3072)\n Y = np.array(Y)\n return X, Y", "def load_as_one_hot(self):\n\n labels = [] \n examples = [] \n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n # load examples and labels\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_feat_list = read_cuis(file_path)\n examples.append(' '.join(file_feat_list))\n \n string_label = doc2label[doc_id]\n int_label = LABEL2INT[string_label]\n labels.append(int_label)\n\n examples = self.token2int.texts_to_matrix(examples, mode='binary')\n\n return examples, labels", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n data_dict = cPickle.load(f)\r\n ims = data_dict['data']\r\n coarse_labels = np.array(data_dict['coarse_labels'])\r\n fine_labels = np.array(data_dict['fine_labels'])\r\n return ims, coarse_labels, fine_labels", "def save2file(self):\n ids_input = []\n labels_input = []\n ids_path = os.path.join(self.path, 'ids')\n if not os.path.exists(ids_path):\n os.makedirs(ids_path)\n labels_path = os.path.join(self.path, 'labels')\n if not os.path.exists(labels_path):\n os.makedirs(labels_path)\n ids_total = len(self.test)\n for i in range(ids_total):\n ids_input = self.test[i][0]\n labels_input = self.test[i][1]\n file_name = \"ids/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(ids_input, dtype=np.int32).tofile(file_path)\n file_name = \"labels/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(labels_input, dtype=np.int32).tofile(file_path)\n print(\"\\n ****** Success! 
******\\n \")", "def extract_labels(filename,tag,one_hot):\n print('Extracting labels',filename)\n return extractdb_labels(filename,tag,one_hot=one_hot)", "def extract_labels(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def labels_for_training_data():\n current_id = 0\n label_ids = dict()\n faces, faces_ids = list(), list()\n\n # Go through directories and find label and path to image\n for root, dirs, files in walk('data/'):\n for file in files:\n if file.endswith('.jpg') or file.endswith('.png'):\n img_path = path.join(root, file)\n label = path.basename(root).replace(' ', '-').lower()\n if label not in label_ids:\n label_ids[label] = current_id\n current_id += 1\n id_ = label_ids[label]\n\n test_img = cv2.imread(img_path)\n test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\n if test_img is None:\n print('Image not loaded properly')\n continue\n\n faces.append(test_img)\n faces_ids.append(id_)\n\n # Make directory with labels doesn't exist make directory and file with labels\n if not path.exists('labels/'):\n makedirs('labels/')\n with open('labels/face-labels.pickle', 'wb') as file:\n pickle.dump(label_ids, file)\n\n return faces, faces_ids", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def pickle_nn(clf):\n\n filename = 'nnMLPClass'\n outfile = open(filename, 'wb')\n pickle.dump(clf, outfile)\n outfile.close()", "def load_labels(labels_dir, trial_name):\n labels_path = labels_dir + trial_name + \".txt\"\n raw_labels_data = np.genfromtxt(labels_path, dtype=np.int,\n converters=LABELS_CONVERTERS,\n usecols=LABELS_USECOLS)\n #print(\"rawlabelsdata: \", raw_labels_data)\n #print(get_first_frame(labels_path))\n frames = np.arange(get_first_frame(labels_path), get_last_frame(labels_path)+1, dtype=np.int)\n #print(\"frames: \", frames)\n #print(frames.shape)\n #labels = np.zeros(frames.shape, dtype=np.int)\n labels1 = []\n #print(labels)\n for start, end, label in raw_labels_data:\n #mask = (frames >= start) & (frames <= end)\n #print(start)\n #print(end)\n i = start\n while(i<end):\n if(i%6 == 0):\n labels1.append(label)\n i = i+1\n\n #labels[mask] = label\n #print(\"labels[mask]: \",labels[mask])\n labels1 = np.array(labels1)\n #print(labels1)\n labels_data = labels1.reshape(-1,1)\n #print(labels1.shape)\n #print(\"labels: \", labels_data)\n \n return labels_data", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def pickle_examples(paths, save_path,train_mark):\n with open(save_path, 'wb') as ft:\n for p in paths:\n 
label = int(os.path.basename(p).split(\"_\")[0])\n with open(p, 'rb') as f:\n if train_mark == True:\n print(\"Train: img2bny %s\" % p, label)\n else:\n print(\"Val: img2bny %s\" % p, label)\n img_bytes = f.read()\n r = random.random()\n example = (label, img_bytes)\n pickle.dump(example, ft)", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def extract_labels(f, one_hot=False, num_classes=10):\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def _label_encoding(self):\n for feat in self.cat_feats:\n if self.train:\n lbl = preprocessing.LabelEncoder()\n lbl.fit(self.dataframe[feat].values)\n self.dataframe_d_copy.loc[:,feat] = lbl.transform(self.dataframe[feat].values)\n self.label_encoders[feat] = lbl\n else:\n lbl = self.encoders[feat]\n self.dataframe_d_copy.loc[:,feat] = lbl.transform(self.dataframe[feat].values)\n \n if self.train:\n encoder_path = f\"{self.output_path}/_label_encoder.pkl\"\n self.cat_feats_cfg['encoder_path'] = encoder_path\n joblib.dump(self.label_encoders, encoder_path)\n \n return self.dataframe_d_copy", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(10000)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f, encoding='latin1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(\"float64\")\n Y = np.array(Y)\n return X, Y", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def load_data(fname):\r\n # Open file with pickle\r\n with open(fname, 'rb') as f:\r\n data = pickle.load(f)\r\n # Split the images and labels\r\n images = data[:, :-1]\r\n labels = data[:, -1].astype(int)\r\n # Encode one hot labels\r\n one_hot_labels = np.zeros((len(labels), len(np.unique(labels))))\r\n one_hot_labels[np.arange(len(labels)), labels] = 1\r\n return images, 
one_hot_labels", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def convert_pickle_to_tfrecord(input_files, output_file):\n print('Generating %s' % output_file)\n with tf.python_io.TFRecordWriter(output_file) as writer:\n # draw 10 random number for getting 10 random classes from Imagenet (fixed value for reproducibility)\n # class_id = [145, 153, 289, 404, 405, 510, 805, 817, 867, 950] # random.sample(range(0, 999), 10)\n # class_id = [153, 156, 161, 174, 197, 207, 215, 216, 218, 224, 227, 230, 236, 254, 260] # 15 dog classes (also used in DAC)\n\n # count = np.zeros(shape=len(class_id))\n for input_file in input_files:\n data_dict = read_pickle_from_file(input_file)\n data = data_dict['data']\n mean_img = data_dict['mean']\n labels = data_dict['labels']\n # Labels are indexed from 1, shift it so that indexes start at 0 (imagenet)\n labels = [i - 1 for i in labels]\n\n num_entries_in_batch = len(labels)\n print('Converting %s' % input_file)\n for i in range(num_entries_in_batch):\n # if labels[i] in class_id:\n # labels[i] = class_id.index(labels[i]) # put the labels into the range of 0 to no. clusters\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'height': _int64_feature(64),\n 'width': _int64_feature(64),\n 'depth': _int64_feature(3),\n 'image': _bytes_feature(data[i].tobytes()),\n 'mean_img': _bytes_feature(mean_img.tobytes()),\n 'label': _int64_feature(labels[i])\n }))\n writer.write(example.SerializeToString())\n # count[labels[i]] += 1 # count number of samples per class\n # for idx, num in enumerate(count):\n # print('Number of samples of class %d: %d' % (idx, num))\n # print('Total Number of samples %d' % np.sum(count))", "def read_data(feature_file, label_file):", "def extract_labels(f, one_hot=False, num_classes=10):\n\tprint('Extracting', f.name)\n\twith gzip.GzipFile(fileobj=f) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2049:\n\t\t\traise ValueError('Invalid magic number %d in MNIST label file: %s' %\n\t\t\t\t\t\t\t\t\t\t\t (magic, f.name))\n\t\tnum_items = _read32(bytestream)\n\t\tbuf = bytestream.read(num_items)\n\t\tlabels = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tif one_hot:\n\t\t\treturn dense_to_one_hot(labels, num_classes)\n\t\treturn labels", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def read_batch(self):\n imgs = []\n labels = []\n idx = np.random.choice(self.nImgs,self.batch_size)\n \tfor i in idx:\n imgs.append(cv2.imread(self.data_files[i]))\n \t labels.append(cv2.imread(self.label_files[i]))\n \timgs,labels = np.array(imgs),np.array(labels)\n imgs = (imgs - self.mean)/self.stddev\n \tlabels = (labels - self.mean)/self.stddev\n return imgs,labels", "def _load_cifar_batch(fpath, label_key='labels'):\n if isinstance(fpath, (os.PathLike, str, bytes)):\n with open(fpath, 'rb') as f:\n 
return _load_cifar_batch(f, label_key)\n\n d = pickle.load(fpath, encoding='bytes')\n # decode utf8\n d_decoded = {}\n for k, v in d.items():\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n data = d['data']\n labels = d[label_key]\n\n data = data.reshape(data.shape[0], 3, 32, 32).transpose([0, 2, 3, 1])\n return data, labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def _pickle_load(filename):\n with open(filename, 'rb') as f:\n save = pickle.load(f)\n image = save['image'].astype(np.float32)\n label = np.float32(save['label'])\n label = reformat_labels(label)\n return image, label", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. 
See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def __load_Model(self):\r\n PrintsForUser.printProcess(\"[INFO] Loading network...\")\r\n \r\n self.__model = load_model(self.__model_path)\r\n self.__lb = pickle.loads(open(self.__labels_path, \"rb\").read())", "def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def download():\n\n trainset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=True, download=True)\n testset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=False, download=True)\n train_images = numpy.array(trainset.data)\n train_labels = numpy.array(trainset.targets)\n test_images = numpy.array(testset.data)\n test_labels = numpy.array(testset.targets)\n\n assert numpy.max(train_images) == 255\n\n train_images = train_images/255.\n test_images = test_images/255.\n\n utils.write_hdf5(paths.cifar10_train_images_file(), train_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_train_images_file())\n utils.write_hdf5(paths.cifar10_test_images_file(), test_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_test_images_file())\n utils.write_hdf5(paths.cifar10_train_labels_file(), train_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_train_labels_file())\n utils.write_hdf5(paths.cifar10_test_labels_file(), test_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_test_labels_file())", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n # datadict = p.load(f)\n datadict = pickle.load(f, encoding = 'bytes')\n X = datadict[b'data']\n Y = datadict[b'labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def get_raw_data():\n\twith open('train_label.pkl', 'rb') as f:\n\t\ttrain_label = pickle.load(f)\n\n\twith open('train_image.pkl', 'rb') as f:\n\t\ttrain_data = pickle.load(f)\n\n\tprint(np.unique(np.asarray(train_label)))\n\n\treturn (train_label, np.asarray(train_data))", "def load_batch(filename: str) -> Tuple[ndarray, ndarray, ndarray]:\n dataDict = unpickle(filename)\n print(\"1\", dataDict[b\"data\"][1, :])\n X = (dataDict[b\"data\"] / 255).T\n print(\"2\", X[:, 1])\n y = np.array(dataDict[b\"labels\"])\n Y = np.eye(10)[y].T\n return X, Y, y", "def __init__(self, mod_param, epochNb=3):\n tf.reset_default_graph()\n weight_path = '../caffe_layers_value.pickle'\n model_path = mod_param.paths[\"save_model\"]\n model_path += '-'+str(epochNb)\n self.mod_param = mod_param\n self.labels = mod_param.labels\n self.n_labels = mod_param.n_labels\n \n # Initialize some tensorflow variables\n self.images_tf = tf.placeholder( tf.float32, [None, None, None, 3], name=\"images\")\n self.labels_tf = tf.placeholder( tf.int64, 
[None], name='labels')\n \n detector = Detector(mod_param)\n c1,c2,c3,c4,conv5, self.conv6, self.gap, self.output = detector.inference( self.images_tf )\n self.detector = detector\n \n self.sess = tf.InteractiveSession()\n saver = tf.train.Saver()\n saver.restore( self.sess, model_path )\n\n if re.match(\".*CAM.*_W_S\", self.mod_param.mod_type):\n self.classmap = self.detector.get_classmap( self.labels_tf, self.conv6 )", "def write_output_to_pickle(image_lst, img_dir, model,save_path=\"output_inception\", k=5, only_first_name=False):\n imgs = image_lst\n output_layer = []\n \n # Looping over images\n \n for img in tqdm(imgs):\n tv = model.transfer_values(image_path=join(img_dir, img))\n output_layer.append(tv)\n # scores = model.get_scores(pred=pred, k=k, only_first_name=only_first_name)\n # top_k.append(scores)\n \n # Building a dataframe with columns for image names, prediction array, scores\n # print(\"THIS:\",np.array(output_layer).ndim)\n # print(output_layer[0])\n \n df = pd.DataFrame({\n \"img\": imgs,\n \"transfer layer\": output_layer,\n })\n \n # saving to pickle\n \n df.to_pickle(save_path)", "def save_data(data_dir):\r\n for k in range(1,11):\r\n fold_name = 'fold' + str(k)\r\n print \"Saving\" + fold_name\r\n features, labels = process_audio(parent_path, [fold_name])\r\n labels = encode(labels)\r\n print \"Features of\", fold_name , \" = \", features.shape\r\n print \"Labels of\", fold_name , \" = \", labels.shape\r\n feature_file = os.path.join(data_dir, fold_name + '_x.npy')\r\n labels_file = os.path.join(data_dir, fold_name + '_y.npy')\r\n np.save(feature_file, features)\r\n print \"Saved \" + feature_file\r\n np.save(labels_file, labels)\r\n print \"Saved \" + labels_file", "def load_data_pkl(self):\n pkl_name = '{}/data/mini-imagenet-cache-{}.pkl'.format(self.root_dir, self.split)\n print('Loading pkl dataset: {} '.format(pkl_name))\n\n try:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f, encoding='bytes')\n image_data = data[b'image_data']\n class_dict = data[b'class_dict']\n except:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f)\n image_data = data['image_data']\n class_dict = data['class_dict']\n\n print(data.keys(), image_data.shape, class_dict.keys())\n data_classes = sorted(class_dict.keys()) # sorted to keep the order\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n idxs = class_dict[cls] \n np.random.RandomState(self.seed).shuffle(idxs) # fix the seed to keep label,unlabel fixed\n dataset_l[i] = image_data[idxs[0:self.n_label]]\n if self.n_unlabel>0:\n dataset_u[i] = image_data[idxs[self.n_label:]]\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes\n\n del image_data", "def loadLabeled(self):\n\n maxNumChannels = self._maxNumChannels # 4\n\n baseFilePath, ext = os.path.splitext(self.path)\n baseFilePath = baseFilePath.replace('_ch1', '')\n baseFilePath = baseFilePath.replace('_ch2', '')\n\n # load mask\n #labeledPath = dvMaskPath + '_mask.tif'\n #labeledData = tifffile.imread(labeledPath)\n\n maskFromLabelGreaterThan = 
0\n\n # load labeled\n for channelIdx in range(maxNumChannels):\n channelNumber = channelIdx + 1 # for _ch1, _ch2, ...\n stackListIdx = maxNumChannels + channelIdx # for index into self._stackList\n\n chStr = '_ch' + str(channelNumber)\n labeledPath = baseFilePath + chStr + '_labeled.tif'\n maskPath = baseFilePath + chStr + '_mask.tif'\n\n # if we find _labeeled.tif, load and make a mask\n # o.w. if we find _mask.tif then load that\n if os.path.isfile(maskPath):\n print(' bStack.loadLabeled() loading _mask.tif channelNumber:', channelNumber, 'maskPath:', maskPath)\n maskData = tifffile.imread(maskPath)\n self._stackList[stackListIdx] = maskData\n elif os.path.isfile(labeledPath):\n print(' bStack.loadLabeled() loading channelNumber:', channelNumber, 'labeledPath:', labeledPath)\n labeledData = tifffile.imread(labeledPath)\n self._stackList[stackListIdx] = labeledData > maskFromLabelGreaterThan\n else:\n # did not find _mask or _labeled file\n pass\n\n # erode _mask by 1 (before skel) as skel was getting mized up with z-collisions\n #self._dvMask = bimpy.util.morphology.binary_erosion(self._dvMask, iterations=2)\n\n # bVascularTracing.loadDeepVess() uses mask to make skel", "def label_training_data(input_path, output_path):\r\n import shutil\r\n image_files = [file for file in os.listdir(path=input_path) if '.JPG' in file or '.jpeg' in file]\r\n \r\n for file in image_files:\r\n file_input_path = os.path.join(input_path,file)\r\n \r\n img = cv2.imread(file_input_path)\r\n \r\n file_output_path = os.path.join(output_path, classify_face(img))\r\n \r\n try:\r\n os.makedirs(file_output_path)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n shutil.move(file_input_path, file_output_path)", "def label_file(input_file):\n file_name, file_ext = os.path.splitext(input_file)\n output_file = file_name + \".label\" + file_ext\n\n # read input file and save them in dict\n features = load_protobuf(input_file)\n\n # for each obstacle ID, sort dict by their timestamp\n fea_trajs = build_trajectory(features)\n\n # for each obstacle ID, label them, remove record cannot be labeled\n for fea_key, fea_traj in fea_trajs.items():\n fea_traj = fea_trajs[fea_key]\n fea_traj = TrajectoryToSample.clean(fea_traj)\n fea_traj = TrajectoryToSample.label(fea_traj)\n for i, fea in enumerate(fea_traj):\n if not fea.HasField('label_update_time_delta'):\n del fea_traj[i]\n continue\n if fea.label_update_time_delta < parameters['feature']['threshold_label_time_delta']:\n del fea_traj[i]\n fea_trajs[fea_key] = fea_traj\n # save them in the output file with the same format as the input file\n save_protobuf(output_file, fea_trajs.values())", "def load_png_data():\n m=1 #训练文件个数\n n=1 #测试文件个数\n train_set_x=[]#训练数据集\n train_set_y=[]#训练标签集\n\n test_set_x=[]#测试数据集\n test_set_y=[]#测试标签集\n\n train_data={}\n\n train_path=r\".\\dataset\\train_label\\\\\"\n dirs=os.listdir(train_path)\n\n for file in dirs:\n srcImg=cv2.imread(train_path+file)\n #将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(train_path+str(m)+'.npy',npImg)\n train_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\trainset\\\\\"+str(m)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\trainset\\\\\" + str(m) + '.npy', npNoiseImg)\n train_set_y.append(npNoiseImg)\n m=m+1\n train_data['train_set_x']=train_set_x\n train_data['train_set_y']=train_set_y\n\n test_path = r\".\\dataset\\test_label\\\\\"\n dirs_test = 
os.listdir(test_path)\n for file in dirs_test:\n srcImg=cv2.imread(test_path+file)\n #将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(test_path+str(n)+'.npy',npImg)\n test_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\testset\\\\\"+str(n)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\testset\\\\\" + str(n) + '.npy', npNoiseImg)\n test_set_y.append(npNoiseImg)\n n=n+1\n train_data['test_set_x']=test_set_x\n train_data['test_set_y']=test_set_y\n\n np.savez(r\"E:\\DeepLearning\\CNNDenoiser\\dataset\\train_data.npz\",**train_data)", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def label(filenames, train_path='../data/train_molecules_30.mat'):\n unlabeled = [scipy.io.loadmat(fname) for fname in filenames]\n unlabeled_X = np.vstack([data['X'] for data in unlabeled])\n X, Y = load_data(train_path, shape=(-1, 30, 30, 30))\n\n num_unlabeled = unlabeled_X.shape[0]\n unlabeled_Y = np.zeros(num_unlabeled) - 1\n unlabeled_Y = unlabeled_Y.reshape((-1, 1))\n Y = Y.reshape((-1, 1))\n Y_all = np.vstack((Y, unlabeled_Y))\n\n X_all = np.vstack((X, unlabeled_X))\n X_all = X_all.reshape((-1, 27000))\n\n label_prop_model = LabelSpreading()\n label_prop_model.fit(X_all, Y_all)\n Y_all = label_prop_model.transduction_\n unlabeled_Y = Y_all[num_unlabeled:]\n return (unlabeled_X, unlabeled_Y), (X_all, Y_all)", "def new_mrcnn(semantic_label_file, output_label_file):\n img = skimage.io.imread(semantic_label_file)\n img = img[64:192, 64:192]\n img_labeled = skimage.measure.label(img, connectivity=1)\n idx = [np.where(img_labeled == label) for label in np.unique(img_labeled) if label]\n\n list_of_all_mask_indices = []\n list_of_all_class_ids = []\n for i in range(len(idx)):\n tmp = np.zeros(img.shape)\n tmp[idx[i]] = img[idx[i]]\n cur_class_id = np.unique(tmp)[1].astype(int)\n list_of_all_mask_indices.append(idx[i])\n list_of_all_class_ids.append(cur_class_id)\n np.save(output_label_file, [list_of_all_mask_indices, list_of_all_class_ids, len(list_of_all_class_ids)])", "def load_pickle(path):\n with open(path, 'rb') as f:\n data = cPickle.load(f)\n images = np.asarray([i/np.float32(255) for i in data['data']])\n labels = np.asarray(data['labels'], dtype='int32')\n X_train, X_test, y_train, y_test = train_test_split(images, labels, test_size=0.2)\n return X_train, y_train, X_test, y_test", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n 
length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = []\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels", "def read_images(self, img_name, label_name):\n image_string = tf.read_file(img_name)\n image_decoded = tf.image.decode_jpeg(image_string, channels=3)\n label_string = tf.read_file(label_name)\n label_decoded = tf.image.decode_jpeg(label_string, channels=1)\n return image_decoded, label_decoded", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def load_batch(batch_name):\n data_dict = unpickle('./datasets/cifar-10-batches-py/' + batch_name)\n X = data_dict[b'data'] / 255\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).reshape(10000, 3072).transpose(1,0)\n y = data_dict[b'labels']\n Y = make_one_hot(y)\n return X, Y, y", "def read():\n\n # load json and create model\n base_model = _model_builder.Network(0, model_type=\"load_model\")\n\n #load image and process\n digit = Image.open(\"./data/number.jpg\").convert(\"L\")\n digit = ImageOps.expand(digit,border=60,fill='black')\n digit = digit.resize((28, 28))\n\n #flatten the matrix (for input into MLP network todo:CNN)\n digit_flat = numpy.zeros((1, 784))\n counter = 0\n for j in range(0, 28):\n for i in 
range(0, 28):\n digit_flat[0][counter] = (digit.getpixel((i, j)))/255.0\n counter = counter+1\n\n #predict\n os.system('clear')\n base_model.predict(digit_flat)", "def load(self):\n\n x = [] # input documents (n_docs, max_seq_len)\n labels = [] # targets we are predicting for each input\n\n for file_path in glob.glob(self.train_dir + '*.txt'):\n tokens = read_tokens(file_path)\n unique = list(set(tokens))\n x_count = round(len(unique) * 0.85)\n\n for _ in range(self.samples_per_doc):\n random.shuffle(unique)\n x.append(' '.join(unique[:x_count]))\n labels.append(' '.join(unique[x_count:]))\n\n # make x and y\n pkl = open('Model/tokenizer.p', 'rb')\n self.tokenizer = pickle.load(pkl)\n x = self.tokenizer.texts_to_matrix(x, mode='binary')\n y = self.tokenizer.texts_to_matrix(labels, mode='binary')\n\n # column zero is empty\n return x, y[:,1:]", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def load_data(outputpath):\n ext = '.npy'\n x_train = np.load(os.path.join(outputpath, 'X_train' + ext))\n y_train_binary = np.load(os.path.join(outputpath, 'y_train' + ext))\n x_val = np.load(os.path.join(outputpath, 'X_val' + ext))\n y_val_binary = np.load(os.path.join(outputpath, 'y_val' + ext))\n x_test = np.load(os.path.join(outputpath, 'X_test' + ext))\n y_test_binary = np.load(os.path.join(outputpath, 'y_test' + ext))\n with open(os.path.join(outputpath, 'labels.json'), 'r') as fn:\n labels = json.load(fn)\n return x_train, y_train_binary, x_val, y_val_binary, \\\n x_test, y_test_binary, labels", "def savemodel(self, fname):\n if not fname.endswith('.gz'):\n fname += '.gz'\n D = {'clf':self.clf, 'vocab':self.vocab,\n 'idxlabelmap':self.labelmap}\n with gzip.open(fname, 'w') as fout:\n dump(D, fout)\n print 'Save model into file: {}'.format(fname)", "def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... 
Exiting now\"\n sys.exit()", "def load_cifar_data(filepath):\n with open(filepath, 'rb') as f:\n data = pickle.load(f, encoding='bytes')\n return data[b'data'], data[b'labels']", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb')as f:\r\n datadict = p.load(f)\r\n \r\n X = datadict['data']\r\n Y = datadict['labels']\r\n \r\n print X.shape\r\n X = X.reshape(X.shape[0], SHAPE[0], SHAPE[1], SHAPE[2])\r\n Y = np.array(Y)\r\n return X, Y", "def main(args):\n data_transform = transforms.Compose([\n transforms.Scale((256, 256)),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n dataset = datasets.ImageFolder(root=args.root_dir, transform=data_transform)\n dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, \n shuffle=False, num_workers=0, pin_memory=True)\n net = get_feature_extractor()\n\n if torch.cuda.is_available():\n net = net.cuda()\n\n features_out = np.zeros((len(dataset), 4096))\n labels_out = np.zeros(len(dataset))\n \n p = progressbar.ProgressBar(widgets=[progressbar.ETA(), ' ', progressbar.Percentage()])\n for i, samples in p(enumerate(dataloader)):\n images, labels = samples\n if torch.cuda.is_available():\n images = images.cuda()\n images = Variable(images)\n features = net(images).cpu().data.numpy()\n features_out[i*BATCH_SIZE:i*BATCH_SIZE+BATCH_SIZE] = features\n labels_out[i*BATCH_SIZE:i*BATCH_SIZE+BATCH_SIZE] = labels.int().numpy()\n print(i)\n\n with open(os.path.join(args.out, 'features.pickle'),'wb') as f:\n pickle.dump(features_out, f)\n with open(os.path.join(args.out, 'labels.pickle'),'wb') as f:\n pickle.dump(labels_out, f)", "def features_from_CNN(self):\n\n dataloader = self.datasetManager.get_dataloader()\n print(\"\\nFeatures obtention with CNN\")\n print(\"-\"*15)\n for i, batch in tqdm.tqdm(enumerate(dataloader)):\n img = self.to_device(batch[0])\n img_name = batch[2][0]\n \n temp = re.findall(r'\\d+', img_name)\n res = list(map(int, temp))\n X = res[-2]\n Y = res[-1]\n \n savepath = os.path.join(self.output_dir, 'data%i'%X)\n create_folder(savepath)\n \n out_CNN = self.network(img) \n \n torch.save(out_CNN, os.path.join(savepath,'features_tensor%i.pt'%Y))", "def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def load_and_pickle_mnist():\n\n if os.path.exists(pickle_file):\n print(\"Pickle file found! 
Unpickling...\")\n with open(pickle_file, \"rb\") as pf:\n mnist = pickle.load(pf)\n else:\n mnist = read_data_sets(data_dir, one_hot=True)\n\n with open(pickle_file, \"wb\") as pf:\n pickle.dump(mnist, pf, pickle.HIGHEST_PROTOCOL)\n\n # Remove .gz files from the mnist download.\n for ptr in glob.glob(os.path.join(data_dir, \"*.gz\")):\n os.remove(ptr)\n\n return mnist", "def _read_labels(test_data=False):\n if not test_data:\n filename = os.path.join(FOLDER_PATH, 'train-labels.idx1-ubyte')\n else:\n filename = os.path.join(FOLDER_PATH, 't10k-labels.idx1-ubyte')\n if not os.path.exists(filename):\n raise ValueError('The file dose not exist.')\n \n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer([filename])\n \n # The first 8 bytes contain file information:\n # [offset] [type] [value] [description]\n # 0000 32 bit integer 0x00000801(2049) magic number\n # 0004 32 bit integer 60000/10000 number of items \n # ...(label value)\n header_bytes = 8\n # Every record consists of a label, with a fixed number of bytes for each.\n record_bytes = 1\n \n # Create a FixedLengthRecordReader to read record.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes,\n header_bytes=header_bytes)\n _, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8, then cast to int32.\n record = tf.cast(tf.decode_raw(value, tf.uint8), tf.int32)\n \n # Reshape from [1] to a scalar shape [].\n label = tf.reshape(record, [])\n\n return label", "def load(self, filename='nn_model.pkl'):\n with open(filename,'rb') as f:\n nn_model = pickle.load(f, encoding='bytes')\n f.close()\n\n self.W = nn_model.W\n self.b = nn_model.b\n\n self.num_bn_layers = nn_model.num_bn_layers\n self.bn = nn_model.num_bn_layers > 0\n self.hiddens = nn_model.hiddens\n self.nlayers = len(nn_model.hiddens) + 1\n self.input_size = nn_model.input_size\n self.output_size = nn_model.output_size\n self.activations = nn_model.activations\n self.criterion = nn_model.criterion\n self.lr = nn_model.lr\n self.momentum = nn_model.momentum\n\n if self.bn:\n self.bn_layers = nn_model.bn_layers\n\n self.train_mode = nn_model.train_mode\n self.batch_size = nn_model.batch_size\n self.epochs = nn_model.epochs", "def dump(self, img_labels, images_base_directory, destination_pickle_path, destination_pickle_file_name, preprocessing_transformer):\n # Load images_base_directory\n # files_per_pickle = len(img_labels) // parts\n # pickle_part_num = 1\n result = None\n images = []\n for i in range(len(img_labels)):\n file_name = os.path.join(images_base_directory, img_labels.iloc[i,0]) + '.npy'\n image = np.load(file_name)\n h, w = image.shape\n image = torch.from_numpy(image).reshape(1, h, w)\n image = image.float()\n\n # apply preprocessing\n image = preprocessing_transformer(image)\n images.append(image)\n result = {\n \"images\": images\n }\n\n # Save final remaining parts\n self._save_part(destination_pickle_path, destination_pickle_file_name, result)", "def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n 
dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, im_file in enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes", "def load_data():\n (trainx, trainy), (valx, valy), (testx, testy) = pickle.load(gzip.open(\"data/mnist_one_hot.pkl.gz\"),\n encoding=\"latin1\")\n trainy = np.argmax(trainy, axis=1)\n valy = np.argmax(valy, axis=1)\n testy = np.argmax(testy, axis=1)\n trainx = trainx * 2 - 1\n valx = valx * 2 - 1\n testx = testx * 2 - 1\n return (trainx.reshape(-1, 1, 28, 28), trainy), (valx.reshape(-1, 1, 28, 28), valy), (testx.reshape(-1, 1, 28, 28),\n testy)", "def load_batch(fpath, label_key='labels'):\n f = open(fpath, 'rb')\n if sys.version_info < (3,):\n d = cPickle.load(f)\n else:\n d = cPickle.load(f, encoding='bytes')\n # decode utf8\n d_decoded = {}\n for k, v in d.items():\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n f.close()\n data = d['data']\n labels = d[label_key]\n\n data = data.reshape(data.shape[0], 3, 32, 32)\n return data, labels", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n #一个样本由标签和图像数据组成\n #3072 = 32 x 32 x 3\n data_dict = p.load(f, encoding= 'bytes')\n images = data_dict[b'data']\n labels = data_dict[b'labels']\n #把原始数据结构调整为BCWH batches, channels, width, height\n images = images.reshape(10000, 3, 32, 32)\n #tensorflow 处理图像数据的结构:BWHC\n #把C移动到最后一个维度\n images = images.transpose(0, 2, 3, 1)\n\n labels = np.array(labels)\n return images, labels", "def get_data(self):\n\n if not self.checked:\n self.check_cache()\n h5f = h5py.File(self.data_filename, 'r')\n train_lbl = h5f['train_lbl'][:]\n train_img = h5f['train_img'][:]\n val_lbl = h5f['val_lbl'][:]\n val_img = h5f['val_img'][:]\n h5f.close()\n return train_img, train_lbl, val_img, val_lbl", "def __savePreProcessedData(self):\n np.savetxt(self.X_filename, self.X, delimiter=',')\n np.savetxt(self.y_filename, self.le.fit_transform(self.y), delimiter=',')\n #Need to save the label Enconder to inverse transform later\n joblib.dump(self.le, self.le_filename)\n\n print(\"Saved X and y\")", "def get_data(pkl_fname, label, sample, replicate, \n incl_curvature=False,\n load_attn1=None, load_attn2=None, \n modelpkl_fname1=None, modelpkl_fname2=None,\n preloadn2v=False,\n out_channels=8, heads=8, negative_slope=0.2, dropout=0.4, \n verbose=True):\n pdfp = os.path.split(pkl_fname)[0]\n \n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n \n if load_attn1 is None and load_attn2 is None and not incl_curvature and preloadn2v is None:\n\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = 
torch.from_numpy(node_features).float()\n labels = datapkl[label]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # load all edge_feat\n elif load_attn1 is not None and load_attn2 is not None and incl_curvature and preloadn2v is not None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n # second attention\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn2]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl 
# clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname2\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn2 = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(attn2, dtype=float),\n torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # only load attn1\n elif load_attn1 is not None and load_attn2 is None and not incl_curvature and preloadn2v is None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = 
GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n# F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n# n2v = utils.node2vec_dot2edge(datapkl['adj'], \n# os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n# preloaded=preloadn2v)\n# edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n# torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n# torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n edge_attr = torch.tensor(attn, dtype=float) \n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n\n # attn2 \n elif load_attn1 is None and load_attn2 is not None and not incl_curvature and preloadn2v is None:\n # second attention\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn2]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = 
glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname2\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn2 = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n# F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n# n2v = utils.node2vec_dot2edge(datapkl['adj'], \n# os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n# preloaded=preloadn2v)\n# edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n# torch.tensor(attn2, dtype=float),\n# torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n# torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n edge_attr = torch.tensor(attn2, dtype=float)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean \n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # curvature\n elif load_attn1 is None and load_attn2 is None and incl_curvature and preloadn2v is None:\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[label]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n # add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n# n2v = utils.node2vec_dot2edge(datapkl['adj'], \n# os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n# preloaded=preloadn2v)\n# edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n# torch.tensor(attn2, dtype=float),\n# torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n# torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n edge_attr = torch.tensor(utils.range_scale(F_e)).reshape(-1,1)\n d = Data(x=node_features, edge_index=edge_index, edge_attr=edge_attr, y=labels)\n del node_features,edge_index,labels,edge_attr\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # n2v\n elif load_attn1 is None and load_attn2 is None and not incl_curvature and preloadn2v is not None:\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[label]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n 
pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n # add other edge feats\n# F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n# edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n# torch.tensor(attn2, dtype=float),\n# torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n# torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n edge_attr = torch.tensor(utils.range_scale(n2v)).reshape(-1,1)\n d = Data(x=node_features, edge_index=edge_index, edge_attr=edge_attr, y=labels)\n del node_features,edge_index,labels,edge_attr\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # attn1 + attn2\n elif load_attn1 is not None and load_attn2 is not None and not incl_curvature and preloadn2v is None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n # second attention\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn2]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n 
pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname2\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn2 = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(attn2, dtype=float)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # attn1 + attn2 + n2v\n elif load_attn1 is not None and load_attn2 is not None and not incl_curvature and preloadn2v is not None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, 
concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n # second attention\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn2]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname2\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn2 = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(attn2, dtype=float),\n torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # attn1 + attn2 + curvature\n elif load_attn1 is not None and load_attn2 is not None and 
incl_curvature and preloadn2v is None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n # second attention\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn2]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl 
= glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname2\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn2 = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(attn2, dtype=float),\n torch.tensor(utils.range_scale(F_e)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # n2v + curvature\n elif load_attn1 is None and load_attn2 is None and incl_curvature and preloadn2v is not None:\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[label]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n # add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n edge_attr = torch.cat((torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n d = Data(x=node_features, edge_index=edge_index, edge_attr=edge_attr, y=labels)\n del node_features,edge_index,labels,edge_attr\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n \n # attn1 + curvature\n elif load_attn1 is not None and load_attn2 is None and incl_curvature and preloadn2v is None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, 
edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(utils.range_scale(F_e)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n \n # attn1 + n2v\n elif load_attn1 is not None and load_attn2 is None and not incl_curvature and preloadn2v is not None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature 
\n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n \n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # attn1 + n2v + curvature\n elif load_attn1 is not None and load_attn2 is None and incl_curvature and preloadn2v is not None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n 
pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n\n else:\n print('Can only load edge feats of a specific entry set type. Exiting.')\n exit()\n \n return d", "def load_model(file_index):\n normal, abnormal, all = read_in(file_index, 1, 2, 0.3)\n autoencoder = keras.models.load_model('Working_Data/ae_patient_' + str(file_index) + '_dim' + str(100) + '_model.h5')\n reconstructed = autoencoder.predict(all)\n reconstruction_save = \"Working_Data/reconstructed_cdae_10d_Idx\" + str(file_index) + \".npy\"\n np.save(reconstruction_save, reconstructed)", "def pickle_dump_files():\n with open('data/' + dataset_name + '_' + model_name + '_' + 'predictions', 'wb') as f:\n pickle.dump(predictions, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'state_sentences', 'wb') as f:\n pickle.dump(final_state_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'decoded_sentences', 'wb') as f:\n pickle.dump(final_decoded_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'ids', 'wb') as f:\n pickle.dump(idx, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'exemplars', 'wb') as f:\n pickle.dump(exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'counter_exemplars', 'wb') as f:\n pickle.dump(counter_exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_exemplar_words', 'wb') as f:\n pickle.dump(top_exemplar_words, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_counter_exemplar_words', 'wb') as f:\n pickle.dump(top_counter_exemplar_words, f)", "def load_tiny_imagenet(directory):\n path_train, path_val, path_test = directory + '/train', directory + '/val', directory + '/test'\n labels = os.listdir(path_train)\n train_data = []\n train_labels = []\n for label in labels:\n imgs_path = os.path.join(path_train, label, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_data = []\n test_labels = []\n with open(path_val+'/val_annotations.txt', 'r') as f:\n val_annotations = [line.strip().split('\\t') for line in f]\n val_annotations = np.array(val_annotations)\n imgs_path = os.path.join(path_val, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n label = val_annotations[val_annotations[:, 0] == img_name, 1].astype('U9')\n 
test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.concatenate(test_labels)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, label, labels, imgs_path, img_name, img, imgs, val_annotations\n \n return train_data, train_labels, test_data, test_labels", "def load_data():\n global X, Y, X_final, Y_final, input_shape\n\n pickle_in = open(\"X_train.pickle\", \"rb\")\n X = pickle.load(pickle_in)\n pickle_in = open(\"y_train.pickle\", \"rb\")\n Y = pickle.load(pickle_in)\n\n pickle_in = open(\"X_test.pickle\", \"rb\")\n X_final = pickle.load(pickle_in)\n pickle_in = open(\"y_test.pickle\", \"rb\")\n Y_final = pickle.load(pickle_in)\n\n if K.image_data_format() == 'channels_first':\n input_shape = (3, img_rows, img_cols)\n else:\n input_shape = (img_rows, img_cols, 3)\n\n X = X.astype('float32')\n X /= 255\n X_final = X_final.astype('float32')\n X_final /= 255\n print('X shape:', X.shape)\n print(X.shape[0], 'Samples')\n\n Y_final = to_categorical(Y_final, num_classes)\n\n if not b_eval_advanced:\n Y = to_categorical(Y, num_classes)\n\n print(\"Y_final Shape\",Y_final.shape)", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _XlnetSequenceClassifierLoader\n jModel = _XlnetSequenceClassifierLoader(folder, spark_session._jsparkSession)._java_obj\n return XlnetForSequenceClassification(java_model=jModel)" ]
[ "0.6610099", "0.6471816", "0.6420202", "0.6321379", "0.6279507", "0.6236104", "0.62183523", "0.6211333", "0.6210031", "0.61808336", "0.61574364", "0.6154807", "0.61260164", "0.6123372", "0.6122588", "0.6109854", "0.60853416", "0.60820115", "0.6063963", "0.60560757", "0.6048186", "0.6045157", "0.6045082", "0.6041376", "0.60380185", "0.6035644", "0.60305023", "0.60225075", "0.60189265", "0.60162824", "0.6016278", "0.60142297", "0.6013441", "0.60068697", "0.59902763", "0.5974973", "0.5965739", "0.59603566", "0.5953463", "0.59522456", "0.5949958", "0.5945283", "0.5945281", "0.59419304", "0.59384406", "0.5937187", "0.59277445", "0.5925963", "0.59160966", "0.5914102", "0.59106624", "0.5907299", "0.5901361", "0.58998895", "0.5894479", "0.5890318", "0.5877355", "0.5875839", "0.5872133", "0.58679116", "0.58543265", "0.5838717", "0.58384526", "0.58360523", "0.5832116", "0.58314013", "0.58186305", "0.5786256", "0.5786256", "0.5776784", "0.5774297", "0.57484365", "0.57410127", "0.5740451", "0.5737099", "0.57331717", "0.5732414", "0.5726177", "0.5715879", "0.5710542", "0.5704258", "0.57032734", "0.5703226", "0.5700329", "0.5695663", "0.56930953", "0.5691564", "0.56818956", "0.568127", "0.5681217", "0.567967", "0.5668038", "0.5666031", "0.56609786", "0.5660731", "0.5652139", "0.56502867", "0.5646032", "0.56398135", "0.56380737", "0.56352836" ]
0.0
-1
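The long negative snippets in the preceding record all repeat one pattern: rebuild a two-layer GAT, load a saved state_dict onto CPU, run a forward pass that also returns the first layer's attention coefficients, then concatenate those coefficients (optionally with range-scaled Forman curvature or node2vec similarities) into edge_attr of a fresh Data object. Below is a condensed, hedged sketch of that pattern; it uses current PyTorch Geometric, where attention is requested with return_attention_weights=True rather than returned by default, and the dimensions, checkpoint name and usage lines are assumptions, not taken from the dataset.

import torch
import torch.nn.functional as F
from torch_geometric.data import Data
from torch_geometric.nn import GATConv

class AttnGAT(torch.nn.Module):
    # Two-layer GAT whose forward pass also exposes layer-1 attention.
    def __init__(self, in_dim, hidden, n_classes, heads=8):
        super().__init__()
        # add_self_loops=False keeps the returned attention aligned with the
        # input edge_index, so it can be reused directly as an edge feature.
        self.gat1 = GATConv(in_dim, hidden, heads=heads, concat=True,
                            add_self_loops=False)
        self.gat2 = GATConv(hidden * heads, n_classes, heads=heads, concat=False,
                            add_self_loops=False)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        # return_attention_weights=True makes GATConv also return
        # (edge_index, alpha) for this layer.
        x, (_, alpha) = self.gat1(x, edge_index, return_attention_weights=True)
        x = F.elu(x)
        x = self.gat2(x, edge_index)
        return F.log_softmax(x, dim=1), alpha

# Hypothetical usage: load a trained checkpoint, extract the attention, and
# attach it as an edge feature for a downstream edge-aware model.
# model = AttnGAT(d.num_node_features, 8, int(d.y.max()) + 1)
# model.load_state_dict(torch.load("gat_checkpoint.pkl", map_location="cpu"))
# model.eval()
# _, alpha = model(d)
# d = Data(x=d.x, edge_index=d.edge_index, edge_attr=alpha, y=d.y)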
group([0,3,4,10,2,3], 2) => iterator

Group an iterable into an iterable of n-tuples. Incomplete tuples
are padded with Nones, e.g.

>>> list(group(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, None, None)]
def group(lst, n):
    iters = tee(lst, n)
    iters = [iters[0]] + [chain(iter, repeat(None)) for iter in iters[1:]]
    return izip(*[islice(iter, i, None, n) for i, iter in enumerate(iters)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grouper(iterable: Iterable, n: int, fillvalue: Any = None) -> Iterator[tuple]:\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n # pylint: disable=invalid-name\n args = [iter(iterable)] * n\n # pylint: enable=invalid-name\n return itertools.zip_longest(*args, fillvalue=fillvalue)", "def grouper(n, iterable):\n args = [iter(iterable)] * n\n return list((tuple([e for e in t if e != None]) for t in zip_longest(*args)))", "def group(n, iterable, fill_value = None):\n args = [iter(iterable)] * n\n return izip_longest(fillvalue = fill_value, *args)", "def _grouper(iterable: Iterable[Any], n: int, fillvalue=None) -> Iterator[Tuple[Any]]:\n it = iter(iterable)\n while True:\n values = []\n for _ in range(n):\n try:\n value = next(it)\n except StopIteration:\n values.extend([fillvalue] * (n - len(values)))\n yield tuple(values)\n return\n values.append(value)\n yield tuple(values)", "def _grouper(iterable, n, fillvalue=None):\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)", "def grouper(iterable, n, fillvalue=None):\r\n args = [iter(iterable)] * n\r\n return zip_longest(*args, fillvalue=fillvalue)", "def grouper(iterable, n, fillvalue=None):\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\n args = [iter(iterable)] * n\n return itertools.zip_longest(*args, fillvalue=fillvalue)", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return itertools.zip_longest(*args, fillvalue=fillvalue)", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return itertools.zip_longest(*args, fillvalue=fillvalue)", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return izip_longest(fillvalue=fillvalue, *args)", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return izip_longest(fillvalue=fillvalue, *args)", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(fillvalue=fillvalue, *args)", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return list(zip_longest(fillvalue=fillvalue, *args))", "def grouper(iterable, n, fillvalue=None):\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)", "def grouper(iterable, n, fillvalue=None):\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\n args = [iter(iterable)] * n\n return izip_longest(fillvalue=fillvalue, *args)", "def grouper(iterable, n, fillvalue=None):\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\n args = [iter(iterable)] * n\n return zip_longest(fillvalue=fillvalue, *args)", "def grouper(iterable, n, fillvalue=None):\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\n args = [iter(iterable)] * n\n return zip_longest(fillvalue=fillvalue, *args)", "def grouper(n, iterable, fillvalue=None):\n from itertools import zip_longest\n args = [iter(iterable)] * n\n return zip_longest(fillvalue=fillvalue, *args)", "def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return itertools.zip_longest(fillvalue=fillvalue, *args)", "def grouper(n, iterable, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(fillvalue=fillvalue, *args)", "def grouper( n, iterable, fillvalue=None ):\n args = [iter(iterable)]*n\n return list( it.izip_longest(fillvalue=fillvalue, *args) )", "def grouper(n, iterable, 
fill_value=None):\n args = [iter(iterable)] * n\n return zip_longest(fillvalue=fill_value, *args)", "def list_grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)", "def grouper(n, iterable):\n\tit = iter(iterable)\n\twhile True:\n\t\tchunk = tuple(itertools.islice(it, n))\n\t\tif not chunk:\n\t\t\treturn\n\t\tyield chunk", "def grouper(n, iterable):\n it = iter(iterable)\n while True:\n chunk = tuple(itertools.islice(it, n))\n if not chunk:\n return\n yield chunk", "def grouper(n, iterable, padvalue=None):\n\n\treturn zip_longest(*[iter(iterable)]*n, fillvalue=padvalue)", "def grouper(iterable, n):\n it = iter(iterable)\n while True:\n chunk = tuple(islice(it, n))\n if not chunk:\n return\n yield chunk", "def grouper(iterable, n):\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n args = [iter(iterable)] * n\n return zip_longest(*args)", "def grouper(n, iterable, padvalue=None):\n return zip_longest(*[iter(iterable)] * n, fillvalue=padvalue)", "def grouper(n, iterable, fillvalue=None):\n args = [iter(iterable)] * n\n return it.izip_longest(fillvalue=fillvalue, *args)", "def grouper(iterable, n):\n args = [iter(iterable)] * n\n return zip(*args)", "def grouper(iterable: Iterable, n: int = 100, fillvalue: Any = None) -> Any:\r\n args = [iter(iterable)] * n\r\n return zip_discard_compr(*args)", "def grouper(iterable, n):\n for i in range(0, len(iterable), n):\n yield iterable[i:i+n]", "def naive_grouper(inputs, n):\n num_groups = len(inputs) // n\n return [tuple(inputs[i*n:(i+1)*n]) for i in range(num_groups)]", "def _grouper(iterable, chunk_len, fillvalue=None):\n # To understand how/why this works, please refer to the following\n # stackoverflow post: https://stackoverflow.com/a/49181132/4859885.\n args = [iter(iterable)] * chunk_len\n return itertools.izip_longest(*args, fillvalue=fillvalue) # pylint: disable=deprecated-itertools-function", "def grouped(iterable, n):\n # https://gist.github.com/yoyonel/fb8c9d6fb06871db527492f5144b2e7b\n iterable = iter(iterable)\n return iter(lambda: list(itertools.islice(iterable, n)), [])", "def grouper(iterable, block_size, fillvalue=None) -> list:\n args = [iter(iterable)] * block_size\n return zip_longest(*args, fillvalue=fillvalue)", "def _miler_grouper(iterable):\r\n length = len(iterable) + 1\r\n if length == 3:\r\n yield [each.text for each in iterable]\r\n for i in range(3, length, 3):\r\n previous = i - 3\r\n group = iterable[previous: i]\r\n yield [each.text for each in group]", "def grouper(size, iterable, fillvalue=None):\n args = [iter(iterable)] * size\n return zip_longest(fillvalue=fillvalue, *args)", "def group(lst, n):\n return zip(*[lst[i::n] for i in range(n)])", "def group_elements(iterable, group_size):\n groups = [iter(iterable)] * group_size\n\n return itertools.zip_longest(*groups)", "def better_grouper(inputs, n):\n iters = [iter(inputs)] * n\n return zip(*iters)", "def itergroup(iterable, size: int):\n group = []\n for item in iterable:\n group.append(item)\n if len(group) == size:\n yield group\n group = []\n if group:\n yield group", "def chunker(results, n):\n\n def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n m = int(len(results) / n)\n return list(grouper(iterable=results, n=m, fillvalue=None))", "def group(seq, size):\n if not hasattr(seq, 'next'):\n seq = iter(seq)\n while True:\n yield [seq.next() for i in xrange(size)]", "def lazy_groups_of(iterator: Iterator[A], group_size: int) 
-> Iterator[List[A]]:\n return iter(lambda: list(islice(iterator, 0, group_size)), [])", "def groups_of(seq, n):\n for i in range(0, len(seq), n):\n yield seq[i : (i + n)]", "def grouped(iterable, n):\n batch_window = [None for _ in range(n)]\n cur_size = 0\n for item in iterable:\n batch_window[cur_size] = item\n cur_size += 1\n if cur_size >= n:\n batched = batch_window[:]\n batch_window = [None for _ in range(n)]\n cur_size = 0\n yield batched", "def shortest_grouper(iterable: Iterable, group_size: int):\n\treturn zip(*[iter(iterable)] * group_size)", "def group_tuples(l, n):\n assert len(l) % n == 0\n ret = []\n for i in range(0, len(l), n):\n v = l[i:i + n]\n ret.append(tuple(v))\n return ret", "def group(seq):\n num = len(seq)\n assert num >= 12\n new_tup = []\n def help_me(seq, new_tup, num):\n if num == 12:\n new_tup.append(seq[0:4])\n new_tup.append(seq[4:8])\n new_tup.append(seq[8:])\n elif num == 13:\n new_tup.append (seq[0:4]) \n new_tup.append (seq[4:8]) \n new_tup.append (seq[8:])\n elif num == 14:\n new_tup.append (seq[0:4]) \n new_tup.append (seq[4:9])\n new_tup.append (seq[9:])\n elif num == 15:\n new_tup.append (seq[0:5]) \n new_tup.append (seq[5:10])\n new_tup.append (seq[10:])\n else:\n new_tup.append(seq[0:4])\n return help_me(seq[4:], new_tup, num=len(seq[4:]))\n return tuple(new_tup)\n return help_me(seq, new_tup, num=len(seq))", "def generate_groups():\n groups = group_elements(\n generate_examples(file_name),\n cfg.tfrecord_size)\n\n # pairing groups to unique numbers and \n # filtering nulls from zip_longest\n groups = (\n list(filter(is_not_none, group))\n for group in groups\n )\n\n yield from groups", "def gallery_groups(self):\n\n \"Collect data into fixed-length chunks or blocks\"\n # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\n n = 3\n iterable = self.context['gallery'].values()\n args = [iter(iterable)] * 3\n return izip_longest(fillvalue=None, *args)", "def chunks(iterable, size=20):\n stop = object()\n\n for chunk in recipes.grouper(size, iterable, fillvalue=stop):\n if chunk[-1] is stop:\n is_not_stop = functools.partial(operator.is_not, stop)\n\n yield tuple(itertools.takewhile(is_not_stop, chunk))\n\n break\n\n yield chunk", "def _split_iterators(iterator, n=None):\n #if n is None:\n # item, iterator = cytoolz.peek(iterator)\n # n = len(item)\n iterators = itertools.tee(iterator, n)\n #iterators = ((sample[i] for sample in iterator) for i, iterator in enumerate(iterators))\n # Above does not work?!\n\n out = list()\n out.append(s[0] for s in iterators[0])\n out.append(s[1] for s in iterators[1])\n out.append(s[2] for s in iterators[2])\n iterators = out\n return iterators", "def longest_grouper(iterable: Iterable, group_size: int, fillvalue=None):\n\t#\n\targs = [iter(iterable)] * group_size\n\treturn zip_longest(*args, fillvalue=fillvalue)", "def group_by_count(iterable: List[Any], count: int, default_value: Any) -> List[List[Any]]:\n return [list(l) for l in zip_longest(*[iter(iterable)] * count, fillvalue=default_value)]", "def generate(group, number, n):\n return [get_group(group, number) for i in xrange(n)]", "def just2(n, seq):\n for inner_seq in seq:\n yield tuple(just(n, inner_seq))", "def batch(\n iterable: Iterable[_T], n: int\n) -> Generator[tuple[_T, ...], None, None]:\n iterator = iter(iterable)\n while True:\n try:\n # Unnecessary list here, but a generator won't raise StopIteration,\n # instead it will raise RuntimeError: \"generator raises StopIteration\".\n # I'd rather have a list comprehension in place of a generator expression\n # 
than catch RuntimeError and have to inspect the payload to verify it's\n # the one I want to be catching.\n yield tuple([next(iterator) for _ in range(n)])\n except StopIteration:\n return", "def iterslices(iterable, n, pad_last=False, pad_value=None):\n current = []\n for a in iterable:\n current.append(a)\n if len(current) == n:\n yield current\n current = []\n if current:\n if pad_last:\n current += [pad_value] * (n-len(current))\n yield current", "def n_wise(x: List[Any], size: Optional[int] = 2) -> Iterable:\n\n iterator = iter(x)\n\n return iter(lambda: tuple(islice(iterator, size)), ())", "def chunk(it, size):\n\tit = iter(it)\n\treturn iter(lambda: tuple(islice(it, size)), ())", "def iwindow(seq, n):\n it = iter(seq)\n result = tuple(islice(it, n))\n\n if len(result) == n:\n yield result\n\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def list_split(iterable, n, reverse=False):\n\n if reverse:\n extra = len(iterable) % n\n if extra:\n yield tuple(iterable[0:extra])\n\n iterable = iterable[extra:]\n else:\n extra = 0\n\n iterable = iter(iterable)\n\n res = tuple(islice(iterable, n))\n while len(res) != 0:\n yield res\n res = tuple(islice(iterable, n))", "def just(n, seq):\n it = iter(seq)\n for _ in range(n - 1):\n yield next(it, None)\n yield tuple(it)", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield (i+n, l[i:i+n] )", "def chunk(iter_list, size):\n iter_list = iter(iter_list)\n # lambda: creates a returning expression function\n # which returns slices\n # iter, with the second argument () stops creating\n # iterators when it reaches the end\n return iter(lambda: tuple(islice(iter_list, size)), ())", "def window(seq, n=2):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def window(seq, n=2):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def split_every(n, iterable):\r\n iterator = iter(iterable)\r\n return takewhile(bool, (list(islice(iterator, n)) for _ in repeat(None)))", "def window(seq, n):\n seq_it = iter(seq)\n result = tuple(it.islice(seq_it, n))\n if len(result) == n:\n yield result \n for elem in seq_it:\n result = result[1:] + (elem,)\n yield result", "def chunks(l, n):\r\n for i in xrange(0, len(l), n):\r\n yield l[i+1:i+n+1:2]", "def chunks(item_list, n_items):\n for i in range(0, len(item_list), n_items):\n yield item_list[i : i + n_items]", "def group(seq):\n pass # replace with your solution", "def chunk(seq, size, groupByList=True):\n func = tuple\n if groupByList:\n func = list\n return [func(seq[i:i + size]) for i in range(0, len(seq), size)]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def subgroup(gen, idx, stop):\n id = 0\n for item in gen:\n if stop:\n if id >= idx and id < stop:\n yield item\n else:\n if id >= idx:\n yield item\n id += 1", "def generate_ngrams(iterable, n):\n return zip(*[itertools.islice(it, i, None) for i, it in enumerate(itertools.tee(iterable, n))])", "def chunks(alist, n):\n for i in range(0, len(alist), n):\n yield alist[i:i + n]", "def chunks(l, n):\n\to = []\n\tfor i in xrange(0, len(l), n):\n\t\to.append(tuple(l[i:i+n]))\n\treturn o", "def chunks(data: List[Any], num: int) -> Generator[List[Any], None, None]:\n for i in range(0, len(data), num):\n yield data[i : i + num]", "def chunks(l: list, n: int):\n if n < 1:\n raise AttributeError('n = %s' % n)\n n = 
max(1, n)\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def _chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(lst, n):\r\n for i in range(0, len(lst), n):\r\n yield lst[i:i + n]", "def yield2(l):\n\n l = list(l)\n\n for x in range(0,len(l),2):\n try:\n yield [l[x],l[x+1]]\n except IndexError:\n yield [l[x],None]", "def _chunks(l, n):\n\tfor i in range(0, len(l), n):\n\t\tyield l[i:i + n]", "def __chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i : i + n]", "def chunks(lst: list, n: int):\n for i in range(0, len(lst), n):\n yield lst[i : i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i+n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, n):\n yield l[i::n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def chunks(lst, n):\n\tfor i in range(0, len(lst), n):\n\t\tyield lst[i:i + n]", "def chunks(lst, n):\n\tfor i in range(0, len(lst), n):\n\t\tyield lst[i:i + n]", "def chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i : i + n]", "def chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i : i + n]" ]
[ "0.80415297", "0.8012354", "0.7891814", "0.78612876", "0.7830936", "0.7768379", "0.77457225", "0.774194", "0.774194", "0.7739891", "0.7731009", "0.7731009", "0.7730615", "0.7714614", "0.7697723", "0.76915014", "0.76890963", "0.76890963", "0.76738936", "0.76717544", "0.7668773", "0.7640462", "0.76323277", "0.7625165", "0.756429", "0.7530986", "0.7518038", "0.7477758", "0.74739355", "0.7471594", "0.745669", "0.7390306", "0.7368393", "0.71268445", "0.69917357", "0.692234", "0.6916979", "0.6844155", "0.67948467", "0.6778596", "0.6776709", "0.6753387", "0.6738954", "0.6658853", "0.6620068", "0.6618738", "0.65951705", "0.65838367", "0.6540388", "0.65177745", "0.65144175", "0.6513752", "0.63305235", "0.62515795", "0.6249835", "0.6239074", "0.6207055", "0.62025064", "0.61723465", "0.616668", "0.6012258", "0.59905255", "0.59621763", "0.59319204", "0.5917413", "0.59077865", "0.5884103", "0.5872136", "0.58627576", "0.5860954", "0.5860954", "0.58354306", "0.58174664", "0.58138704", "0.58118784", "0.5806169", "0.57994634", "0.5792015", "0.57884693", "0.57681125", "0.57644904", "0.576065", "0.57562506", "0.57521594", "0.5749267", "0.57431996", "0.5742676", "0.5739649", "0.5735297", "0.57280993", "0.5721852", "0.57214546", "0.57190025", "0.571777", "0.57161117", "0.57058084", "0.570417", "0.570417", "0.5701457", "0.5701457" ]
0.71935266
33
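As a quick cross-check of the record above, a minimal Python 3 sketch of the same behaviour: the grouper-style idiom used by most of the listed negatives (zip_longest over n references to one iterator) reproduces the padded n-tuple output shown in the query's doctest. The function name and fillvalue default below are illustrative, not part of the dataset.

from itertools import zip_longest

def group_py3(iterable, n, fillvalue=None):
    # n references to a single iterator, consumed round-robin by zip_longest;
    # the final tuple is padded with fillvalue when len(iterable) % n != 0.
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

# Matches the doctest in the query above:
# >>> list(group_py3(range(10), 3))
# [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, None, None)]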
Defines the initial bounds and labels for the plotter.
def set_up_plotter(self, n_levels: int, param_labels: List[str]):
    self.ax.set_ylim(0, n_levels)
    self.ax.set_zlim(0, 5)
    self.ax.set_xlim(0, 1)
    self.ax.invert_xaxis()
    self.ax.set_zlabel(param_labels[0], labelpad=5)
    self.ax.set_ylabel("Optimization level", labelpad=10)
    self.ax.set_xlabel(param_labels[1], labelpad=10)
    self.fig.show()
    self.fig.canvas.draw()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initWidgets(self):\n self.loctext.setText(\"{0:g}\".format(self.loc))\n self.scaletext.setText(\"{0:g}\".format(self.scale))", "def set_up(self):\n self.h, = self.ax.plot(self.x, lw=2)\n self.ax.set_ylim(0,100)\n self.ax.set_xlim(0,100)\n self.ax.title.set_text(self.config[\"title\"])\n self.ax.set_xlabel(self.config[\"x_label\"])\n self.ax.set_ylabel(self.config[\"y_label\"])", "def set_range(self, **rangekwargs):\n\n if 'xrange' in rangekwargs.keys(): \n xrange = rangekwargs['xrange']\n else: \n xrange = [-50.0, 50.0] # (default)\n\n if 'yrange' in rangekwargs.keys(): \n yrange = rangekwargs['yrange']\n else: \n yrange = [0.0, 1.25 * self.hist_max]\n\n self.sub.set_xlim(xrange) \n self.sub.set_ylim(yrange) \n\n self.sub.set_xlabel(r\"$\\mathtt{d_{LOS}}$ (Mpc/h)\", fontsize=20)\n\n return None", "def updateLabels(self):\n # Intensity range\n self.minIntensityLabel.setText(\"Intensity: \"+str(self.ABsettings[\"intensity_range\"][0]).rjust(3))\n self.labelMaxInt.setText(str(self.ABsettings[\"intensity_range\"][1]).ljust(3))\n # Z range\n self.minZLabel.setText(\"Z range: \"+str(self.ABsettings[\"zrange\"][0]+1).rjust(2))\n self.labelMaxZ.setText(str(self.ABsettings[\"zrange\"][1]+1).ljust(2))", "def _plot_setup(self, fig, ax):\n\n self._check_data_valid()\n\n if ax:\n self.fig = fig\n self.ax = ax\n else:\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111, projection=self.wcs)\n\n # Set basic figure display options\n if self.options.get('grid', True):\n self.ax.coords.grid(color='white', alpha=0.5)\n\n if self.options.get('title', True):\n title = self.options.get('title', self.surveyname)\n self.ax.set_title(title, fontdict={'fontsize': 20, 'fontweight': 10})\n\n self.set_xlabel('RA (J2000)')\n self.set_ylabel('Dec (J2000)')\n\n # Set compact or extended label / tick configuration\n if self.options.get('compact', False):\n tickcolor = 'k' if np.nanmax(np.abs(self.data)) == np.nanmax(self.data) else 'gray'\n\n lon = self.ax.coords[0]\n lat = self.ax.coords[1]\n\n lon.display_minor_ticks(True)\n lat.display_minor_ticks(True)\n\n lon.set_ticks(number=5)\n lat.set_ticks(number=5)\n\n self.ax.tick_params(axis='both', direction='in', length=5, color=tickcolor)\n self.padlevel = self.options.get('ylabelpad', 5)\n\n # Set colourmap normalisation\n self.norm = self._get_cmap_normalisation()", "def hogg_lim_and_label():\n plt.xlim(-20., 1020.)\n plt.xlabel(\"time (d)\")\n plt.ylim(-20., 20.)\n plt.ylabel(\"radial velocity (m\\,s$^{-1}$)\")\n return None", "def setBounds_0(self, bounds):\n self.setBounds(bounds.getX(), bounds.getY(), bounds.getWidth(), bounds.getHeight())", "def _init_after_assignment(self):\n self.labels = { 'axes': [self.locus1, self.locus2], \\\n 'elements': [self.pop._get_axis_elements(self.locus1), self.pop._get_axis_elements(self.locus2)] }\n self._init_labels(self.labels)", "def _init_after_assignment(self):\n self.labels = { 'axes': [self.locus1, self.locus2, self.locus3], \\\n 'elements': [self.pop._get_axis_elements(self.locus1), self.pop._get_axis_elements(self.locus2), self.pop._get_axis_elements(self.locus3)] }\n self._init_labels(self.labels)", "def initialize_plot(self, ranges=None):\n raise NotImplementedError", "def _plot_init(self):\n pass", "def _plot_init(self):\n pass", "def setupPlotVariables(self):\n\n ### Borrowed from Thomas' plot routines\n self.plotLabels = [r'$m_1$', r'$m_2$', r'eccentricity', \\\n r'period (days)', \\\n r'inclination (rad)',r'$\\omega$ (rad)',r'$t_0$',r'$\\alpha$ (rad)']\n\n ### Change these to update the plot ranges 
for each\n ### parameter. \n angOut = np.pi+0.3\n self.plotLimsLo = [1.0, -1.0, -0.2, -1.0, -angOut, -angOut, -10,0]\n self.plotLimsHi = [2.2, 10.0, 1.2, 35.0, angOut, angOut, 10,1.2]\n\n ### We specify the method for the uniformly-spaced grid. If we\n ### want to make one of these logspace (say) we just change\n ### the method identified in the appropriate place in the\n ### list.\n nMeth = len(self.plotLimsLo)\n self.plotSpacerMethods = [np.linspace for i in range(nMeth)]\n\n self.plotNfine = 1000 ### number of fine points to use\n self.plotNcols = 3 ### number of columns in the plot\n\n self.plotNrows = int(np.ceil(nMeth/float(self.plotNcols)) )", "def __createLimits(self):\r\n self.lowerXLabel = QLabel(\"lower limits of (x)\")\r\n self.lowerXField = QLineEdit(self)\r\n self.lowerXField.setPlaceholderText(\"-10\")\r\n\r\n self.upperXLabel = QLabel(\"upper limits of (x)\")\r\n self.upperXField = QLineEdit(self)\r\n self.upperXField.setPlaceholderText(\"10\")", "def _init_after_assignment(self):\n self.labels = { 'axes': [self.locus], \\\n 'elements': [self.pop._get_axis_elements(self.locus)] }\n self._init_labels(self.labels)", "def __init__( self, x1, y1, x2 = 1.1, y2 = 1.1, halign = \"fixed\", valign = \"fixed\", font=42, textSize = None ):\n if x2 == 1.1 and halign == \"fixed\": halign = \"left\"\n if y2 == 1.1 and valign == \"fixed\": valign = \"top\"\n self.halign = halign\n self.valign = valign\n ROOT.TLegend.__init__( self, x1, y1, x2, y2 )\n self.SetTextFont( font )\n if textSize: self.SetTextSize( textSize )", "def __init__(self):\n\n fig_width_pt = 800.0 \n pylab.rcParams.update(plot_params)", "def set_figure_variables(self):\n #self.fig.canvas.manager.full_screen_toggle()\n self.gs = self.fig.add_gridspec(2, 3)\n self.ax1 = self.fig.add_subplot(self.gs[0, 0])\n self.ax2 = self.fig.add_subplot(self.gs[0, 1])\n self.ax3 = self.fig.add_subplot(self.gs[0, 2])\n self.ax4 = self.fig.add_subplot(self.gs[1, 0])\n self.ax5 = self.fig.add_subplot(self.gs[1, 1])\n self.ax6 = self.fig.add_subplot(self.gs[1, 2])\n # histogram with indicator scoring\n self.ax1.set_xlabel(\"indicators\")\n self.ax1.set_ylabel(\"score (%)\")\n # graph with flood safety levels\n self.ax2.set_xlabel(\"dike section\")\n self.ax2.set_ylabel(\"chance of flooding occurrence\")\n # graph with water levels vs dike height\n self.ax3.set_xlabel(\"river length (meters)\")\n self.ax3.set_ylabel(\"height (meters)\")\n # graph with overall costs made\n self.ax6.set_ylabel(\"million Euros\")\n \n self.ax1.set_ylim([0, 100])\n self.ax2.set_ylim([0, 100])\n self.ax3.set_ylim([14, 18])\n self.ax6.set_ylim([0, 25000000])\n \n self.ax1.set_title(\"Overall score on indicators\")\n self.ax2.set_title(\"Flood safety levels\")\n self.ax3.set_title(\"Normative water levels vs dike crest height\")\n self.ax6.set_title(\"Budget spent\")\n \n self.x_pos = np.arange(len(self.indicators))\n self.ax1.set_xticks(self.x_pos)\n self.ax1.set_xticklabels(self.indicators)\n \n flood_safety_levels = [100, 200, 400, 600, 800, 1000, 1250]\n self.ax2.set_yticks(flood_safety_levels)\n self.ax2.set_yticklabels([\"1/\"+str(value) for value in flood_safety_levels])\n \n self.plot1 = None\n self.plot2 = None\n self.plot3 = None\n self.plot4 = None\n self.plot5 = None\n self.plot6 = None\n return", "def set_labels(self):\n\n if 1 <= self.selected_data <= 2:\n self.plot_select.setLabel(\"left\", \"P (kPa)\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"P (kPa)\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", 
\"s\")\n\n elif self.selected_data == 3:\n self.plot_select.setLabel(\"left\", \"ext\", \"\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"ext\", \"\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n elif self.selected_data == 4:\n self.plot_select.setLabel(\"left\", \"U\", \"V\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"U\", \"V\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n # self.plot_simulate.setLabel(\"left\", \"ext\", \"\")\n # self.plot_simulate.setLabel(\"bottom\", \"t\", \"s\")\n\n self.plot_distribution.setLabel(\"left\", \"N ×10¹⁰ (#/m³)\")\n self.plot_distribution.setLabel(\"bottom\", \"d_p\", \"m\")\n self.plot_distribution.showGrid(y=True)\n\n self.plot_rotatometer.setLabel(\"left\", \"N ×10¹⁰ (#/m³)\")\n self.plot_rotatometer.setLabel(\"bottom\", \"laimennusvirtaus\")\n self.plot_rotatometer.showGrid(y=True)", "def plot_setup(labels=['X', 'Y'], fsize=18,\r\n setlimits=False, limits=[0,1,0,1],\r\n title='', legend=False,\r\n save=False, filename='plot.jpg', dpi=200):\r\n plt.xlabel(str(labels[0]), fontsize=fsize)\r\n plt.ylabel(str(labels[1]), fontsize=fsize)\r\n plt.title(title, fontsize=fsize)\r\n fig = plt.gcf()\r\n fig.set_size_inches(6, 4)\r\n if legend:\r\n plt.legend(fontsize=fsize-4)\r\n if setlimits:\r\n plt.xlim((limits[0], limits[1]))\r\n plt.ylim((limits[2], limits[3]))\r\n if save:\r\n fig.savefig(filename, dpi=dpi, bbox_inches='tight')\r\n #plt.tight_layout()\r", "def setUp(self):\n premask = np.array([[0.0, 3.0, 2.0], [0.5, 0.0, 1.5], [0.2, 0.0, 0]])\n self.mask = np.ma.masked_where(premask > 1.0, premask)\n\n self.x_coord = DimCoord([1, 2, 3], long_name=\"longitude\")\n self.y_coord = DimCoord([1, 2, 3], long_name=\"latitude\")\n self.coords = [self.x_coord, self.y_coord]\n self.upper = 100.0\n self.lower = 0.0\n self.units = \"m\"", "def __init__(self):\n self.pt = Plotter(2, width=self.width, height=self.height)\n self.pt.use_grid()\n self.pt.set_title(\n \"Exponentials plotted from {:.1f} to {:.1f}\", self.xMin, self.xMax)\n self.pt.set_xlabel(\"X\")\n self.pt.set_ylabel(\"a*exp(-b*X)\")", "def __init__(self):\n self.label = \"AlternateUnitBounds_Processing\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n self.tools = [FindMissingBounds, FindUpdatedIMDParks]", "def draw_bounds():\n\n pass", "def __executeActions(self):\n if 'labelFormat' not in self.options:\n if self.dim == 2:\n self.ax.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())\n self.ax.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())\n self.ax.ticklabel_format(**{'style': 'sci', 'scilimits': (0,1), 'useOffset': False, 'axis': 'both'})\n if 'title' not in self.options:\n self.ax.set_title(self.name, fontdict={'verticalalignment': 'baseline', 'horizontalalignment': 'center'})\n for key in self.options:\n if key in ['how', 'plotSettings', 'figureProperties', 'colorbar']:\n pass\n elif key == 'range':\n if 'ymin' in self.options[key]:\n self.ax.set_ylim(bottom=ast.literal_eval(self.options[key]['ymin']))\n if 'ymax' in self.options[key]:\n self.ax.set_ylim(top=ast.literal_eval(self.options[key]['ymax']))\n if 'xmin' in self.options[key]:\n self.ax.set_xlim(left=ast.literal_eval(self.options[key]['xmin']))\n if 'xmax' in self.options[key]:\n self.ax.set_xlim(right=ast.literal_eval(self.options[key]['xmax']))\n if self.dim == 3:\n if 'zmin' in self.options[key]:\n 
self.ax.set_zlim(bottom=ast.literal_eval(self.options[key]['zmin']))\n if 'zmax' in self.options[key]:\n self.ax.set_zlim(top=ast.literal_eval(self.options[key]['zmax']))\n elif key == 'labelFormat':\n if 'style' not in self.options[key]:\n self.options[key]['style'] = 'sci'\n if 'limits' not in self.options[key]:\n self.options[key]['limits'] = '(0,0)'\n if 'useOffset' not in self.options[key]:\n self.options[key]['useOffset'] = 'False'\n if 'axis' not in self.options[key]:\n self.options[key]['axis'] = 'both'\n self.ax.ticklabel_format(**{'style': self.options[key]['style'],\n 'scilimits': ast.literal_eval(self.options[key]['limits']),\n 'useOffset': ast.literal_eval(self.options[key]['useOffset']),\n 'axis': self.options[key]['axis']})\n elif key == 'camera':\n if self.dim == 2:\n self.raiseAWarning('2D plots have not a camera attribute... They are 2D!!!!')\n else:\n if 'elevation' in self.options[key] and 'azimuth' in self.options[key]:\n self.ax.view_init(elev=float(self.options[key]['elevation']), azim=float(self.options[key]['azimuth']))\n elif 'elevation' in self.options[key] and 'azimuth' not in self.options[key]:\n self.ax.view_init(elev=float(self.options[key]['elevation']), azim=None)\n elif 'elevation' not in self.options[key] and 'azimuth' in self.options[key]:\n self.ax.view_init(elev=None, azim=float(self.options[key]['azimuth']))\n elif key == 'title':\n self.ax.set_title(self.options[key]['text'], **self.options[key].get('attributes', {}))\n elif key == 'scale':\n major, minor = [int(x) for x in matplotlib.__version__.split('.')[:2]]\n #matplotlib before 3.5 used nonpos instead of nonpositive\n useNonpos = (major == 3 and minor < 5)\n if 'xscale' in self.options[key]:\n if useNonpos:\n self.ax.set_xscale(self.options[key]['xscale'], nonposx='clip')\n elif self.options[key]['xscale'].lower() == 'log':\n self.ax.set_xscale(self.options[key]['xscale'], nonpositive='clip')\n else:\n self.ax.set_xscale(self.options[key]['xscale'])\n if 'yscale' in self.options[key]:\n if useNonpos:\n self.ax.set_yscale(self.options[key]['yscale'], nonposy='clip')\n elif self.options[key]['yscale'].lower() == 'log':\n self.ax.set_yscale(self.options[key]['yscale'], nonpositive='clip')\n else:\n self.ax.set_yscale(self.options[key]['yscale'])\n if self.dim == 3:\n if 'zscale' in self.options[key]:\n self.ax.set_zscale(self.options[key]['zscale'])\n elif key == 'addText':\n if 'position' not in self.options[key]:\n if self.dim == 2:\n self.options[key]['position'] = '0.0,0.0'\n else:\n self.options[key]['position'] = '0.0,0.0,0.0'\n if 'withdash' not in self.options[key]:\n self.options[key]['withdash'] = 'False'\n if 'fontdict' not in self.options[key]:\n self.options[key]['fontdict'] = 'None'\n else:\n try:\n tempVar = ast.literal_eval(self.options[key]['fontdict'])\n self.options[key]['fontdict'] = str(tempVar)\n except AttributeError:\n self.raiseAnError(TypeError, f'In {key} tag: can not convert the string \"{self.options[key][\"fontdict\"]}\" to a dictionary! 
Check syntax for python function ast.literal_eval')\n if self.dim == 2 :\n self.ax.text(float(self.options[key]['position'].split(',')[0]),\n float(self.options[key]['position'].split(',')[1]),\n self.options[key]['text'],\n fontdict=ast.literal_eval(self.options[key]['fontdict']),\n **self.options[key].get('attributes', {}))\n else:\n self.ax.text(float(self.options[key]['position'].split(',')[0]),\n float(self.options[key]['position'].split(',')[1]),\n float(self.options[key]['position'].split(',')[2]),\n self.options[key]['text'],\n fontdict=ast.literal_eval(self.options[key]['fontdict']),\n **self.options[key].get('attributes', {}))\n elif key == 'autoscale':\n if 'enable' not in self.options[key]:\n self.options[key]['enable'] = 'True'\n elif utils.stringIsTrue(self.options[key]['enable']):\n self.options[key]['enable'] = 'True'\n elif utils.stringIsFalse(self.options[key]['enable']):\n self.options[key]['enable'] = 'False'\n if 'axis' not in self.options[key]:\n self.options[key]['axis'] = 'both'\n if 'tight' not in self.options[key]:\n self.options[key]['tight'] = 'None'\n self.ax.autoscale(enable=ast.literal_eval(self.options[key]['enable']),\n axis=self.options[key]['axis'],\n tight=ast.literal_eval(self.options[key]['tight']))\n elif key == 'horizontalLine':\n if self.dim == 3:\n self.raiseAWarning('horizontalLine not available in 3-D plots!!')\n else:\n if 'y' not in self.options[key]:\n self.options[key]['y'] = '0'\n if 'xmin' not in self.options[key]:\n self.options[key]['xmin'] = '0'\n if 'xmax' not in self.options[key]:\n self.options[key]['xmax'] = '1'\n if 'hold' not in self.options[key]:\n self.options[key]['hold'] = 'None'\n self.ax.axhline(y=ast.literal_eval(self.options[key]['y']),\n xmin=ast.literal_eval(self.options[key]['xmin']),\n xmax=ast.literal_eval(self.options[key]['xmax']),\n hold=ast.literal_eval(self.options[key]['hold']),\n **self.options[key].get('attributes', {}))\n elif key == 'verticalLine':\n if self.dim == 3:\n self.raiseAWarning('verticalLine not available in 3-D plots!!')\n else:\n if 'x' not in self.options[key]:\n self.options[key]['x'] = '0'\n if 'ymin' not in self.options[key]:\n self.options[key]['ymin'] = '0'\n if 'ymax' not in self.options[key]:\n self.options[key]['ymax'] = '1'\n if 'hold' not in self.options[key]:\n self.options[key]['hold'] = 'None'\n self.ax.axvline(x=ast.literal_eval(self.options[key]['x']),\n ymin=ast.literal_eval(self.options[key]['ymin']),\n ymax=ast.literal_eval(self.options[key]['ymax']),\n hold=ast.literal_eval(self.options[key]['hold']),\n **self.options[key].get('attributes', {}))\n elif key == 'horizontalRectangle':\n if self.dim == 3:\n self.raiseAWarning('horizontalRectangle not available in 3-D plots!!')\n else:\n if 'ymin' not in self.options[key]:\n self.raiseAnError(IOError, 'ymin parameter is needed for function horizontalRectangle!!')\n if 'ymax' not in self.options[key]:\n self.raiseAnError(IOError, 'ymax parameter is needed for function horizontalRectangle!!')\n if 'xmin' not in self.options[key]:\n self.options[key]['xmin'] = '0'\n if 'xmax' not in self.options[key]:\n self.options[key]['xmax'] = '1'\n self.ax.axhspan(ast.literal_eval(self.options[key]['ymin']),\n ast.literal_eval(self.options[key]['ymax']),\n xmin=ast.literal_eval(self.options[key]['xmin']),\n xmax=ast.literal_eval(self.options[key]['xmax']),\n **self.options[key].get('attributes', {}))\n elif key == 'verticalRectangle':\n if self.dim == 3:\n self.raiseAWarning('vertical_rectangle not available in 3-D plots!!')\n else:\n if 'xmin' 
not in self.options[key]:\n self.raiseAnError(IOError, 'xmin parameter is needed for function verticalRectangle!!')\n if 'xmax' not in self.options[key]:\n self.raiseAnError(IOError, 'xmax parameter is needed for function verticalRectangle!!')\n if 'ymin' not in self.options[key]:\n self.options[key]['ymin'] = '0'\n if 'ymax' not in self.options[key]:\n self.options[key]['ymax'] = '1'\n self.ax.axvspan(ast.literal_eval(self.options[key]['xmin']),\n ast.literal_eval(self.options[key]['xmax']),\n ymin=ast.literal_eval(self.options[key]['ymin']),\n ymax=ast.literal_eval(self.options[key]['ymax']),\n **self.options[key].get('attributes', {}))\n elif key == 'axesBox':\n if self.dim == 3:\n self.raiseAWarning('axesBox not available in 3-D plots!!')\n else:\n self.ax.set_frame_on(self.options[key][key])\n elif key == 'axis':\n self.ax.axis(self.options[key][key])\n elif key == 'grid':\n if 'b' not in self.options[key]:\n self.options[key]['b'] = 'off'\n if utils.stringIsTrue(self.options[key]['b']):\n self.options[key]['b'] = 'on'\n elif utils.stringIsFalse(self.options[key]['b']):\n self.options[key]['b'] = 'off'\n if 'which' not in self.options[key]:\n self.options[key]['which'] = 'major'\n if 'axis' not in self.options[key]:\n self.options[key]['axis'] = 'both'\n if self.dim == 2:\n self.ax.grid(b=self.options[key]['b'],\n which=self.options[key]['which'],\n axis=self.options[key]['axis'],\n **self.options[key].get('attributes', {}))\n else:\n self.ax.grid(b=self.options[key]['b'], **self.options[key].get('attributes', {}))\n else:\n self.raiseAWarning(f'Attempting to perform action {key}. If this does not work, check manual and relevant matplotlib method specification.')\n kwargs = {}\n for taco in self.options[key]:\n if taco != 'attributes' and taco != key:\n try:\n kwargs[taco] = ast.literal_eval(self.options[key][taco])\n except ValueError:\n kwargs[taco] = self.options[key][taco]\n try:\n customFunctionCall = getattr(self.ax, key)\n self.actPlot = customFunctionCall(**kwargs)\n except AttributeError as ae:\n self.raiseAnError(RuntimeError, f'<{str(ae)}> -> in executing custom action \"{key}\" in Plot {self.name}.\\n {self.printTag} command has been called in the following way: ax.{key}(**{str(kwargs)})')", "def __init__(self):\n super(GraphVisualizerPointDraw, self).__init__()\n\n self.setMinimumSize(QSize(13, 13))\n self.setMaximumSize(QSize(13, 13))", "def _setFig(self):\n self.p.background_fill_color = grey['light']\n self.p.xgrid.grid_line_color = None\n self.p.ygrid.grid_line_color = None\n self.p.ygrid.grid_line_dash = 'dotted'\n self.p.ygrid.grid_line_dash = 'dotted'\n\n self.p.xgrid.minor_grid_line_color = grey['median']\n self.p.ygrid.minor_grid_line_color = grey['median']\n self.p.xgrid.minor_grid_line_dash = 'dotted'\n self.p.ygrid.minor_grid_line_dash = 'dotted'\n\n self.p.xaxis.axis_label = \"tsne_feature_0\"\n self.p.yaxis.axis_label = \"tsne_feature_1\"", "def __init__(self):\n import matplotlib.pyplot as plt\n\n\n SMALL_SIZE = 12\n MEDIUM_SIZE = 14\n BIGGER_SIZE = 16\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title", "def initScale(self):\n\t\t# Note 
name labels\n\t\tnumLabels = float(len(Pitch.noteNames))\n\t\tself.labelCoords = []\n\t\tfor i in xrange(int(numLabels)):\n\t\t\tsectionWidth = (self.width - self.margin) / numLabels \n\t\t\t# Label position\n\t\t\tlabelCx = self.margin + i* sectionWidth\n\t\t\tlabelCy = self.height * 0.7\n\t\t\tnoteNames = Pitch.noteNames[1:] + [Pitch.noteNames[0]]\n\t\t\tnoteName = noteNames[i]\n\t\t\t\n\t\t\t# Store calculated label locations for scale drawing purposes\n\t\t\tself.labelCoords.append((labelCx, labelCy, noteName))\t\t\t\n\n\t\t\t# Create label\n\t\t\tlabel = self.createText( labelCx, labelCy, None, self.labelFont)\n\t\t\tlabel.text = noteName\n\t\t\tlabel.anchor = W\n\t\t\t\n\t\t\t# Scale position\n\t\t\tbarTop = self.height * 0.33\n\t\t\tbarLeft = labelCx - self.width/36 \n\t\t\tbarRight = barLeft + (self.width-self.margin)*1.0 / numLabels\n\t\t\tbarBottom = self.height * 0.6\n\t\t\tself.scaleNoteWidth = (barRight - barLeft)\n\t\t\t\n\t\t\t# Create scale bar\n\t\t\tbarRect = self.createRectangle(barLeft, barTop, barRight, barBottom)\n\t\t\tbarRect.fill = self.barColor[ (i % 2 == 1) ]\n\t\t\tbarRect.lineWidth = 0\n\n\t\t\t# Draw ticks\n\t\t\tfor step in range(self.scaleSubSections):\n\t\t\t\tbarDiv = ( (1.0*barRight-barLeft) / self.scaleSubSections)\n\t\t\t\tlineX = barLeft + barDiv * step\n\t\t\t\tline = self.createLine(lineX, barTop, lineX, barBottom )\n\t\t\t\tline.fill = Color(255,255,255)\n\t\t\t\ttopTicks = self.createLine(lineX, barTop-10, lineX, barTop)\t\n\t\t\t\tbottomTicks = self.createLine(lineX, barBottom, lineX, barBottom+10)\t\n\t\t\t\ttopTicks.fill, bottomTicks.fill = Color(200, 200, 200), Color(200, 200, 200)\n\t\t\t\t\n\t\t\t\tif ( step % 2 == 0 ):\n\t\t\t\t\tcentsPerTick = 200 / self.scaleSubSections # 200 cents per step\n\t\t\t\t\tcentMultiplier = step - 4 # middle = in tune = 0 cents\n\t\t\t\t\tcentLabel = ''.join([c + \"\\n\" for c in str(centsPerTick * centMultiplier)])\n\t\t\t\t\tcent = self.createText(lineX, barBottom+30, None, font=self.centFont)\n\t\t\t\t\tcent.text = centLabel\n\t\t\t\tif ( step == self.scaleSubSections/2 ):\n\t\t\t\t\tline.width = barDiv / 2 \n\t\t\t\t\tline.fill = barRect.fill * 0.8", "def plot_roi_bounds(bounds,color='w',label=False):\n X1,X2,Y1,Y2=bounds\n plt.plot([X1,X2,X2,X1,X1],[Y1,Y1,Y2,Y2,Y1],'-',color=color)\n if label:\n plt.text(X1,Y1-3,label,verticalalignment='bottom',color=color,\n backgroundcolor=(0,0,0,.5))\n plt.margins(0,0)", "def test_legend_default_position():\n\n fig = Figure()\n\n fig.basemap(region=[-1, 1, -1, 1], frame=True)\n\n fig.plot(x=[0], y=[0], style=\"p10p\", label=\"Default\")\n fig.legend()\n\n return fig", "def _plotDisplay(self):\n self.gc.tick_labels.set_xformat('ddd')\n self.gc.tick_labels.set_yformat('ddd')\n if self.csys == 'GAL':\n if self.xlabel is None: self.xlabel = r'Galactic longitude $l$ $(^{\\circ})$'\n if self.ylabel is None: self.ylabel = r'Galactic latitude $b$ $(^{\\circ})$'\n else:\n if self.xlabel is None: self.xlabel = r'RA (J2000)'\n if self.ylabel is None: self.ylabel = r'Dec (J2000)'\n self.gc.axis_labels.set_xtext(self.xlabel)\n self.gc.axis_labels.set_ytext(self.ylabel)\n self.gc.set_axis_labels_font(size=self.ftsize1)\n self.gc.tick_labels.set_font(size=self.ftsize2) # <====== perhaps a string here?\n self.gc.ticks.set_color('black')", "def __init__(self, refstd, fig=None, rect=111, label='_', srange=(0, 1.5)):\n\n from matplotlib.projections import PolarAxes\n import mpl_toolkits.axisartist.floating_axes as FA\n import mpl_toolkits.axisartist.grid_finder as GF\n\n self.refstd = 
refstd # Reference standard deviation\n\n tr = PolarAxes.PolarTransform()\n\n # Correlation labels\n rlocs = NP.concatenate((NP.arange(10)/10., [0.95, 0.99]))\n tlocs = NP.arccos(rlocs) # Conversion to polar angles\n gl1 = GF.FixedLocator(tlocs) # Positions\n tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))\n\n # Standard deviation axis extent (in units of reference stddev)\n self.smin = srange[0]*self.refstd\n self.smax = srange[1]*self.refstd\n\n ghelper = FA.GridHelperCurveLinear(tr,\n extremes=(0, NP.pi, # 1st quadrant\n self.smin, self.smax),\n grid_locator1=gl1,\n tick_formatter1=tf1)\n\n if fig is None:\n fig = PLT.figure()\n\n ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)\n fig.add_subplot(ax)\n\n # Adjust axes\n ax.axis[\"top\"].set_axis_direction(\"bottom\") # \"Angle axis\"\n ax.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_text(\"Correlation\")\n\n ax.axis[\"left\"].set_axis_direction(\"bottom\") # \"X axis\"\n ax.axis[\"left\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"right\"].set_axis_direction(\"top\") # \"Y axis\"\n ax.axis[\"right\"].toggle(ticklabels=True)\n ax.axis[\"right\"].major_ticklabels.set_axis_direction(\"left\")\n #ax.axis[\"right\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"bottom\"].set_visible(False) # Useless\n\n self._ax = ax # Graphical axes\n self.ax = ax.get_aux_axes(tr) # Polar coordinates\n\n # Add reference point and stddev contour\n l, = self.ax.plot([0], self.refstd, 'k*',\n ls='', ms=10, label=label)\n t = NP.linspace(0, NP.pi/2)\n r = NP.zeros_like(t) + self.refstd\n self.ax.plot(t, r, 'k--', label='_')\n\n # Collect sample points for latter use (e.g. legend)\n self.samplePoints = [l]", "def show_bounds(\n self,\n mesh=None,\n bounds=None,\n axes_ranges=None,\n show_xaxis=True,\n show_yaxis=True,\n show_zaxis=True,\n show_xlabels=True,\n show_ylabels=True,\n show_zlabels=True,\n bold=True,\n font_size=None,\n font_family=None,\n color=None,\n xtitle='X Axis',\n ytitle='Y Axis',\n ztitle='Z Axis',\n n_xlabels=5,\n n_ylabels=5,\n n_zlabels=5,\n use_2d=False,\n grid=None,\n location='closest',\n ticks=None,\n all_edges=False,\n corner_factor=0.5,\n fmt=None,\n minor_ticks=False,\n padding=0.0,\n use_3d_text=True,\n render=None,\n **kwargs,\n ):\n self.remove_bounds_axes()\n\n if font_family is None:\n font_family = self._theme.font.family\n if font_size is None:\n font_size = self._theme.font.size\n if fmt is None:\n fmt = self._theme.font.fmt\n if fmt is None:\n fmt = '%.1f' # fallback\n\n if 'xlabel' in kwargs: # pragma: no cover\n xtitle = kwargs.pop('xlabel')\n warnings.warn(\n \"`xlabel` is deprecated. Use `xtitle` instead.\",\n PyVistaDeprecationWarning,\n )\n if 'ylabel' in kwargs: # pragma: no cover\n ytitle = kwargs.pop('ylabel')\n warnings.warn(\n \"`ylabel` is deprecated. Use `ytitle` instead.\",\n PyVistaDeprecationWarning,\n )\n if 'zlabel' in kwargs: # pragma: no cover\n ztitle = kwargs.pop('zlabel')\n warnings.warn(\n \"`zlabel` is deprecated. 
Use `ztitle` instead.\",\n PyVistaDeprecationWarning,\n )\n assert_empty_kwargs(**kwargs)\n\n color = Color(color, default_color=self._theme.font.color)\n\n if mesh is None and bounds is None:\n # Use the bounds of all data in the rendering window\n bounds = np.array(self.bounds)\n elif bounds is None:\n # otherwise, use the bounds of the mesh (if available)\n bounds = np.array(mesh.bounds)\n else:\n bounds = np.asanyarray(bounds, dtype=float)\n\n # create actor\n cube_axes_actor = pyvista.CubeAxesActor(\n self.camera,\n minor_ticks=minor_ticks,\n tick_location=ticks,\n x_title=xtitle,\n y_title=ytitle,\n z_title=ztitle,\n x_axis_visibility=show_xaxis,\n y_axis_visibility=show_yaxis,\n z_axis_visibility=show_zaxis,\n x_label_format=fmt,\n y_label_format=fmt,\n z_label_format=fmt,\n x_label_visibility=show_xlabels,\n y_label_visibility=show_ylabels,\n z_label_visibility=show_zlabels,\n n_xlabels=n_xlabels,\n n_ylabels=n_ylabels,\n n_zlabels=n_zlabels,\n )\n\n cube_axes_actor.use_2d_mode = use_2d or not np.allclose(self.scale, [1.0, 1.0, 1.0])\n\n if grid:\n grid = 'back' if grid is True else grid\n if not isinstance(grid, str):\n raise TypeError(f'`grid` must be a str, not {type(grid)}')\n grid = grid.lower()\n if grid in ('front', 'frontface'):\n cube_axes_actor.SetGridLineLocation(cube_axes_actor.VTK_GRID_LINES_CLOSEST)\n elif grid in ('both', 'all'):\n cube_axes_actor.SetGridLineLocation(cube_axes_actor.VTK_GRID_LINES_ALL)\n elif grid in ('back', True):\n cube_axes_actor.SetGridLineLocation(cube_axes_actor.VTK_GRID_LINES_FURTHEST)\n else:\n raise ValueError(f'`grid` must be either \"front\", \"back, or, \"all\", not {grid}')\n # Only show user desired grid lines\n cube_axes_actor.SetDrawXGridlines(show_xaxis)\n cube_axes_actor.SetDrawYGridlines(show_yaxis)\n cube_axes_actor.SetDrawZGridlines(show_zaxis)\n # Set the colors\n cube_axes_actor.GetXAxesGridlinesProperty().SetColor(color.float_rgb)\n cube_axes_actor.GetYAxesGridlinesProperty().SetColor(color.float_rgb)\n cube_axes_actor.GetZAxesGridlinesProperty().SetColor(color.float_rgb)\n\n if isinstance(location, str):\n location = location.lower()\n if location in ('all'):\n cube_axes_actor.SetFlyModeToStaticEdges()\n elif location in ('origin'):\n cube_axes_actor.SetFlyModeToStaticTriad()\n elif location in ('outer'):\n cube_axes_actor.SetFlyModeToOuterEdges()\n elif location in ('default', 'closest', 'front'):\n cube_axes_actor.SetFlyModeToClosestTriad()\n elif location in ('furthest', 'back'):\n cube_axes_actor.SetFlyModeToFurthestTriad()\n else:\n raise ValueError(\n f'Value of location (\"{location}\") should be either \"all\", \"origin\",'\n ' \"outer\", \"default\", \"closest\", \"front\", \"furthest\", or \"back\".'\n )\n elif location is not None:\n raise TypeError('location must be a string')\n\n if isinstance(padding, (int, float)) and 0.0 <= padding < 1.0:\n if not np.any(np.abs(bounds) == np.inf):\n cushion = (\n np.array(\n [\n np.abs(bounds[1] - bounds[0]),\n np.abs(bounds[3] - bounds[2]),\n np.abs(bounds[5] - bounds[4]),\n ]\n )\n * padding\n )\n bounds[::2] -= cushion\n bounds[1::2] += cushion\n else:\n raise ValueError(f'padding ({padding}) not understood. 
Must be float between 0 and 1')\n cube_axes_actor.bounds = bounds\n\n # set axes ranges if input\n if axes_ranges is not None:\n if isinstance(axes_ranges, (collections.abc.Sequence, np.ndarray)):\n axes_ranges = np.asanyarray(axes_ranges)\n else:\n raise TypeError('Input axes_ranges must be a numeric sequence.')\n\n if not np.issubdtype(axes_ranges.dtype, np.number):\n raise TypeError('All of the elements of axes_ranges must be numbers.')\n\n # set the axes ranges\n if axes_ranges.shape != (6,):\n raise ValueError(\n '`axes_ranges` must be passed as a [xmin, xmax, ymin, ymax, zmin, zmax] sequence.'\n )\n\n cube_axes_actor.x_axis_range = axes_ranges[0], axes_ranges[1]\n cube_axes_actor.y_axis_range = axes_ranges[2], axes_ranges[3]\n cube_axes_actor.z_axis_range = axes_ranges[4], axes_ranges[5]\n\n # set color\n cube_axes_actor.GetXAxesLinesProperty().SetColor(color.float_rgb)\n cube_axes_actor.GetYAxesLinesProperty().SetColor(color.float_rgb)\n cube_axes_actor.GetZAxesLinesProperty().SetColor(color.float_rgb)\n\n # set font\n font_family = parse_font_family(font_family)\n\n if not use_3d_text or not np.allclose(self.scale, [1.0, 1.0, 1.0]):\n use_3d_text = False\n cube_axes_actor.SetUseTextActor3D(False)\n else:\n cube_axes_actor.SetUseTextActor3D(True)\n\n props = [\n cube_axes_actor.GetTitleTextProperty(0),\n cube_axes_actor.GetTitleTextProperty(1),\n cube_axes_actor.GetTitleTextProperty(2),\n cube_axes_actor.GetLabelTextProperty(0),\n cube_axes_actor.GetLabelTextProperty(1),\n cube_axes_actor.GetLabelTextProperty(2),\n ]\n\n for prop in props:\n prop.SetColor(color.float_rgb)\n prop.SetFontFamily(font_family)\n prop.SetBold(bold)\n\n # this merely makes the font sharper\n if use_3d_text:\n prop.SetFontSize(50)\n\n # Note: font_size does nothing as a property, use SetScreenSize instead\n # Here, we normalize relative to 12 to give the user an illusion of\n # just changing the font size relative to a font size of 12. 
10 is used\n # here since it's the default \"screen size\".\n cube_axes_actor.SetScreenSize(font_size / 12 * 10.0)\n\n self.add_actor(cube_axes_actor, reset_camera=False, pickable=False, render=render)\n self.cube_axes_actor = cube_axes_actor\n\n if all_edges:\n self.add_bounding_box(color=color, corner_factor=corner_factor)\n\n self.Modified()\n return cube_axes_actor", "def __init__(self, refstd, fig=None, rect=111, label='_', srange=(0, 1.5)):\n\n from matplotlib.projections import PolarAxes\n import mpl_toolkits.axisartist.floating_axes as FA\n import mpl_toolkits.axisartist.grid_finder as GF\n\n self.refstd = refstd # Reference standard deviation\n\n tr = PolarAxes.PolarTransform()\n\n # Correlation labels\n rlocs = NP.concatenate((NP.arange(10)/10., [0.95, 0.99]))\n tlocs = NP.arccos(rlocs) # Conversion to polar angles\n gl1 = GF.FixedLocator(tlocs) # Positions\n tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))\n\n # Standard deviation axis extent (in units of reference stddev)\n self.smin = srange[0]*self.refstd\n self.smax = srange[1]*self.refstd\n\n ghelper = FA.GridHelperCurveLinear(tr,\n extremes=(0, NP.pi/2, # 1st quadrant\n self.smin, self.smax),\n grid_locator1=gl1,\n tick_formatter1=tf1)\n\n if fig is None:\n fig = PLT.figure()\n\n ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)\n fig.add_subplot(ax)\n\n # Adjust axes\n ax.axis[\"top\"].set_axis_direction(\"bottom\") # \"Angle axis\"\n ax.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_text(\"Correlation\")\n\n ax.axis[\"left\"].set_axis_direction(\"bottom\") # \"X axis\"\n ax.axis[\"left\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"right\"].set_axis_direction(\"top\") # \"Y axis\"\n ax.axis[\"right\"].toggle(ticklabels=True)\n ax.axis[\"right\"].major_ticklabels.set_axis_direction(\"left\")\n #ax.axis[\"right\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"bottom\"].set_visible(False) # Useless\n\n self._ax = ax # Graphical axes\n self.ax = ax.get_aux_axes(tr) # Polar coordinates\n\n # Add reference point and stddev contour\n l, = self.ax.plot([0], self.refstd, 'k*',\n ls='', ms=10, label=label)\n t = NP.linspace(0, NP.pi/2)\n r = NP.zeros_like(t) + self.refstd\n self.ax.plot(t, r, 'k--', label='_')\n\n # Collect sample points for latter use (e.g. 
legend)\n self.samplePoints = [l]", "def cla(self):\n\t\tAxes.cla(self)\n\t\tself.set_longitude_grid(45)\n\t\tself.set_latitude_grid(20)\n\t\t# Do not display ticks -- we only want gridlines and text\n\t\tself.xaxis.set_ticks_position('none')\n\t\tself.yaxis.set_ticks_position('none')\n\n\t\tself.x_lim = [-180, 180]\n\t\tself.y_lim = [-90, 90]\n\t\tself.set_xlim(self.x_lim)\n\t\tself.set_ylim(self.y_lim)", "def rec_default(self):\n self.res_triggers.setText('(0, 1, 320)')\n self.sigma_range.setText('(2.0)')\n self.det_range.setText('(.7)')", "def __init__(self, plot_design, label_classes, original_label_mapping):\n self.plot_design = plot_design\n self.labels, self.label_mapping = self._create_labels_and_mapping(label_classes, original_label_mapping)", "def __init__(self, dat, frame, box_size, centre,\n label=False, **kwargs):\n\n super().__init__(dat, frame, box_size, centre) # initialise superclass\n\n self.label = label # write labels\n\n self.draw()", "def __init__(self, fig, variables, ranges, n_ordinate_levels=6):\n angles = np.arange(0, 360, 360./len(variables))\n axes = [fig.add_axes([0.1, 0.1, 0.9, 0.9], polar=True,\n label=\"axes{}\".format(i)) for i in range(len(variables))]\n for ax in axes[1:]:\n ax.patch.set_visible(False)\n ax.grid(\"off\")\n ax.xaxis.set_visible(False)\n for i, ax in enumerate(axes):\n grid = np.linspace(*ranges[i], num=n_ordinate_levels)\n gridlabel = [\"{}\".format(round(x, 2)) for x in grid]\n if ranges[i][0] > ranges[i][1]:\n grid = grid[::-1] # hack to invert grid\n gridlabel[0] = \"\" # clean up origin\n set_rgrids(ax, grid, labels=gridlabel, angle=angles[i])\n ax.set_ylim(*ranges[i])\n # variables for plotting\n self.angle = np.deg2rad(np.r_[angles, angles[0]])\n self.ranges = ranges\n self.ax = axes[0]", "def initialize_mwplot(self):\n if self.fig is None:\n self.fig, self.ax = plt.subplots(1, figsize=self.figsize, dpi=self.dpi)\n if self.title is not None:\n self.fig.suptitle(self.title, fontsize=self.fontsize)\n if self.__grid == 'galactic':\n self.ax.set_xlabel('Galactic Longitude (Degree)', fontsize=self.fontsize)\n self.ax.set_ylabel('Galactic Latitude (Degree)', fontsize=self.fontsize)\n self.__ext = [(self.__center[0] - self.__radius[0]).value, (self.__center[0] + self.__radius[0]).value,\n (self.__center[1] - self.__radius[1]).value, (self.__center[1] + self.__radius[1]).value]\n self.ax.set_facecolor('k') # have a black color background for image with <1.0 alpha\n self.ax.imshow(self.__img, zorder=2, extent=self.__ext, alpha=self.imalpha, rasterized=True)\n self.ax.tick_params(labelsize=self.fontsize * 0.8, width=self.fontsize / 10, length=self.fontsize / 2)", "def __init__(self, lowerBound=None, upperBound=None):\n super().__init__()\n self.range = 0.0\n self.type = 'Uniform'\n self.distType = 'Continuous'\n self.compatibleQuadrature.append('Legendre')\n self.compatibleQuadrature.append('ClenshawCurtis')\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'Legendre'\n self.preferredPolynomials = 'Legendre'\n if upperBound is not None:\n self.upperBound = upperBound\n self.upperBoundUsed = True\n if lowerBound is not None:\n self.lowerBound = lowerBound\n self.lowerBoundUsed = True\n if self.lowerBoundUsed and self.upperBoundUsed:\n self.range = self.upperBound - self.lowerBound", "def draw_defaults(self):\n\n pass", "def initialize_plot(self):\n\n self.scat = self.config.ax.scatter(self.lons[0, 0], self.lats[0, 0], c=self.z[0,0]\n # , vmax=self.vmax\n # , vmin=self.vmin\n # ,cmap='coolwarm'\n # , norm=LogNorm()\n , 
transform=self.config.projection, edgecolor='none', s=0.6)\n cbar = plt.colorbar(self.scat)\n # cbar.set_label('Melt water Flux (mm/yr)')\n\n self.ttl = self.config.ax.text(1.5, 1.05, '', transform=self.config.ax.transAxes, va='center')\n\n return self.scat", "def _add_labels(self):\n coords = self['pore.coords']\n self['pore.front'] = coords[:,0]<(0.1*self._Lx)\n self['pore.back'] = coords[:,0]>(0.9*self._Lx)\n self['pore.left'] = coords[:,1]<(0.1*self._Ly)\n self['pore.right'] = coords[:,1]>(0.9*self._Ly)\n self['pore.bottom'] = coords[:,2]<(0.1*self._Lz)\n self['pore.top'] = coords[:,2]>(0.9*self._Lz)\n bnds = self.pores(labels=['front','back','left','right','bottom','top'])\n self['pore.boundary'] = False\n self['pore.boundary'] = bnds", "def ConfigureDefaults(area_bounds=None, \n area_bounds_format=['x_min','y_min','x_max','y_max'], \n area_bounds_range=None, years_are_bounds=False,\n dates_are_bounds=False, init_date_str_format='%y%m%d',\n member_name='realization', period_name='time', \n initialistion_time_name='forecast_reference_time'): \n global default_area_bounds\n global default_area_bounds_format\n global default_area_bounds_range\n global default_years_are_bounds\n global default_dates_are_bounds\n global default_init_date_str_format\n global default_member_name\n global default_period_name\n global default_initialistion_time_name\n \n default_area_bounds = area_bounds\n default_area_bounds_format = area_bounds_format\n default_area_bounds_range = area_bounds_range\n default_years_are_bounds = years_are_bounds\n default_dates_are_bounds = dates_are_bounds\n default_init_date_str_format = init_date_str_format\n default_member_name = member_name\n default_period_name = period_name\n default_initialistion_time_name = initialistion_time_name", "def __init__(self, fig_width=7.824, fig_height=5.956,\n alpha=.6, var_name=r'\\Delta G', method_name='HNC/ISc',\n units='kcal/mol', labelsize=35):\n # general\n self.back_color = [i/255. 
for i in (249,250,255)]\n # box positions\n self.box_pos_x = .47\n self.box_pos_y = .62\n # text parameters\n xlabel = '$\\mathrm{' + var_name + '_{exp}}$'\n ylabel = '$\\mathrm{' + var_name + '_{' + method_name + '}}$'\n xlabel = xlabel + units\n ylabel = ylabel + units\n # initalize figure\n self.fig, self.ax = plt.subplots(1, 1, figsize=(fig_width, fig_height))\n self.ax.set_ylabel(ylabel, fontsize=labelsize)\n self.ax.set_xlabel(xlabel, fontsize=labelsize)", "def setlimits(self, Xlim=[], Ylim=[]):\n self.data['Xmin'] = Xlim[0]\n self.data['Xmax'] = Xlim[1]\n self.data['Ymin'] = Ylim[0]\n self.data['Ymax'] = Ylim[1]", "def setupPhysicalBounds(self):\n \n ### 2018-05-06 WIC - **do not** enforce +/- pi limits on the\n ### angles here.\n self.boundsPhysLo = np.array(\\\n [0.00, 0.00, 0., 0., -np.inf, -np.inf,-np.inf,0 ] )\n self.boundsPhysHi = np.array(\\\n [np.inf, np.inf, 1., np.inf, np.inf, np.inf,np.inf, np.inf ] )", "def __init__(self, name='', description='', lon_bounds=[], lat_bounds=[],\n mask_bounds=[], do_land_mask=False):\n self.name = name\n self.description = description\n if lon_bounds and lat_bounds and not mask_bounds:\n self.mask_bounds = [(lat_bounds, lon_bounds)]\n else:\n self.mask_bounds = mask_bounds\n self.do_land_mask = do_land_mask", "def initialize_axes(self):\r\n self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])\r\n self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])\r\n self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])", "def autolabel(rects):", "def __init__(self,\n title = '',\n x_title = None,\n y_title = None,\n plot_header = True,\n ratio = False,\n x_range = None,\n y_max = None,\n y_min = None,\n legendColumns = 1):\n # Store the title\n self._title = title\n self._x_title, self._y_title = x_title, y_title\n\n # Store whether or not the user wants to create a plot header\n self._plot_header = plot_header\n\n # Calculate a unique name for the plot components\n name = _rand_uuid()\n\n # Default logy if off\n self._logy = False\n\n # Default off for integer x-ticks \n self._x_integer_ticks = False \n\n # store n columns for legend\n self.PLOT_LEGEND_N_COLUMNS = legendColumns \n\n # Create a canvas\n self._canvas = TCanvas(name + '_canvas',\n name,\n int(self.PLOT_WIDTH),\n int(self.PLOT_HEIGHT))\n SetOwnership(self._canvas, False)\n\n\n\n # Create the main plot and draw it\n self._plot = TPad(\n 'upperPad',\n 'upperPad',\n #name + '_plot', # WJF: don't need upper pad to have unique name \n #name,\n 0.0,\n (self.PLOT_RATIO_FRACTION\n if ratio\n else 0.0),\n 1.0,\n 1.0\n )\n SetOwnership(self._plot, False)\n self._plot.SetMargin(*(self.PLOT_MARGINS_WITH_RATIO\n if ratio\n else self.PLOT_MARGINS))\n self._plot.Draw()\n\n # Store ranges\n self._x_range = x_range\n if y_max is not None:\n self._set_maximum_value(y_max)\n if y_min is not None:\n self._set_minimum_value(y_min)\n\n # Switch back to the context of the canvas\n self._canvas.cd()\n\n\n # Create a ratio plot and draw it if requested\n if ratio:\n self._ratio_plot = TPad(\n 'lowerPad', # WJF, don't need lower pad to have unique name\n 'lowerPad',\n 0.0,\n 0.0,\n 1.0,\n self.PLOT_RATIO_FRACTION\n )\n SetOwnership(self._ratio_plot, False)\n self._ratio_plot.SetMargin(*self.PLOT_RATIO_MARGINS)\n self._ratio_plot.SetGridy(True)\n self._ratio_plot.Draw()\n else:\n self._ratio_plot = None\n # increase canvas margins\n #self._canvas.SetBottomMargin(1)\n #self._plot.SetMargin\n #self._canvas.SetLeftMargin(\n\n # Track whether or not we've already drawn to 
the main pad\n self._drawn = False\n\n # Track whether or not we've already drawn to the ratio pad\n self._ratio_drawn = False\n\n # Track that object which sets up the axes in the main plot\n self._axes_object = None\n\n # Track whether or not we've already added the atlas label to the main pad\n self._atlas_label_drawn = False\n\n # Create a structure to track any histograms we generate internally\n # which need to be added to any legends created\n self._legend_extras = []\n \n # Flag if y-axis has been set to a log scale \n self._logy = False", "def __init__(self, xRange, yData, max_width, min_width, max_gap):\n\n super(Classic, self).__init__(xRange, yData)\n self.max_width = max_width\n self.min_width = min_width\n self.max_gap = max_gap", "def _setDefaultValues(self, **kwargs):\n # Use to be able to be parent window.\n self.win = self\n self.batch = pyglet.graphics.Batch()\n # List to keep track oj objects.\n self.object_list = []\n # Determines whether or not the window will be resizeable.\n self.resizeable = kwargs.get('resize', True)\n # Vertical synchronisation.\n self.vsync = kwargs.get('vsync', 0)\n # Check whether a status bar is desired.\n self.status_bar = kwargs.get('status_bar', True)\n # Get number of layer. Defaults to six.\n self.layers = kwargs.get('layers', 6)\n # Number of bars for each waveform plot.\n self.detail = 1000\n # Logarithmic scale.\n self.log_scale = 10\n # List to store the actual WaveformPlot objects.\n self.waveforms = []\n # Start- and Endtime of the plots. Needs to be stored into the window\n # object because it has to be the same for all traces.\n self.starttime = kwargs.get('starttime', UTCDateTime(2010,1,1))\n self.endtime = kwargs.get('endtime', UTCDateTime(2010,2,19) - 1.0)\n # Waveform Layer. Waveforms will need some more layers. 
I will just add\n # three.\n # XXX: Maybe switch to some more elegant solution.\n self.waveform_layer1 = kwargs.get('waveform_layer', 3)\n self.waveform_layer2 = self.waveform_layer1 + 1\n self.waveform_layer3 = self.waveform_layer2 + 1\n # Offset of the waveform plots in the y-direction.\n # XXX: Currently used?\n self.waveform_offset = 0\n # Zoom box.\n self.zoom_box = None\n # Default error.\n self.default_error = ''", "def zoom(self, xmin, xmax, xlen, ymin, ymax, ylen):\n self.xmax = xmax\n self.xmin = xmin\n self.xmax = xmax\n self.xlen = xlen\n self.ymin = ymin\n self.ymax = ymax\n self.ylen = ylen\n self.refresh()", "def __init__(self, init_val):\n self.err_fig_num = init_val\n self.disp_fig_num = init_val", "def __init__(self, refstd, radmax=1.5, fig=None, rect=111, label='_'):\n\n from matplotlib.projections import PolarAxes\n import mpl_toolkits.axisartist.floating_axes as FA\n import mpl_toolkits.axisartist.grid_finder as GF\n\n self.refstd = refstd # Reference standard deviation\n\n tr = PolarAxes.PolarTransform()\n\n # Correlation labels\n rlocs = np.concatenate((np.arange(10) / 10., [0.95, 0.99]))\n tlocs = np.arccos(rlocs) # Conversion to polar angles\n gl1 = GF.FixedLocator(tlocs) # Positions\n tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))\n\n # Standard deviation axis extent\n self.smin = 0\n self.smax = radmax * self.refstd\n\n ghelper = FA.GridHelperCurveLinear(tr,\n extremes=(0, np.pi / 2, # 1st quadrant\n self.smin, self.smax),\n grid_locator1=gl1,\n tick_formatter1=tf1,\n )\n\n if fig is None:\n fig = plt.figure()\n\n ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)\n fig.add_subplot(ax)\n\n # Adjust axes\n ax.axis[\"top\"].set_axis_direction(\"bottom\") # \"Angle axis\"\n ax.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_text(\"Correlation\")\n\n ax.axis[\"left\"].set_axis_direction(\"bottom\") # \"X axis\"\n ax.axis[\"left\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"right\"].set_axis_direction(\"top\") # \"Y axis\"\n ax.axis[\"right\"].toggle(ticklabels=True)\n ax.axis[\"right\"].major_ticklabels.set_axis_direction(\"left\")\n\n ax.axis[\"bottom\"].set_visible(False) # Useless\n\n # Contours along standard deviations\n ax.grid(False)\n\n self._ax = ax # Graphical axes\n self.ax = ax.get_aux_axes(tr) # Polar coordinates\n\n # Add reference point and stddev contour\n # print \"Reference std:\", self.refstd\n l, = self.ax.plot([0], self.refstd, 'k*',\n ls='', ms=10, label=label)\n t = np.linspace(0, np.pi / 2)\n r = np.zeros_like(t) + self.refstd\n self.ax.plot(t, r, 'k--', label='_')\n\n # Collect sample points for latter use (e.g. 
legend)\n self.samplePoints = [l]", "def resetDefaults(self):\n self.client.SetFont(wx.Font(10,wx.SWISS,wx.NORMAL,wx.NORMAL))\n self.client.SetFontSizeAxis(10)\n self.client.SetFontSizeLegend(7)\n self.client.setLogScale((False,False))\n self.client.SetXSpec('auto')\n self.client.SetYSpec('auto')", "def render(self, **kwargs):\n self.color_domain = [\n self.vmin + (self.vmax - self.vmin) * k / 499.0 for k in range(500)\n ]\n self.color_range = [self.__call__(x) for x in self.color_domain]\n if self.tick_labels is None:\n self.tick_labels = legend_scaler(self.index, self.max_labels)\n\n super().render(**kwargs)\n\n figure = self.get_root()\n assert isinstance(figure, Figure), (\n \"You cannot render this Element \" \"if it is not in a Figure.\"\n )\n\n figure.header.add_child(\n JavascriptLink(\"https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js\"),\n name=\"d3\",\n ) # noqa", "def initialize(self):\n\n super(RectTab,self).initialize()\n # special tkinter variables that will be changed with the scales\n self.width = tk.IntVar()\n self.height = tk.IntVar()\n\n # make width scale\n self.widthScale = tk.Scale(self, from_=1, to=5, orient=tk.HORIZONTAL,\n label='Width', resolution=1, variable=self.width,\n command=self.updateSize)\n self.widthScale.grid(column=2, row=6, columnspan=1, sticky='W' + 'E')\n self.widthScale.set(2)\n\n # make height scale\n self.heightScale = tk.Scale(self, from_=1, to=5, orient=tk.HORIZONTAL,\n label='Height', resolution=1, variable=self.height,\n command=self.updateSize)\n self.heightScale.grid(column=2, row=7, columnspan=1, sticky='W' + 'E')\n self.heightScale.set(2)", "def __init__(self, refstd, fig=None, rect=111, label='_', max_std=None):\n\n from matplotlib.projections import PolarAxes\n import mpl_toolkits.axisartist.floating_axes as FA\n import mpl_toolkits.axisartist.grid_finder as GF\n\n self.refstd = refstd # Reference standard deviation\n\n tr = PolarAxes.PolarTransform()\n\n # Correlation labels\n rlocs = NP.concatenate((NP.arange(10) / 10., [0.95, 0.99]))\n tlocs = NP.arccos(rlocs) # Conversion to polar angles\n gl1 = GF.FixedLocator(tlocs) # Positions\n tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))\n\n # Standard deviation axis extent\n self.smin = 0\n self.smax = 1.5 * self.refstd\n\n if max_std is not None:\n self.smax = 1.1 * max_std\n\n ghelper = FA.GridHelperCurveLinear(tr,\n extremes=(0, NP.pi / 2, # 1st quadrant\n self.smin, self.smax),\n grid_locator1=gl1,\n tick_formatter1=tf1,\n )\n\n if fig is None:\n fig = PLT.figure()\n\n ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)\n fig.add_subplot(ax)\n\n # Adjust axes\n ax.axis[\"top\"].set_axis_direction(\"bottom\") # \"Angle axis\"\n ax.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_text(\"Correlation\")\n\n ax.axis[\"left\"].set_axis_direction(\"bottom\") # \"X axis\"\n ax.axis[\"left\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"right\"].set_axis_direction(\"top\") # \"Y axis\"\n ax.axis[\"right\"].toggle(ticklabels=True)\n ax.axis[\"right\"].major_ticklabels.set_axis_direction(\"left\")\n\n ax.axis[\"bottom\"].set_visible(False) # Useless\n\n # Contours along standard deviations\n ax.grid(False)\n\n self._ax = ax # Graphical axes\n self.ax = ax.get_aux_axes(tr) # Polar coordinates\n\n # Add reference point and stddev contour\n print \"Reference std:\", self.refstd\n l, = self.ax.plot([0], self.refstd, 'k*',\n 
ls='', ms=10, label=label)\n t = NP.linspace(0, NP.pi / 2)\n r = NP.zeros_like(t) + self.refstd\n self.ax.plot(t, r, 'k--', label='_')\n\n # Collect sample points for latter use (e.g. legend)\n self.samplePoints = [l]", "def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()", "def __place_statistics_labels(self):\n\n base_x = self.__statistics_coords[\"x\"]\n base_y = self.__statistics_coords[\"y\"]\n active_lines_label = Label(self.__main_window, textvariable=self.__active_lines_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n number_of_buses_label = Label(self.__main_window, textvariable=self.__active_buses_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n number_of_people_lable = Label(self.__main_window, textvariable=self.__number_of_people_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n session_time_lable = Label(self.__main_window, textvariable=self.__session_time_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 23))\n number_of_people_lable.place(x=base_x, y=base_y)\n active_lines_label.place(x=base_x-35, y=base_y + 35)\n number_of_buses_label.place(x=base_x+54, y=base_y + 69)\n session_time_lable.place(x=base_x-70, y=base_y + 116)", "def label_axes(self):\n xparam, yparam = split_pair_xy(self.pair)\n self.ax.set_xlabel(param_labels[xparam])\n self.ax.set_ylabel(param_labels[yparam])\n\n if xparam in ['P', 'Dmolar']:\n self.ax.set_xscale('log')\n if yparam in ['P', 'Dmolar']:\n self.ax.set_yscale('log')", "def make_prior_plot(prior_files_dict, x_key, y_key, get_grid, resolution):\n fig = plt.figure(figsize=(8, 6))\n ax = fig.add_subplot(111)\n cs = get_colors(len(prior_files_dict), alpha=0.8)\n legend_elements = []\n for i, (file, pri) in enumerate(prior_files_dict.items()):\n logging.info(f\"Creating bounds for {file}\")\n mass_data = get_masses_sample_from_prior(pri, 1000)\n x_vals = mass_data[x_key].values\n y_vals = mass_data[y_key].values\n x_range = [min(0.8 * x_vals), max(x_vals * 1.2)]\n y_range = [min(0.8 * y_vals), max(y_vals * 1.2)]\n x, y, z = get_grid(x_range, y_range, pri, resolution)\n assert np.count_nonzero(z) > 0, f\"None of the sampled values are in {file}\"\n label = file.replace(\".prior\", \"\")\n if i % 2 != 0:\n ax.contour(\n x,\n y,\n z,\n levels=1,\n colors=\"k\",\n linewidths=1,\n linestyles=\"solid\",\n alpha=0.4,\n )\n ax.contourf(x, y, z, levels=1, colors=[TRANSPARENT, cs[i]])\n legend_elements.append(Patch(facecolor=cs[i], edgecolor=\"k\", label=label))\n else:\n ax.contourf(x, y, z, levels=1, colors=[TRANSPARENT, cs[i]])\n legend_elements.append(Patch(facecolor=cs[i], label=label))\n\n ax.set_xscale(\"log\")\n ax.set_yscale(\"log\")\n ax.tick_params(which=\"both\", width=1)\n ax.tick_params(which=\"major\", length=8)\n ax.tick_params(which=\"minor\", length=5)\n\n ax.legend(\n handles=legend_elements, loc=\"upper left\", prop={\"size\": 12, \"weight\": \"normal\"}\n )\n return fig", "def setBounds(self, *args):\n return _libsbml.Dimensions_setBounds(self, *args)", "def initialize_mwplot(self):\n if self.fig is None:\n self.fig, self.ax = plt.subplots(1, figsize=self.figsize, dpi=self.dpi)\n if self.title is not None:\n self.fig.suptitle(self.title, 
fontsize=self.fontsize)\n self.ax.set_xlabel(f'{self._coord_english} ({self._unit_english})', fontsize=self.fontsize)\n self.ax.set_ylabel(f'{self._coord_english} ({self._unit_english})', fontsize=self.fontsize)\n self.ax.set_aspect(self.__aspect)\n self.ax.set_facecolor('k') # have a black color background for image with <1.0 alpha\n self.ax.imshow(self.__img, zorder=2, extent=self.__ext, alpha=self.imalpha, rasterized=True)\n self.ax.tick_params(labelsize=self.fontsize * 0.8, width=self.fontsize / 10, length=self.fontsize / 2)", "def _boundRect(self):\n addresstamp = reduce(lambda x, y: x + y, [v.addresstamp for v in self.footprints])\n self.upperleft = list(map(min, zip(*addresstamp)))\n self.bottomright = list(map(max, zip(*addresstamp)))\n self.upperright = [self.bottomright[0], self.upperleft[1]]\n self.bottomleft = [self.upperleft[0], self.bottomright[1]]\n (self.width, self.height) = (self.upperright[0] - self.bottomleft[0], self.bottomleft[1] - self.upperright[1])\n assert self.width >= 0\n assert self.height >= 0\n self.center = [self.upperleft[0] + self.width / float(2), self.upperleft[1] + self.height / float(2)]\n self.corners = [self.upperright, self.bottomleft, self.upperleft, self.bottomright]", "def _use_data_bounds_changed_for_axes(self):\n self.update_pipeline()", "def __init__(self):\n super(vanderpol_output,self).__init__()\n\n # add figure object for further use\n fig = plt.figure()\n self.ax = fig.add_subplot(111)\n self.ax.set_xlim([-2.5,2.5])\n self.ax.set_ylim([-10.5,10.5])\n plt.ion()\n self.sframe = None", "def set_labels(x, y=''):\n plt.xlabel(x)\n plt.ylabel(y)", "def __init__(self, *args, **kwargs):\n # Set tick length to zero so azimuthal labels are not too offset\n # Change default radial axis formatter but keep default theta one\n super().__init__(*args, **kwargs)\n formatter = axistools.Formatter('auto')\n self.yaxis.set_major_formatter(formatter)\n self.yaxis.isDefault_majfmt = True\n for axis in (self.xaxis, self.yaxis):\n axis.set_tick_params(which='both', size=0)", "def initialize_visualization(self) -> None:\n pass", "def plotDistributionWithLimitsRefine(lXs, llYs, lKClassif,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\",legend=\"\"):\n\n fig = plt.Figure(figsize=(40,20))\n fig.suptitle(title, fontsize=32)\n nbPlots = len(llYs)\n sqrt = int(math.ceil(math.sqrt(nbPlots)))\n ymax = 0.0\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ymax = max(max(val[0]),ymax)\n ymaxCurrent = max(max(val[2]),ymax)\n ymax = ymax*1.05\n xmax = 147\n gs = gridspec.GridSpec(1,2) \n ax = fig.add_subplot(gs[0])\n gsLimit = gridspec.GridSpecFromSubplotSpec(sqrt,sqrt, subplot_spec=gs[1])\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ax.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent = fig.add_subplot(gsLimit[i]) \n axCurrent.fill_between(lXs, val[1], val[2], alpha=0.35, edgecolor='black', facecolor=Graphics.lColors[i%25])\n axCurrent.set_title(\"Cluster K{}, (position: {})\".format(i,lKClassif[i]))\n axCurrent.fill_between(lXs, val[3], val[4], alpha=0.85, edgecolor='darkgray', facecolor='lightgray')\n axCurrent.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent.set_ylim(0,ymaxCurrent)\n axCurrent.set_xlim(1,xmax)\n axCurrent.text(10, ymaxCurrent*0.90, \"#nucleosomes: {}\".format(legend[i]), fontsize=12)\n axis_font = {'size':'28'}\n ax.set_ylim(0,ymax)\n ax.set_xlim(1,xmax)\n ax.legend([\"K{}\".format(x) for x in range(0,nbPlots)])\n ax.set_title(\"all nucleosomes\", **axis_font)\n ax.set_xlabel(xax, 
**axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)", "def init_graphics(self):\n m, n = 1280, 1024\n self.image = pg.ImageItem(np.zeros((m,n)))\n self.zoom = pg.ImageItem(np.zeros((50,50)))\n self.residuals = pg.ImageItem(np.zeros((50,50)))\n self.residuals.setLevels(self._residual_levels)\n self.x_fit = pg.PlotDataItem(np.zeros(m), pen={'width':2})\n self.x_slice = pg.PlotDataItem(np.zeros(m), pen=None, symbol='o', pxMode=True, symbolSize=4)\n self.y_fit = pg.PlotDataItem(np.zeros(n), pen={'width':2})\n self.y_slice = pg.PlotDataItem(np.zeros(n), pen=None, symbol='o', pxMode=True, symbolSize=4)\n\n # Only the residuals have any sort of false color - initialise the\n # lookup table and the legend\n cmap = self.get_color_map()\n self.residual_LUT = cmap.getLookupTable(nPts=256)\n self.res_legend = pg.GradientLegend(size=(10,255), offset=(0,20))\n self.res_legend.setGradient(cmap.getGradient())\n n_ticks = 5\n self.res_legend.setLabels({\"{}\".format(level):val\n for (level, val) in zip(\n np.linspace(*self._residual_levels, n_ticks),\n np.linspace(0, 1, n_ticks))})\n\n ypen = pg.mkPen(color=(255,255,0,85), width=3)\n\n # Centroid position markers in main image, aligned with x,y\n self.fit_v_line = pg.InfiniteLine(pos=1, angle=90, pen=ypen)\n self.fit_h_line = pg.InfiniteLine(pos=1, angle=0, pen=ypen)\n\n # Plot fading recent position markers\n n_history = 5\n self.history = collections.deque(maxlen=n_history)\n self.history_plot = pg.ScatterPlotItem()\n self.history_brushes = [pg.mkBrush(\n color=(255,255,0,int((i+1)*255/n_history)))\n for i in range(n_history)]\n\n # User marked position\n rpen = pg.mkPen(color=(255,0,0,127), width=3, style=QtCore.Qt.DotLine)\n self.mark_v_line = pg.InfiniteLine(pos=1, angle=90, pen=rpen)\n self.mark_h_line = pg.InfiniteLine(pos=1, angle=0, pen=rpen)\n self.mark_widgets.extend([\n self.mark_v_line, self.mark_h_line,\n ])\n\n # Mouse cursor\n wpen = pg.mkPen(color=(255,255,255,63), width=3)\n red = pg.mkColor(255,0,0,223)\n yellow = pg.mkColor(255,255,0,223)\n self.cursor_v = pg.InfiniteLine(pos=1, angle=90, pen=wpen)\n self.cursor_h = pg.InfiniteLine(pos=1, angle=0, pen=wpen)\n self.cursor_text = pg.TextItem()\n self.cursor_delta = pg.TextItem(anchor=(-0.1, -0.1), color=red)\n self.beam_delta = pg.TextItem(anchor=(-0.1, -0.1), color=yellow)\n self.zoom_text = pg.TextItem(anchor=(-0.1, -0.1), color=yellow)\n self.residuals_text = pg.TextItem(anchor=(-0.1, -0.1))\n self.mark_widgets.append(self.cursor_delta)\n self.mark_widgets.append(self.beam_delta)\n\n # Centroid position markers in zoomed image, aligned with beam\n # ellipse axes\n zoom_centre = QtCore.QPointF(25,25)\n self.fit_maj_line = pg.InfiniteLine(pos=zoom_centre, angle=90, pen=ypen)\n self.fit_min_line = pg.InfiniteLine(pos=zoom_centre, angle=0, pen=ypen)\n\n # Shows 1/e^2 ellipse of beam\n isopen = pg.mkPen(color=(255,255,0,85), width=3, style=QtCore.Qt.DotLine)\n self.isocurve = pg.IsocurveItem(pen=isopen)\n self.isocurve.setParentItem(self.zoom)", "def plotDistributionWithLimits(lXs, llYs, lKClassif,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\",legend=\"\"):\n\n fig = plt.Figure(figsize=(40,20))\n fig.suptitle(title, fontsize=32)\n nbPlots = len(llYs)\n sqrt = int(math.ceil(math.sqrt(nbPlots)))\n ymax = 0.0\n for val in llYs:\n ymax = max(max(val[0]),ymax)\n ymaxCurrent = max(max(val[2]),ymax)\n ymax = ymax*1.05\n xmax = 147\n gs = gridspec.GridSpec(1,2) \n ax = fig.add_subplot(gs[0])\n 
gsLimit = gridspec.GridSpecFromSubplotSpec(sqrt,sqrt, subplot_spec=gs[1])\n for i,val in enumerate(llYs):\n ax.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent = fig.add_subplot(gsLimit[i]) \n axCurrent.fill_between(lXs, val[1], val[2], alpha=0.35, edgecolor='black', facecolor=Graphics.lColors[i%25])\n axCurrent.set_title(\"Cluster K{}, (position: {})\".format(i,lKClassif[i]))\n axCurrent.fill_between(lXs, val[3], val[4], alpha=0.85, edgecolor='darkgray', facecolor='lightgray')\n axCurrent.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent.set_ylim(0,ymaxCurrent)\n axCurrent.set_xlim(1,xmax)\n axCurrent.text(10, ymaxCurrent*0.90, \"#nucleosomes: {}\".format(legend[i]), fontsize=12)\n axis_font = {'size':'28'}\n ax.set_ylim(0,ymax)\n ax.set_xlim(1,xmax)\n ax.legend([\"K{}\".format(x) for x in range(0,nbPlots)])\n ax.set_title(\"all nucleosomes\", **axis_font)\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)", "def setValues(self, values):\n if values is not None:\n self.scale_min, self.scale_max = values\n if self.scale_min is None:\n self.scale_min = self.start\n if self.scale_max is None:\n self.scale_max = self.end\n else:\n self.scale_min = self.start\n self.scale_max = self.end\n self.emitRange()\n self.updateDisplayValues()\n self.update()", "def init_fig(self, fig):\n # type: (Figure) -> None\n self.init_vars()\n\n self.xs, self.ys = np.meshgrid(np.arange(0., self.max_iter+.5)-.5, np.arange(0., self.n_vars+.5)-.5)\n self.cs = np.zeros((self.n_vars, self.max_iter))\n\n self.ax = fig.add_subplot(111)\n self.ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n self.ax.yaxis.set_ticks(np.arange(0, self.n_vars))\n self.ax.yaxis.set_ticklabels(self.var_names)\n\n self.ax.set_xlim([-.5, .5])\n self.ax.set_ylim([-.5, self.n_vars-.5])\n self.quad = self.ax.pcolormesh(self.xs, self.ys, self.cs,\n vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, norm=self.norm)\n\n fig.colorbar(self.quad)\n\n self.ax.set_xlabel('Evaluation #')", "def set_all_labels(ax, xax, ra_label, yax, dec_label, roundnum=1):\n ax.set_xticks(xax)\n ax.set_xticklabels(np.round(ra_label, roundnum))\n ax.set_yticks(yax)\n ax.set_yticklabels(np.round(dec_label, roundnum))\n ax.set_ylim(yax[0], yax[-1])\n \n return ax", "def setup_layout(self):\n\n # check if we should animate plot\n anim = self.get_option(self.sctn,'animate')\n if anim != None:\n self.animate = anim.lower() in ['t','true','1']\n else:\n self.animate = False\n self.anim_range=[]\n t = self.get_option(self.sctn,'anim_start')\n if t!=None:\n self.anim_range.append(int(t))\n else:\n self.anim_range.append(0)\n t = self.get_option(self.sctn,'anim_end')\n if t!=None:\n self.anim_range.append(int(t))\n else:\n self.anim_range.append(5)\n \n self.times = self.get_option(self.sctn,'times')\n if self.times == \"None\":\n self.times = [None]\n else:\n self.times = self.times.split()\n \n if len(self.variables)>1:\n self.numdata = len(self.variables)\n else:\n self.numdata = len(self.times)\n try:\n self.numcol = int(self.get_option(self.sctn,'ncol'))\n except:\n self.numcol = self.numdata\n if len(self.variables)>1:\n self.numrow = len(self.times)\n else:\n self.numrow = 1", "def init(self, constrain=True, rescale=True, **kwargs):\n # pylint: disable=W0201\n self.constrain = constrain\n self.rescale = rescale\n self.y_min = None\n self.y_max = None\n self.yhat_mean = None\n self.yhat_sd = None\n self.y_mean = None\n self.y_sd = 
None\n orig_init(self, **kwargs)", "def __init__(self, ymax, prev=None):\n XForm.__init__(self, prev)\n self.ymax = ymax", "def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )", "def autolabel(rects):\n #for rect in rects:\n for i in range(len(rects)):\n rect = rects[i]\n height = rect.get_height()\n ax.annotate('{}'.format(('%.2f' % (height)) + '% of\\n' + ('%d' % range_data[i].shape[0]) + ' people' ),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')", "def init_plot(self):\n self.dpi = 100\n self.fig = Figure((5.0, 5.0), dpi = self.dpi)\n\n self.main_plot = self.fig.add_subplot(111)\n self.main_plot.set_axis_bgcolor('black')\n self.main_plot.set_title('Dynamic venous flow view', size = 12)\n\n pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)\n pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)\n\n # Plot the data as a green line\n self.plot_data = self.main_plot.plot(\n self.daq.data0,\n linewidth = 1,\n color = (0, 1, 0),\n )[0]\n self.main_plot.grid(True, color='gray')", "def cla(self):\n # Don't forget to call the base class\n Axes.cla(self)\n \n x_min = 0\n y_min = 0\n x_max = 1\n y_max = 1\n x_spacing = 0.1\n y_spacing = 0.1\n self.xaxis.set_minor_locator(NullLocator())\n self.yaxis.set_minor_locator(NullLocator())\n self.xaxis.set_ticks_position('bottom')\n self.yaxis.set_ticks_position('left')\n Axes.set_xlim(self, x_min, x_max)\n Axes.set_ylim(self, y_min, y_max)\n self.xaxis.set_ticks(np.arange(x_min, x_max+x_spacing, x_spacing))\n self.yaxis.set_ticks(np.arange(y_min, y_max+y_spacing, y_spacing))", "def init_limits_and_stat(self):\n self.stat_dict.clear()\n self.stat_dict = {k: False for k in self.map_keys}\n\n self._init_rgb_dict()\n\n self.limit_dict.clear()\n self.limit_dict = {k: {\"low\": 0.0, \"high\": 100.0} for k in self.map_keys}\n\n self.set_low_high_value()", "def __init__(self, start, end, label=\"\"):\n self.start = start\n self.end = end\n self.label = label", "def initializePlot( self ):\n\n self.mNTaxa = len(self.mTree.get_taxa())\n self.mNNodes = max( self.mTree.chain.keys() ) + 1\n\n self.calculateCoordinates()\n \n self.calculateCanvasSize( )", "def init_all_params(self):\n self.annotations_timestamp = 0\n # self.annotations_offset = 0\n # self.annotation_offset_text.configure(text='Current: %d' % self.annotations_offset)\n self.annotations_timestamp_text.configure(text='Annotation timestamp:\\n %d' % self.annotations_timestamp)\n self.annotations_timestamp_text.grid(sticky=\"W\", row=9, column=0, columnspan=10)\n # set text frames\n # self.annotations_offset_entry.delete(0, 'end')\n # self.annotations_offset_entry.insert(0, str(self.annotations_offset))\n self.current_frame_entry.delete(0, 'end')\n self.current_frame_entry.insert(0, str(self.vid.frame_number))", "def __init__(self, limits, resolution):\n self.limits = limits\n self.resolution = resolution\n self.X, self.Y = self._create_meshgrid()\n self.coords, self.tree = self._generate_coords()\n self.fitness_function = self._calculate_fitness().reshape(self.resolution, self.resolution)\n self.max, self.min = np.max(self.fitness_function), np.min(self.fitness_function)", "def setMplDefaults():\n\n rcParams['figure.dpi'] = 300\n rcParams['figure.figsize'] = (4.5, 3)\n rcParams['savefig.dpi'] = 300\n rcParams['axes.grid'] = True\n rcParams['grid.linewidth'] = 0.5\n rcParams['grid.linestyle'] = ':'\n rcParams['font.family'] = 'Arial', 'Helvetica', 'DejaVu Sans'\n 
rcParams['font.size'] = 6\n rcParams['lines.markersize'] = 4\n rcParams['lines.linestyle'] = '-'\n rcParams['savefig.transparent'] = False\n rcParams['figure.subplot.bottom'] = 0.15\n rcParams['figure.subplot.top'] = 0.85\n rcParams['figure.subplot.left'] = 0.15\n rcParams['figure.subplot.right'] = 0.9", "def __init__(self):\n self.label = \"PFRR Ranger\"\n self.description = \"This tool plots the predicted impact point (Lon,Lat) \" + \\\n \"then calculates a 3sigma cirlce centered on the predicted impact point, and \" + \\\n \"intersects the circle with underlying Land Ownership to output a new \" + \\\n \"land_ownership_within_3sigma layer in the map\"\n self.canRunInBackground = False", "def __init__(self, units):\n super(PintAxisInfo, self).__init__(label='{:P}'.format(units))", "def _setBound(self, value):\n if self._colormap is not None:\n if self._index == 0:\n min_ = value\n max_ = self._colormap.getVMax()\n else: # self._index == 1\n min_ = self._colormap.getVMin()\n max_ = value\n\n if max_ is not None and min_ is not None and min_ > max_:\n min_, max_ = max_, min_\n self._colormap.setVRange(min_, max_)", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ..." ]
[ "0.66450757", "0.65903693", "0.64363885", "0.6421661", "0.64124984", "0.6323452", "0.6308811", "0.6259724", "0.6237798", "0.6225696", "0.61938274", "0.61938274", "0.614099", "0.6140022", "0.61156905", "0.6096484", "0.60959345", "0.6092208", "0.6075896", "0.6053978", "0.6044057", "0.60332817", "0.6031882", "0.6001694", "0.60007274", "0.59811664", "0.592745", "0.5910409", "0.5895897", "0.5873889", "0.5865471", "0.58651817", "0.5856062", "0.58546996", "0.585132", "0.584672", "0.5819406", "0.58077717", "0.5805578", "0.580259", "0.5776846", "0.5776739", "0.57648176", "0.574867", "0.574213", "0.57415706", "0.57119983", "0.57036924", "0.56880236", "0.56862223", "0.56851095", "0.5677195", "0.5670595", "0.56652206", "0.56632435", "0.5634081", "0.56312144", "0.56310356", "0.5628033", "0.56210774", "0.5619306", "0.5617264", "0.56075835", "0.559824", "0.5595758", "0.55915564", "0.55896884", "0.55800897", "0.5579848", "0.55780935", "0.5577691", "0.5570338", "0.5561572", "0.5559939", "0.5558938", "0.5551979", "0.5549257", "0.5542796", "0.5542006", "0.55323994", "0.55205494", "0.5517177", "0.55166596", "0.5512766", "0.5511828", "0.55112326", "0.55103093", "0.5509764", "0.55064285", "0.5504233", "0.55031943", "0.5502349", "0.5488717", "0.5475033", "0.5474934", "0.5469941", "0.54687345", "0.54687345", "0.54687345", "0.54687345" ]
0.6262071
7
Draws the bounds for a level's parameter space.
def add_bounds_to_ax(self, x: np.ndarray, y: np.ndarray, z: int) -> None:
    width = max(y) - min(y)
    height = max(x) - min(x)
    p = Rectangle(
        (min(y), min(x)),
        width,
        height,
        edgecolor="black",
        facecolor="none",
        linestyle="--",
    )
    self.ax.add_patch(p)
    art3d.pathpatch_2d_to_3d(p, z=z, zdir="y")
    self.draw()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_bounds():\n\n pass", "def bounds(self, pos):", "def write_bounds(self):\n optimized_par_df = \\\n self.parameter_df.loc[self.parameter_df.estimate == 1\n & (~self.parameter_df.index.isin(\n self.amici_model.getFixedParameterIds())), :]\n self.f.require_dataset('/parameters/lowerBound',\n shape=optimized_par_df.lowerBound.shape,\n data=optimized_par_df.lowerBound, dtype='f8')\n self.f.require_dataset('/parameters/upperBound',\n shape=optimized_par_df.upperBound.shape,\n data=optimized_par_df.upperBound, dtype='f8')", "def parameter_bounds(self):\n for name, bound in self.named_parameter_bounds():\n yield bound", "def cb_bounds(self, variable, results_dict, keys, fixed_bounds):\n tas_bound, pr_bound = fixed_bounds\n if variable == \"tas\":\n if tas_bound:\n bound_limit = tas_bound\n else:\n bound_limit = self.find_abs_bound_range(results_dict, keys)\n cmap = plt.cm.RdBu_r\n else:\n if pr_bound:\n bound_limit = pr_bound\n else:\n bound_limit = self.find_abs_bound_range(results_dict,\n keys,\n avg_over=25)\n cmap = plt.cm.BrBG\n bounds = np.linspace(-1 * bound_limit, bound_limit, 11)\n return [bounds, cmap]", "def get_params_bounds(self) -> np.array:\n pass", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:\n return [(-1.0, 1.0)] * len(list(self.params()))", "def bounds(self): # -> tuple[()]:\n ...", "def get_bounds():\n return [0.00], [1.00]", "def get_bounds_parameters(self):\n bounds = []\n bounds += self.var_noise.bounds\n bounds += self.mean.bounds\n bounds += self.kernel.get_bounds_parameters()\n\n return bounds", "def getBounds(self, nStates, nParams):\n raise NotImplementedError(\n \"bounds have not been implemented for this Experiment\")", "def draw_geo_bound(data, ax, percent=0.05, color=\"black\", bound_width=1):\r\n x1, y1, x2, y2 = data.total_bounds\r\n w, h = x2 - x1, y2 - y1\r\n w_margin, h_margin = w * percent, h * percent\r\n rect = plt.Rectangle((x1 - w_margin, y1 - h_margin),\r\n w + w_margin * 2, h + h_margin * 2,\r\n color=color, fill=False,\r\n lw=bound_width, clip_on=False)\r\n ax.add_patch(rect)", "def compute_bounds(self, space):\n bounds = np.zeros((len(space), 2))\n\n for idx, param in enumerate(space):\n\n if TYPE[param[\"type\"]] is TYPE.FLOAT or \\\n TYPE[param[\"type\"]] is TYPE.INTEGER:\n bounds[idx] = (param[\"min\"], param[\"max\"])\n\n elif TYPE[param[\"type\"]] is TYPE.DISCRETE or \\\n TYPE[param[\"type\"]] is TYPE.DISCRETE:\n bounds[idx] = (0, len(param['values']))\n\n return bounds", "def print_level(self, list_level, window, begin, wall, end):\n\t\tfor y in range(0,15):\n\t\t\tfor x in range(0,15):\n\t\t\t\tif list_level[y][x] == 'd':\n\t\t\t\t\tposition_x = x * 30\n\t\t\t\t\tposition_y = y * 30\n\t\t\t\t\twindow.blit(begin, (position_x,position_y))\n\t\t\t\telif list_level[y][x] == 'm':\n\t\t\t\t\tposition_x = x * 30\n\t\t\t\t\tposition_y = y * 30\n\t\t\t\t\twindow.blit(wall, (position_x,position_y))\n\t\t\t\telif list_level[y][x] == 'a':\n\t\t\t\t\tposition_x = x * 30\n\t\t\t\t\tposition_y = y * 30\n\t\t\t\t\twindow.blit(end, (position_x,position_y))\n\t\t\t\telse: # it's a 0\n\t\t\t\t\tcontinue", "def printLimits():\n print(\"MinX:\",Drawable._minX)\n print(\"MaxX:\",Drawable._maxX)\n print(\"MinY:\",Drawable._minY)\n print(\"MaxY:\",Drawable._maxY)", "def _boundRect(self):\n 
addresstamp = reduce(lambda x, y: x + y, [v.addresstamp for v in self.footprints])\n self.upperleft = list(map(min, zip(*addresstamp)))\n self.bottomright = list(map(max, zip(*addresstamp)))\n self.upperright = [self.bottomright[0], self.upperleft[1]]\n self.bottomleft = [self.upperleft[0], self.bottomright[1]]\n (self.width, self.height) = (self.upperright[0] - self.bottomleft[0], self.bottomleft[1] - self.upperright[1])\n assert self.width >= 0\n assert self.height >= 0\n self.center = [self.upperleft[0] + self.width / float(2), self.upperleft[1] + self.height / float(2)]\n self.corners = [self.upperright, self.bottomleft, self.upperleft, self.bottomright]", "def check_parameter_bounds(self):\n for p in self.variables.keys():\n data = self.get_attr(p)\n if isinstance(data, dc_cp):\n if data.val > data.max_val + err:\n msg = (\n 'Invalid value for ' + p + ': ' + p + ' = ' +\n str(data.val) + ' above maximum value (' +\n str(data.max_val) + ') at component ' + self.label +\n '.')\n logger.warning(msg)\n\n elif data.val < data.min_val - err:\n msg = (\n 'Invalid value for ' + p + ': ' + p + ' = ' +\n str(data.val) + ' below minimum value (' +\n str(data.min_val) + ') at component ' + self.label +\n '.')\n logger.warning(msg)\n\n elif isinstance(data, dc_cc) and data.is_set:\n expr = self.get_char_expr(data.param, **data.char_params)\n data.char_func.get_domain_errors(expr, self.label)\n\n elif isinstance(data, dc_gcc) and data.is_set:\n for char in data.elements:\n char_data = self.get_attr(char)\n expr = self.get_char_expr(\n char_data.param, **char_data.char_params)\n char_data.char_func.get_domain_errors(expr, self.label)", "def draw_level(self):\r\n self.level_surface.blit(self.map_image, self.viewport, self.viewport)\r\n self.level_surface.blit(self.title_box, self.title_rect)", "def GetBounds(self, p_int, p_int_1, p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def set_bounds(self, **kwargs):\n for name, bounds in kwargs.items():\n if name not in self._parameters:\n raise AttributeError('Unknown parameter %s for %s' % (name, self.__class__.__name__))\n param = self._parameters[name]\n # Set bounds\n lower_bound, upper_bound = bounds\n if torch.is_tensor(lower_bound) and torch.is_tensor(upper_bound):\n if lower_bound.size() != upper_bound.size() or \\\n lower_bound.size() != param.size():\n raise AttributeError('Lower bound, upper bound, and param should have the same size')\n elif not (isinstance(lower_bound, int) or isinstance(lower_bound, float)) or \\\n not (isinstance(upper_bound, int) or isinstance(upper_bound, float)):\n raise AttributeError('Unsupported argument types for parameter %s' % name)\n\n if name not in self._bounds:\n self._bounds[name] = [None, None]\n self._bounds[name][0] = lower_bound\n self._bounds[name][1] = upper_bound\n return self", "def list_bounds(self) -> None:\n print(\"\")\n print(\n \"Mu = {}\".format(self.experiment_file_dict[\"motors\"][\"mu\"][\"bounds\"])\n )\n print(\n \"Eta = {}\".format(self.experiment_file_dict[\"motors\"][\"eta\"][\"bounds\"])\n )\n print(\n \"Chi = {}\".format(self.experiment_file_dict[\"motors\"][\"chi\"][\"bounds\"])\n )\n print(\n \"Phi = {}\".format(self.experiment_file_dict[\"motors\"][\"phi\"][\"bounds\"])\n )\n print(\n \"Nu = {}\".format(self.experiment_file_dict[\"motors\"][\"nu\"][\"bounds\"])\n )\n print(\n \"Del = {}\".format(self.experiment_file_dict[\"motors\"][\"del\"][\"bounds\"])\n )\n print(\"\")", "def canvas_bounds(self) -> utils.BoxRegion:", "def 
return_parameter_bounds(maximum_luminosity=20):\n return [(maximum_luminosity, maximum_luminosity + 3),\n (3 * 10 ** -4, 8 * 10 ** -3), (2., 350), (-8., -0.2),\n (-400, 400)]", "def BoundsToJsonp(lat, lng, lod, bounds, function_name):\n print \"%s('%s');\" % (function_name, BoundsToRawJson(lat, lng, lod, bounds))", "def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p) for p in self.shape_parameters.keys()]\n elif parameter_name in list(self.__likelihood.rate_parameters.keys()) + list(self.__likelihood.shape_parameters.keys()):\n return self.__likelihood.get_bounds(parameter_name)\n # in the newly added parameters\n else:\n anchor_settings = list(self.shape_parameters[parameter_name][0].keys())\n return min(anchor_settings), max(anchor_settings)", "def create_bounds(self):\n # Bounds should be created for\n x0, y0 = (0, 0)\n x1 = self.game_area_size[0]\n y1 = self.game_area_size[1]\n space = self.space\n\n borders = [\n phy.Segment(space.static_body, phy.Vec2d(x0, y0), phy.Vec2d(x1, y0), 0),\n phy.Segment(space.static_body, phy.Vec2d(x0, y0), phy.Vec2d(x0, y1), 0),\n phy.Segment(space.static_body, phy.Vec2d(x1, y0), phy.Vec2d(x1, y1), 0),\n phy.Segment(space.static_body, phy.Vec2d(x0, y1), phy.Vec2d(x1, y1), 0),\n ]\n for b in borders:\n b.elasticity = 0.5\n self.space.add(borders)", "def draw_level(self, DISP, level:int):\r\n windowsize = DISP.get_size()\r\n Level_Text_Obj = self.FontObj.render(\"LEVEL: \" + str(level), True, Colors.colors['WHITE'])\r\n Level_Text_rec = Level_Text_Obj.get_rect()\r\n Level_Text_rec.top = windowsize[1] - Level_Text_rec.height\r\n Level_Text_rec.left = windowsize[0] - Level_Text_rec.width\r\n DISP.blit(Level_Text_Obj, Level_Text_rec)", "def GetBounds(self, vtkAMRBox, , , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def draw_boundary() -> None:\n # Upper edge\n print(rpipes.terminal.move_xy(0, 0), WBorder.HORIZONTAL * (rpipes.terminal.width - 1))\n\n # Left and Right edges\n for row in range(rpipes.terminal.height - 2):\n print(\n WBorder.VERTICAL,\n rpipes.terminal.move_right(rpipes.terminal.width - 4),\n WBorder.VERTICAL,\n )\n\n # Bottom edge\n print(\n rpipes.terminal.move_xy(0, rpipes.terminal.height - 2),\n WBorder.HORIZONTAL * (rpipes.terminal.width - 1),\n )\n\n # Top left corner\n print(rpipes.terminal.move_xy(0, 0) + WBorder.DOWN_AND_RIGHT)\n\n # Top right corner\n print(rpipes.terminal.move_xy(rpipes.terminal.width - 1, 0) + WBorder.DOWN_AND_LEFT)\n\n # Bottom left corner\n print(rpipes.terminal.move_xy(0, rpipes.terminal.height - 2) + WBorder.UP_AND_RIGHT)\n\n # Bottom right corner\n print(\n rpipes.terminal.move_xy(rpipes.terminal.width - 1, rpipes.terminal.height - 2)\n + WBorder.UP_AND_LEFT\n )", "def named_parameter_bounds(self):\n for name, _ in self.named_parameters():\n yield name, self.bound_for(name)", "def bounds(self):\n return [(2, None)]", "def bounds(self) -> Box:\n raise NotImplementedError()", "def bounds(self):\n return self.kernel.bounds", "def get_bounds(self):\n raise Exception(\"Non-implemented base class method.\")", "def GetIndexBounds(self, p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def unit_bounds(dimension):\n\n return [-1.0, 1.0] * dimension", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def SetBounds(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5):\n ...", "def SetBounds(self, p_float, p_float_1, p_float_2, p_float_3, 
p_float_4, p_float_5):\n ...", "def bounds(self):\n if self.change_dimensionality:\n return [self._bounds[0]] * self.N\n else:\n return self._bounds", "def get_bounds():\n bounds = [\n (0.1, 0.5), # Omega_m\n (0.05, 0.15) # beta\n ]\n return np.array(bounds)", "def draw(self, verbosity=0):\n\n # Calculate overall scale and position of the map\n self.update_bounds()\n # Draw the dungeon background (everything behind the grid)\n self.draw_background(verbosity)\n # Draw the grid\n self.draw_grid(verbosity)\n # Draw the dungeon foreground (everything in front of the grid)\n self.draw_foreground(verbosity)\n\n pygame.display.flip()", "def fitting_parameter_plot(self, d, bin1, name, no):\n\t\tfor i in range(0,no):\n\t\t\ts = d[:,i]\n\t\t\tdist_names = ['rayleigh', 'norm', 'lognorm', 'gamma']\n\t\t\tcolors = ['b', 'g', 'r', 'y', 'm']\n\t\t\tfor dist_name,col in zip(dist_names,colors):\n\t\t\t\tdist = getattr(sp, dist_name)\n\t\t\t\tshape[i], location[i], scale[i] = dist.fit(s)\n\t\treturn shape, location, scale", "def _boundRect(self):\n self.upperleft = list(map(min, zip(*self.addresstamp)))\n self.bottomright = list(map(max, zip(*self.addresstamp)))\n self.upperright = [self.bottomright[0], self.upperleft[1]]\n self.bottomleft = [self.upperleft[0], self.bottomright[1]]\n (self.width, self.height) = (self.upperright[0] - self.bottomleft[0], self.bottomleft[1] - self.upperright[1])\n assert self.width >= 0\n assert self.height >= 0\n self.center = [self.upperleft[0] + self.width / float(2), self.upperleft[1] + self.height / float(2)]\n self.corners = [self.upperright, self.bottomleft, self.upperleft, self.bottomright]", "def plot_stability_function(self,bounds=[-20,1]):\n import matplotlib.pyplot as plt\n p,q=self.stability_function()\n xx=np.arange(bounds[0], bounds[1], 0.01)\n yy=p(xx)/q(xx)\n fig, = plt.plot(xx,yy)\n plt.draw()", "def __str__(self):\n template = ('Bounds: minx={f}, miny={f}, minz={f}\\n '\n 'maxx={f}, maxy={f}, maxz={f}'.format(f=self._format))\n # Formatter must be recreated each time to reset value counter\n return NoneFormatter().format(template, *self)", "def get_bounds(shakefile, parameter='pga', threshold=2):\n from mapio.shake import ShakeGrid\n\n shakemap = ShakeGrid.load(shakefile, adjust='res')\n if parameter == 'pga':\n vals = shakemap.getLayer('pga')\n elif parameter == 'pgv':\n vals = shakemap.getLayer('pgv')\n else:\n raise Exception('parameter not valid')\n xmin, xmax, ymin, ymax = vals.getBounds()\n lons = np.linspace(xmin, xmax, vals.getGeoDict().nx)\n lats = np.linspace(ymax, ymin, vals.getGeoDict().ny) # backwards so it plots right\n row, col = np.where(vals.getData() > float(threshold))\n lonmin = lons[col].min()\n lonmax = lons[col].max()\n latmin = lats[row].min()\n latmax = lats[row].max()\n\n boundaries1 = {'dx': 100, 'dy': 100., 'nx': 100., 'ny': 100} # dummy fillers, only really care about bounds\n if xmin < lonmin:\n boundaries1['xmin'] = lonmin\n else:\n boundaries1['xmin'] = xmin\n if xmax > lonmax:\n boundaries1['xmax'] = lonmax\n else:\n boundaries1['xmax'] = xmax\n if ymin < latmin:\n boundaries1['ymin'] = latmin\n else:\n boundaries1['ymin'] = ymin\n if ymax > latmax:\n boundaries1['ymax'] = latmax\n else:\n boundaries1['ymax'] = ymax\n\n return boundaries1", "def setConstraints(self, boundsDict):\n for param in boundsDict.keys():\n try:\n if boundsDict[param][0] < boundsDict[param][1]:\n constraintString = \"{:4.4e} < {:s} < {:4.4e}\".format(boundsDict[param][0], param, boundsDict[param][1])\n self.addConstraints(constraintString)\n else:\n 
print('Setting constraints on mbvg; reversing bounds')\n self.addConstraints(\"{:4.4e} < A < {:4.4e}\".format(boundsDict[param][1], boundsDict[param][0]))\n except ValueError:\n print('Cannot set parameter {:s} for mbvg. Valid choices are', \\\n '(\\'A\\', \\'muX\\', \\'muY\\', \\'sigX\\', \\'sigY\\', \\'sigP\\', \\'bg\\')'.format(param))", "def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax", "def optimization_bounds(self, topology):\n bounds_low = np.zeros(self.number_of_parameters())\n bounds_up = np.zeros(self.number_of_parameters())\n\n for pkey, parameter in self.parameters.items():\n bounds_low[pkey] = parameter.bound_low(topology)\n bounds_up[pkey] = parameter.bound_up(topology)\n\n return bounds_low, bounds_up", "def show_grid(self, **kwargs):\n kwargs.setdefault('grid', 'back')\n kwargs.setdefault('location', 'outer')\n kwargs.setdefault('ticks', 'both')\n return self.show_bounds(**kwargs)", "def bounds(*tile):\n tile = _parse_tile_arg(*tile)\n xtile, ytile, zoom, provider_bounds = tile\n a = ul(xtile, ytile, zoom, provider_bounds)\n b = ul(xtile + 1, ytile + 1, zoom, provider_bounds)\n return Bbox(a[0], b[1], b[0], a[1])", "def bounds(self):\n return self.GetBounds()", "def DrawBase(screen, base_x, base_y, base_len, base_width):\n pygame.draw.rect(screen, (255,0,0),(base_x, base_y, base_len*2, base_width*2), 4)", "def internal_bounds(self) -> tuple[float, float, float, float]:\n xres, yres = self.res\n w, s, e, n = self.bounds\n y0, y1 = (n, s) if yres < 0 else (s, n)\n x0, x1 = (e, w) if xres < 0 else (w, e)\n return x0, y0, x1, y1", "def get_bounds(shakefile, parameter='pga', threshold=2.0):\n shakemap = ShakeGrid.load(shakefile, adjust='res')\n if parameter == 'pga':\n vals = shakemap.getLayer('pga')\n elif parameter == 'pgv':\n vals = shakemap.getLayer('pgv')\n else:\n raise Exception('parameter not valid')\n xmin, xmax, ymin, ymax = vals.getBounds()\n lons = np.linspace(xmin, xmax, vals.getGeoDict().nx)\n lats = np.linspace(ymax, ymin, vals.getGeoDict().ny)\n row, col = np.where(vals.getData() > float(threshold))\n lonmin = lons[col].min()\n lonmax = lons[col].max()\n latmin = lats[row].min()\n latmax = lats[row].max()\n\n # dummy fillers, only really care about bounds\n boundaries1 = {'dx': 100, 'dy': 100., 'nx': 100., 'ny': 100}\n\n if xmin < lonmin:\n boundaries1['xmin'] = lonmin\n else:\n boundaries1['xmin'] = xmin\n if xmax > lonmax:\n boundaries1['xmax'] = lonmax\n else:\n boundaries1['xmax'] = xmax\n if ymin < latmin:\n boundaries1['ymin'] = latmin\n else:\n boundaries1['ymin'] = ymin\n if ymax > latmax:\n boundaries1['ymax'] = latmax\n else:\n boundaries1['ymax'] = ymax\n\n return boundaries1", "def draw(self, axes, feature, bbox, location, style_param):\n pass", "def bounds(self):\n return self._bounds", "def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params", "def internal_stability_plot(self,bounds=None,N=200,use_butcher=False,formula='lts',levels=[1,100,500,1000,1500,10000]):\n import nodepy.stability_function as stability_function\n import matplotlib.pyplot as plt\n from nodepy.utils import find_plot_bounds\n from matplotlib.colors import LogNorm\n\n p,q = self.stability_function(use_butcher=use_butcher,formula=formula)\n # Convert coefficients to floats for speed\n if p.coeffs.dtype=='object':\n p = np.poly1d([float(c) for c in p.coeffs])\n q = np.poly1d([float(c) for c in q.coeffs])\n\n stable = lambda z : 
np.abs(p(z)/q(z))<=1.0\n bounds = find_plot_bounds(stable,guess=(-10,1,-5,5))\n\n theta = self.internal_stability_polynomials(use_butcher=use_butcher,formula=formula)\n\n x=np.linspace(bounds[0],bounds[1],N)\n y=np.linspace(bounds[2],bounds[3],N)\n X=np.tile(x,(N,1))\n Y=np.tile(y[:,np.newaxis],(1,N))\n Z=X + Y * 1j\n\n th_vals = np.zeros((len(theta), N, N), dtype=np.complex64)\n\n for j in range(len(theta)):\n thetaj = np.poly1d([float(c) for c in theta[j].coeffs])\n th_vals[j,...] = thetaj(Z)\n th_max = np.max(np.abs(th_vals),axis=0)\n\n fig = plt.figure()\n CS = plt.contour(X,Y,th_max,colors='k',levels=levels)\n plt.clabel(CS, fmt='%d', colors='k')#,manual=True)\n\n p,q=self.__num__().stability_function(mode='float')\n stability_function.plot_stability_region(p,q,N,color='k',filled=False,bounds=bounds,\n fignum=fig.number)", "def action_space(self):\n lower_bounds = np.array([])\n upper_bounds = np.array([])\n for joint in self._used_joints:\n joint_idx = self._joint_limits.joint_names.index(joint)\n if self._control_mode == 'position':\n lower_bounds = np.concatenate(\n (lower_bounds,\n np.array(self._joint_limits.position_lower[\n joint_idx:joint_idx + 1])))\n upper_bounds = np.concatenate(\n (upper_bounds,\n np.array(self._joint_limits.position_upper[\n joint_idx:joint_idx + 1])))\n elif self._control_mode == 'velocity':\n velocity_limit = np.array(\n self._joint_limits.velocity[joint_idx:joint_idx + 1]) * 0.1\n lower_bounds = np.concatenate((lower_bounds, -velocity_limit))\n upper_bounds = np.concatenate((upper_bounds, velocity_limit))\n elif self._control_mode == 'effort':\n effort_limit = np.array(\n self._joint_limits.effort[joint_idx:joint_idx + 1])\n lower_bounds = np.concatenate((lower_bounds, -effort_limit))\n upper_bounds = np.concatenate((upper_bounds, effort_limit))\n else:\n raise ValueError(\n 'Control mode %s is not known!' 
% self._control_mode)\n return gym.spaces.Box(\n np.concatenate((lower_bounds, np.array([0]))),\n np.concatenate((upper_bounds, np.array([100]))),\n dtype=np.float32)", "def draw(self, DISP, life_counter:int, level:int):\r\n assert self.is_init, 'Call first Game_Field.init() before draw game!'\r\n y_count,x_count = 3, 0\r\n start_maze = 0, 0\r\n \r\n DISP.fill(Colors.colors['BLACK'])\r\n # Maze get blit on the Screen of the game\r\n DISP.blit(self.maze, start_maze) \r\n # Draw the numer of Pac-Mans's life\r\n self.draw_pacman_life(life_counter, DISP) \r\n # Draw the actual level on the screen\r\n self.draw_level(DISP, level)\r\n for y in self.look_up_table[3 : -2]: #< y is a list of one row from the maze\r\n for x in y: #< x is a string that is decoded as already explained\r\n pos = [self.grid_size * x_count, self.grid_size * y_count]\r\n # Set reference position in the middle of one square\r\n pos[0] += self.grid_size // 2\r\n pos[1] += self.grid_size // 2\r\n x_count += 1\r\n # Check if x is a Dot or an Energizer\r\n if x != None and (x[0] == 'p' or x == 'e'):\r\n radius = 6\r\n if x == 'e':\r\n radius = self.grid_size // 2 - 4\r\n pg.draw.circle(DISP, Colors.colors['POINTS'], tuple(pos), radius)\r\n elif x[0] == 'p':\r\n pg.draw.rect(DISP, Colors.colors['POINTS'], ((pos[0] - radius // 2, pos[1] - radius // 2), (radius, radius)))\r\n \r\n \r\n y_count += 1\r\n x_count = 0", "def draw1 ( self ,\n dataset = None ,\n nbins = 100 ,\n silent = True ,\n in_range = None ,\n args = () , **kwargs ) :\n if in_range and isinstance ( in_range , tuple ) and 2 == len ( in_range ) :\n range_name = 'aux2_rng2_%s' % self.name \n with rooSilent ( 3 ) : \n self.yvar.setRange ( range_name , in_range[0] , in_range[1] )\n if dataset:\n dataset.get_var(self.yvar.GetName()).setRange ( range_name , in_range[0] , in_range[1] )\n\n in_range = range_name \n\n return self.draw ( drawvar = self.xvar , \n dataset = dataset ,\n nbins = nbins ,\n ybins = 20 , ## fake \n silent = silent ,\n in_range = in_range ,\n args = args , **kwargs )", "def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row", "def draw(self, base, level):\n\n a = base.a\n b = base.b\n\n if level > 0:\n delta = base.b - base.a\n px = a.x + delta.x / 3\n py = a.y + delta.y / 3\n rx = a.x + 2 * delta.x / 3\n ry = a.y + 2 * delta.y / 3\n p = Point(px, py)\n r = Point(rx, ry)\n q = Point(rx, ry)\n q.rotate_deg(60, p)\n self.draw(Line(a,p), level-1)\n self.draw(Line(p,q), level-1)\n self.draw(Line(q,r), level-1)\n self.draw(Line(r,b), level-1)\n else:\n self.container.window.create_line(a.x, a.y, b.x, b.y)", "def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p)\n for p in self.shape_parameters]\n if parameter_name in self.shape_parameters.keys():\n bounds = []\n for ll in self.likelihood_list:\n if parameter_name in ll.shape_parameters.keys():\n bounds.append(ll.get_bounds(parameter_name))\n bounds = np.array(bounds)\n ret= np.max(bounds[:,0]), np.min(bounds[:,1])\n if ret[1] <= ret[0]:\n raise InvalidParameterSpecification(\"lower bound %s higher than upper bound!\" % parameter_name)\n return ret\n\n elif parameter_name.endswith('_rate_multiplier'):\n return 0, float('inf')\n else:\n raise InvalidParameter(\"Non-existing parameter %s\" % parameter_name)", "def setActiveBounded(self, bounds=None, status=1): \n if bounds==None or len(bounds)!=4:\n return\n x1,y1,x2,y2 = bounds\n if x1>x2 :\n temp=x1;x1=x2;x2=temp\n if y1>y2:\n temp=y1;y1=y2;y2=temp\n for i in 
range(0,self.length()):\n x=self.x[i]; y=self.y[i]\n if (x>x1 and x<x2) and (y>y1 and y<y2):\n self.active[i]= status \n return", "def bounds(self):\n return (\n self.x, self.y,\n self.x, self.y\n )", "def bounds(self):\n frame_ = self.to_frame().total_bounds.flatten().tolist()\n return BBox(\n left=frame_[0], bottom=frame_[1], right=frame_[2], top=frame_[3]\n )", "def normalize_bounds(self, bounds):\n scaled_bounds = []\n scalings = []\n intercepts = []\n \n non_fixed_params = []\n \n print(self.device)\n \n for name, domain in self.bounds.items():\n # Get any fixed parmeters\n if type(domain) == int or type(domain) == float:\n # Take note\n self.fixed_parameters.append(name)\n\n # Free parameters\n elif type(domain) == tuple:\n # Bookkeeping\n self.free_parameters.append(name)\n\n # Get scaling\n lower_bound = min(domain)\n upper_bound = max(domain)\n scale = upper_bound - lower_bound\n\n # Transform to [0, 1] domain\n #scaled_bound = {'name': name, 'type': 'continuous', 'domain': (0., 1.)} #torch.adjustment required\n non_fixed_params.append(name)\n \n # Store\n #scaled_bounds.append(scaled_bound)\n scalings.append(scale)\n intercepts.append(lower_bound)\n else:\n raise ValueError(\"Domain bounds not understood\")\n \n n_hyperparams = len(non_fixed_params)\n \n scaled_bounds = cat([zeros(1,n_hyperparams, device = self.device), \n ones(1, n_hyperparams, device = self.device)], 0)\n return scaled_bounds, tensor(scalings, device = self.device, requires_grad = False), tensor(intercepts, device = self.device, requires_grad = False) #torch.adjustment required", "def show_bounds(\n self,\n mesh=None,\n bounds=None,\n axes_ranges=None,\n show_xaxis=True,\n show_yaxis=True,\n show_zaxis=True,\n show_xlabels=True,\n show_ylabels=True,\n show_zlabels=True,\n bold=True,\n font_size=None,\n font_family=None,\n color=None,\n xtitle='X Axis',\n ytitle='Y Axis',\n ztitle='Z Axis',\n n_xlabels=5,\n n_ylabels=5,\n n_zlabels=5,\n use_2d=False,\n grid=None,\n location='closest',\n ticks=None,\n all_edges=False,\n corner_factor=0.5,\n fmt=None,\n minor_ticks=False,\n padding=0.0,\n use_3d_text=True,\n render=None,\n **kwargs,\n ):\n self.remove_bounds_axes()\n\n if font_family is None:\n font_family = self._theme.font.family\n if font_size is None:\n font_size = self._theme.font.size\n if fmt is None:\n fmt = self._theme.font.fmt\n if fmt is None:\n fmt = '%.1f' # fallback\n\n if 'xlabel' in kwargs: # pragma: no cover\n xtitle = kwargs.pop('xlabel')\n warnings.warn(\n \"`xlabel` is deprecated. Use `xtitle` instead.\",\n PyVistaDeprecationWarning,\n )\n if 'ylabel' in kwargs: # pragma: no cover\n ytitle = kwargs.pop('ylabel')\n warnings.warn(\n \"`ylabel` is deprecated. Use `ytitle` instead.\",\n PyVistaDeprecationWarning,\n )\n if 'zlabel' in kwargs: # pragma: no cover\n ztitle = kwargs.pop('zlabel')\n warnings.warn(\n \"`zlabel` is deprecated. 
Use `ztitle` instead.\",\n PyVistaDeprecationWarning,\n )\n assert_empty_kwargs(**kwargs)\n\n color = Color(color, default_color=self._theme.font.color)\n\n if mesh is None and bounds is None:\n # Use the bounds of all data in the rendering window\n bounds = np.array(self.bounds)\n elif bounds is None:\n # otherwise, use the bounds of the mesh (if available)\n bounds = np.array(mesh.bounds)\n else:\n bounds = np.asanyarray(bounds, dtype=float)\n\n # create actor\n cube_axes_actor = pyvista.CubeAxesActor(\n self.camera,\n minor_ticks=minor_ticks,\n tick_location=ticks,\n x_title=xtitle,\n y_title=ytitle,\n z_title=ztitle,\n x_axis_visibility=show_xaxis,\n y_axis_visibility=show_yaxis,\n z_axis_visibility=show_zaxis,\n x_label_format=fmt,\n y_label_format=fmt,\n z_label_format=fmt,\n x_label_visibility=show_xlabels,\n y_label_visibility=show_ylabels,\n z_label_visibility=show_zlabels,\n n_xlabels=n_xlabels,\n n_ylabels=n_ylabels,\n n_zlabels=n_zlabels,\n )\n\n cube_axes_actor.use_2d_mode = use_2d or not np.allclose(self.scale, [1.0, 1.0, 1.0])\n\n if grid:\n grid = 'back' if grid is True else grid\n if not isinstance(grid, str):\n raise TypeError(f'`grid` must be a str, not {type(grid)}')\n grid = grid.lower()\n if grid in ('front', 'frontface'):\n cube_axes_actor.SetGridLineLocation(cube_axes_actor.VTK_GRID_LINES_CLOSEST)\n elif grid in ('both', 'all'):\n cube_axes_actor.SetGridLineLocation(cube_axes_actor.VTK_GRID_LINES_ALL)\n elif grid in ('back', True):\n cube_axes_actor.SetGridLineLocation(cube_axes_actor.VTK_GRID_LINES_FURTHEST)\n else:\n raise ValueError(f'`grid` must be either \"front\", \"back, or, \"all\", not {grid}')\n # Only show user desired grid lines\n cube_axes_actor.SetDrawXGridlines(show_xaxis)\n cube_axes_actor.SetDrawYGridlines(show_yaxis)\n cube_axes_actor.SetDrawZGridlines(show_zaxis)\n # Set the colors\n cube_axes_actor.GetXAxesGridlinesProperty().SetColor(color.float_rgb)\n cube_axes_actor.GetYAxesGridlinesProperty().SetColor(color.float_rgb)\n cube_axes_actor.GetZAxesGridlinesProperty().SetColor(color.float_rgb)\n\n if isinstance(location, str):\n location = location.lower()\n if location in ('all'):\n cube_axes_actor.SetFlyModeToStaticEdges()\n elif location in ('origin'):\n cube_axes_actor.SetFlyModeToStaticTriad()\n elif location in ('outer'):\n cube_axes_actor.SetFlyModeToOuterEdges()\n elif location in ('default', 'closest', 'front'):\n cube_axes_actor.SetFlyModeToClosestTriad()\n elif location in ('furthest', 'back'):\n cube_axes_actor.SetFlyModeToFurthestTriad()\n else:\n raise ValueError(\n f'Value of location (\"{location}\") should be either \"all\", \"origin\",'\n ' \"outer\", \"default\", \"closest\", \"front\", \"furthest\", or \"back\".'\n )\n elif location is not None:\n raise TypeError('location must be a string')\n\n if isinstance(padding, (int, float)) and 0.0 <= padding < 1.0:\n if not np.any(np.abs(bounds) == np.inf):\n cushion = (\n np.array(\n [\n np.abs(bounds[1] - bounds[0]),\n np.abs(bounds[3] - bounds[2]),\n np.abs(bounds[5] - bounds[4]),\n ]\n )\n * padding\n )\n bounds[::2] -= cushion\n bounds[1::2] += cushion\n else:\n raise ValueError(f'padding ({padding}) not understood. 
Must be float between 0 and 1')\n cube_axes_actor.bounds = bounds\n\n # set axes ranges if input\n if axes_ranges is not None:\n if isinstance(axes_ranges, (collections.abc.Sequence, np.ndarray)):\n axes_ranges = np.asanyarray(axes_ranges)\n else:\n raise TypeError('Input axes_ranges must be a numeric sequence.')\n\n if not np.issubdtype(axes_ranges.dtype, np.number):\n raise TypeError('All of the elements of axes_ranges must be numbers.')\n\n # set the axes ranges\n if axes_ranges.shape != (6,):\n raise ValueError(\n '`axes_ranges` must be passed as a [xmin, xmax, ymin, ymax, zmin, zmax] sequence.'\n )\n\n cube_axes_actor.x_axis_range = axes_ranges[0], axes_ranges[1]\n cube_axes_actor.y_axis_range = axes_ranges[2], axes_ranges[3]\n cube_axes_actor.z_axis_range = axes_ranges[4], axes_ranges[5]\n\n # set color\n cube_axes_actor.GetXAxesLinesProperty().SetColor(color.float_rgb)\n cube_axes_actor.GetYAxesLinesProperty().SetColor(color.float_rgb)\n cube_axes_actor.GetZAxesLinesProperty().SetColor(color.float_rgb)\n\n # set font\n font_family = parse_font_family(font_family)\n\n if not use_3d_text or not np.allclose(self.scale, [1.0, 1.0, 1.0]):\n use_3d_text = False\n cube_axes_actor.SetUseTextActor3D(False)\n else:\n cube_axes_actor.SetUseTextActor3D(True)\n\n props = [\n cube_axes_actor.GetTitleTextProperty(0),\n cube_axes_actor.GetTitleTextProperty(1),\n cube_axes_actor.GetTitleTextProperty(2),\n cube_axes_actor.GetLabelTextProperty(0),\n cube_axes_actor.GetLabelTextProperty(1),\n cube_axes_actor.GetLabelTextProperty(2),\n ]\n\n for prop in props:\n prop.SetColor(color.float_rgb)\n prop.SetFontFamily(font_family)\n prop.SetBold(bold)\n\n # this merely makes the font sharper\n if use_3d_text:\n prop.SetFontSize(50)\n\n # Note: font_size does nothing as a property, use SetScreenSize instead\n # Here, we normalize relative to 12 to give the user an illusion of\n # just changing the font size relative to a font size of 12. 
10 is used\n # here since it's the default \"screen size\".\n cube_axes_actor.SetScreenSize(font_size / 12 * 10.0)\n\n self.add_actor(cube_axes_actor, reset_camera=False, pickable=False, render=render)\n self.cube_axes_actor = cube_axes_actor\n\n if all_edges:\n self.add_bounding_box(color=color, corner_factor=corner_factor)\n\n self.Modified()\n return cube_axes_actor", "def plot_stability_region(self,N=200,color='r',filled=True,bounds=None,\n plotroots=False,alpha=1.,scalefac=1.,\n to_file=False,longtitle=True,fignum=None):\n import nodepy.stability_function as stability_function\n import matplotlib.pyplot as plt\n\n p,q=self.__num__().stability_function(mode='float')\n\n fig = stability_function.plot_stability_region(p,q,N,color,filled,\n bounds,plotroots,alpha,scalefac,fignum)\n\n p,q = self.embedded_method.__num__().stability_function(mode='float')\n stability_function.plot_stability_region(p,q,N,color='k',filled=False,bounds=bounds,\n plotroots=plotroots,alpha=alpha,scalefac=scalefac,fignum=fig.number)\n\n ax = fig.get_axes()\n if longtitle:\n plt.setp(ax,title='Absolute Stability Region for '+self.name)\n else:\n plt.setp(ax,title='Stability region')\n if to_file:\n plt.savefig(to_file, transparent=True, bbox_inches='tight', pad_inches=0.3)\n else:\n plt.draw()\n return fig", "def plot_stability_region(self,N=200,color='r',filled=True,bounds=None,\n plotroots=False,alpha=1.,scalefac=1.,\n to_file=False, longtitle=True,fignum=None):\n import nodepy.stability_function as stability_function\n import matplotlib.pyplot as plt\n\n p,q=self.__num__().stability_function(mode='float')\n\n fig = stability_function.plot_stability_region(p,q,N,color,filled,bounds,\n plotroots,alpha,scalefac,fignum)\n\n ax = fig.get_axes()\n if longtitle:\n plt.setp(ax,title='Absolute Stability Region for '+self.name)\n else:\n plt.setp(ax,title='Stability region')\n if to_file:\n plt.savefig(to_file, transparent=True, bbox_inches='tight', pad_inches=0.3)\n else:\n plt.draw()\n return fig", "def plot_roi_bounds(bounds,color='w',label=False):\n X1,X2,Y1,Y2=bounds\n plt.plot([X1,X2,X2,X1,X1],[Y1,Y1,Y2,Y2,Y1],'-',color=color)\n if label:\n plt.text(X1,Y1-3,label,verticalalignment='bottom',color=color,\n backgroundcolor=(0,0,0,.5))\n plt.margins(0,0)", "def appendFromBounds(self, label=None, p1=None, p2=None, n=None):\n\n di = (p2 - p1) / n\n\n p = p1\n self.appendUpper(label=label, p=p)\n for i in range(1, n):\n p += di\n self.appendUpper(label=label, p=p)\n self.appendUpper(label=label, p=p2)", "def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p) for p in self.shape_parameters.keys()]\n if parameter_name in self.shape_parameters:\n anchor_settings = list(self.shape_parameters[parameter_name][0].keys())\n return min(anchor_settings), max(anchor_settings)\n elif parameter_name.endswith('_rate_multiplier'):\n for source_name, allow_negative in zip(self.source_name_list,self.source_allowed_negative):\n if parameter_name.startswith(source_name) and allow_negative==True:\n return float('-inf'), float('inf')\n return 0, float('inf')\n else:\n raise InvalidParameter(\"Non-existing parameter %s\" % parameter_name)", "def _compute_bounds(self, axis, view):\n return None", "def only_bounds(must_print):\n\n #Extracting input.\n input = find_input()\n\n #Running the experiment.\n result = bound.execute_script(input, must_print, True)[2:]\n #result = bound2.execute_script(input, must_print, True)[2:]\n\n #Storing output.\n store_output(result) #result = [P_BOUND, R_BOUND]", "def 
_cell_bounds_xy(self, x, y, dx = None):\n\t\tif dx is None:\n\t\t\tlev = bhpix.get_pixel_level(x, y)\n\t\t\tdx = bhpix.pix_size(lev)\n\t\t\t##dx = bhpix.pix_size(self.level)\n\n\t\tbounds = Polygon.Shapes.Rectangle(dx)\n\t\tbounds.shift(x - 0.5*dx, y - 0.5*dx);\n\n\t\tif fabs(fabs(x) - fabs(y)) == 0.5:\n\t\t\t# If it's a \"halfpixel\", return a triangle\n\t\t\t# by clipping agains the sky\n\t\t\tbounds &= bn.ALLSKY\n\n\t\treturn bounds", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def get_bounds():\n lower_bound = 0\n upper_bound = input(\"Please enter a whole number: \")\n domain = [lower_bound, upper_bound]\n return domain", "def _calculate_grid_parameters(limits, dx:float=0.01) -> Grid:\n cardinality = int((limits[-1] - limits[0]) / dx)\n grid_values = np.linspace(limits[0], limits[1], cardinality)\n resolution = (limits[-1] - limits[0]) / (cardinality - 1)\n hist_bin_edges = np.histogram_bin_edges((limits[0]-resolution/2, limits[1]+resolution/2), cardinality)\n\n return Grid(grid_values, cardinality, resolution, hist_bin_edges, limits)", "def Bounds(self):\n assert self.points is not None\n\n if self.points.shape[1] == 3:\n bounds = np.array([[np.min(self.points[:,0]),\n np.min(self.points[:,1]),\n np.min(self.points[:,2])],\n [np.max(self.points[:,0]),\n np.max(self.points[:,1]),\n np.max(self.points[:,2])]])\n makezero(bounds)\n return bounds\n elif self.points.shape[1] == 2:\n bounds = np.array([[np.min(self.points[:,0]),\n np.min(self.points[:,1])],\n [np.max(self.points[:,0]),\n np.max(self.points[:,1])]])\n makezero(bounds)\n return bounds\n elif self.points.shape[1] == 1:\n bounds = np.array([[np.min(self.points[:,0])],\n [np.max(self.points[:,0])]])\n makezero(bounds)\n return bounds\n else:\n raise ValueError(\"Invalid dimension for mesh coordinates\")", "def drawRect(self,color,x1,y1,x2,y2):\n if not self.changed: self.edit()\n wmap = 512\n mapd = self.mapd\n for y in xrange(y1,y2):\n ymoff = wmap*y\n for x in xrange(x1,x2):\n mapd[x+ymoff] = color", "def bounds(sceneid):\n scene_params = _sentinel_parse_scene_id(sceneid)\n return {\"sceneid\": sceneid, \"bounds\": list(_get_bounds(scene_params))}", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def bounds(sceneid):\n scene_params = _sentinel_parse_scene_id(sceneid)\n preview_file = os.path.join(\n scene_params[\"aws_bucket\"],\n scene_params[\"aws_prefix\"],\n scene_params[\"preview_file\"],\n )\n with rasterio.open(preview_file) as src:\n wgs_bounds = transform_bounds(\n *[src.crs, \"epsg:4326\"] + list(src.bounds), densify_pts=21\n )\n\n info = {\"sceneid\": sceneid}\n info[\"bounds\"] = list(wgs_bounds)\n\n return info", "def bounds(self) -> typing.List[float]:\n raise NotImplementedError()", "def _buildGridPoints(self):\n self.spacings = []\n for level in xrange(self.depth):\n levelSpacings = []\n refLevel = level + 1\n level = 2**level\n axisData = []\n for axis in self.size:\n spacing = axis / (level+1)\n levelSpacings.append(spacing)\n axisData.append([gridValue*spacing for gridValue in xrange(1, level+1)])\n pointList = [((i, j, k), np.array([axisData[0][i], axisData[1][j], axisData[2][k]]))\n for i in xrange(level)\n for j in xrange(level)\n for k in xrange(level)]\n self.grid[refLevel] = {point[0]: point[1] for point in pointList}\n self.spacings.append(levelSpacings)", "def bounds(self) -> Tensor:\n return torch.cat([self.mins, self.mins + self.ranges], dim=-2)", "def axis_bounds(pc, axis):\n try:\n bounds = pc.bounds\n except AttributeError:\n bounds 
= pc\n \n return tuple([getattr(bounds, b + axis) for b in ('min', 'max')])", "def parameter_range(p, v):\n if p.endswith('_pd_n'):\n return [0, 100]\n elif p.endswith('_pd_nsigma'):\n return [0, 5]\n elif p.endswith('_pd_type'):\n return v\n elif any(s in p for s in ('theta', 'phi', 'psi')):\n # orientation in [-180,180], orientation pd in [0,45]\n if p.endswith('_pd'):\n return [0, 45]\n else:\n return [-180, 180]\n elif 'sld' in p:\n return [-0.5, 10]\n elif p.endswith('_pd'):\n return [0, 1]\n elif p == 'background':\n return [0, 10]\n elif p == 'scale':\n return [0, 1e3]\n elif p == 'case_num':\n # RPA hack\n return [0, 10]\n elif v < 0:\n # Kxy parameters in rpa model can be negative\n return [2*v, -2*v]\n else:\n return [0, (2*v if v > 0 else 1)]" ]
[ "0.7272382", "0.62937915", "0.6163484", "0.6021445", "0.5926308", "0.5803722", "0.57488054", "0.57488054", "0.57488054", "0.57488054", "0.57488054", "0.57488054", "0.57488054", "0.57488054", "0.5734198", "0.5685604", "0.56679", "0.5640129", "0.5524533", "0.55184925", "0.5516188", "0.5504665", "0.55014485", "0.55010545", "0.5489884", "0.54747576", "0.54734606", "0.5466856", "0.5441429", "0.54344916", "0.5365764", "0.5342349", "0.5341416", "0.53335613", "0.5312844", "0.52995247", "0.52612835", "0.52571434", "0.52467376", "0.5243617", "0.52433187", "0.5237495", "0.52245605", "0.52090174", "0.5203684", "0.5201018", "0.5201018", "0.51803505", "0.51528376", "0.5140849", "0.51398444", "0.5137938", "0.51354253", "0.5128363", "0.51134163", "0.51131606", "0.5076051", "0.507539", "0.50650823", "0.5063733", "0.50550354", "0.50518155", "0.5051204", "0.50251585", "0.5014299", "0.501306", "0.5009739", "0.50017786", "0.4988248", "0.4983299", "0.49830696", "0.49793476", "0.4978896", "0.4973642", "0.49526092", "0.49362203", "0.49342966", "0.49153107", "0.49102238", "0.49081203", "0.49007565", "0.48892725", "0.4888351", "0.48761335", "0.4862192", "0.4855811", "0.48447272", "0.4844065", "0.483848", "0.4823888", "0.48134762", "0.48129278", "0.4811136", "0.479263", "0.47921875", "0.4791333", "0.47875544", "0.47859976", "0.47859237", "0.47844416" ]
0.48262188
89
Convert the passed values to colormap.
def get_colormap(level_values: np.ndarray) -> np.ndarray:
    color_dimension = level_values  # change to desired fourth dimension
    color_min, color_max = color_dimension.min(), color_dimension.max()
    norm = colors.Normalize(color_min, color_max)
    m = plt.cm.ScalarMappable(norm=norm, cmap="Spectral_r")
    m.set_array([])
    face_colors = m.to_rgba(color_dimension)
    return face_colors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __value2color(self, v):\n if np.isscalar(v):\n r = self.cmap(self.norm(np.asarray([v])))\n else:\n r = self.cmap(self.norm(v))\n return r.flatten()", "def get_colormap(values, cmap='jet', colorbar=True, log=False):\n by_value = not isinstance(values, int)\n if not by_value:\n colorbar = False\n if by_value:\n cmap = plt.get_cmap(cmap)\n if log:\n colors = [cmap(x) for x in tools.maths.normalise(np.log10(values))]\n else:\n colors = [cmap(x) for x in tools.maths.normalise(np.array(values))]\n else:\n cmap = plt.get_cmap(cmap, values)\n colors = [cmap(x) for x in range(values)]\n if colorbar:\n if log:\n norm = mpl.colors.LogNorm(vmin=min(values), vmax=max(values))\n else:\n norm = mpl.colors.Normalize(vmin=min(values), vmax=max(values))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)\n sm.set_array([])\n if isinstance(colorbar, str):\n label = colorbar\n else:\n label = ''\n plt.colorbar(sm, label=label)\n return colors", "def map_values_to_color(self, df, col_name):\n # map values to colors in hex via\n # creating a hex Look up table table and apply the normalized data to it\n norm = mcolors.Normalize(vmin=np.nanmin(df[col_name].values),\n vmax=np.nanmax(df[col_name].values), clip=True)\n # alternative way of generating hex values\n # mapper = plt.cm.ScalarMappable(norm=norm, cmap=plt.cm.viridis)\n # a = mapper.to_rgba(df[col_name])\n # color_col_name = col_name + '_color'\n # df[color_col_name] = np.apply_along_axis(mcolors.to_hex, 1, a)\n\n lut = plt.cm.viridis(np.linspace(0,1,256))\n lut = np.apply_along_axis(mcolors.to_hex, 1, lut)\n a = (norm(df[col_name].values)*255).astype(np.int16)\n color_col_name = col_name + '_color'\n df[color_col_name] = lut[a]\n return df", "def norm_cmap(values, cmap, vmin=None, vmax=None):\n mn = vmin or min(values)\n mx = vmax or max(values)\n norm = Normalize(vmin=mn, vmax=mx)\n n_cmap = plt.cm.ScalarMappable(norm=norm, cmap=cmap)\n\n rgb_colors = [n_cmap.to_rgba(value) for value in values]\n\n return n_cmap, rgb_colors", "def clr_to_cmap(clr, val=None):\n v = np.loadtxt(clr)\n assert np.all(np.arange(v.shape[0]) == v[:,0])\n if not val is None:\n # subset the colors to match with min/max\n # numpy arr\n #mn = val.amin()\n #mx = val.amax()\n # masked arr\n mn = val.min()\n mx = val.max()\n v = v[mn:(mx+1),:]\n v = v[:,1:]\n cm = mpl.colors.ListedColormap(v / 255)\n return cm", "def getColorMap(colors):\n # Normalise RGBs\n norm_colors = []\n for color in colors:\n norm_colors.append([val / 255. 
for val in color])\n # create color map\n cmap = cols.ListedColormap(norm_colors)\n\n return cmap", "def color_map(val):\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def colors_to_cmap(colors):\n colors = np.asarray(colors)\n if colors.shape[1] == 3:\n colors = np.hstack((colors, np.ones((len(colors),1))))\n steps = (0.5 + np.asarray(range(len(colors)-1), dtype=np.float))/(len(colors) - 1)\n return matplotlib.colors.LinearSegmentedColormap(\n 'auto_cmap',\n {clrname: ([(0, col[0], col[0])] + \n [(step, c0, c1) for (step,c0,c1) in zip(steps, col[:-1], col[1:])] + \n [(1, col[-1], col[-1])])\n for (clridx,clrname) in enumerate(['red', 'green', 'blue', 'alpha'])\n for col in [colors[:,clridx]]},\n N=len(colors))", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def colormap(self):\n palette = [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),\n (111, 74, 0), (81, 0, 81), (128, 64, 128), (244, 35, 232),\n (250, 170, 160), (230, 150, 140), (70, 70, 70),\n (102, 102, 156), (190, 153, 153), (180, 165, 180),\n (150, 100, 100), (150, 120, 90), (153, 153, 153),\n (153, 153, 153), (250, 170, 30), (220, 220, 0),\n (107, 142, 35), (152, 251, 152), (70, 130, 180),\n (220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),\n (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100),\n (0, 0, 230), (119, 11, 32), (0, 0, 142)]\n\n num_colors = self[0][1].shape[-1]\n colormap = np.zeros((num_colors, 3), dtype=int)\n for i in range(num_colors):\n colormap[i, ...] 
= palette[self._update_labels_dict[i]]\n return colormap", "def colormap(cats, mplmap='auto', categorical=None):\n # Should automatically choose the right colormaps for:\n # categorical data\n # sequential data (low, high important)\n # diverging data (low, mid, high important)\n global DEF_SEQUENTIAL\n from matplotlib import cm\n\n if hasattr(cm, 'inferno'):\n DEF_SEQUENTIAL = 'inferno'\n else:\n DEF_SEQUENTIAL = 'BrBG'\n\n # strip units\n units = None # TODO: build a color bar with units\n if hasattr(cats[0], 'magnitude'):\n arr = u.array(cats)\n units = arr.units\n cats = arr.magnitude\n is_categorical = False\n else:\n is_categorical = not isinstance(cats[0], (float, int))\n\n if categorical is not None:\n is_categorical = categorical\n\n if is_categorical:\n values = _map_categories_to_ints(cats)\n if mplmap == 'auto':\n mplmap = DEF_CATEGORICAL\n else:\n values = np.array(list(map(float, cats)))\n if mplmap == 'auto':\n mplmap = DEF_SEQUENTIAL\n\n rgb = _cmap_to_rgb(mplmap, values)\n hexcolors = [webcolors.rgb_to_hex(np.array(c)) for c in rgb]\n return hexcolors", "def colorify(data, vmin=None, vmax=None, cmap=plt.cm.Spectral):\n import matplotlib.colors as colors\n\n _vmin = vmin or min(data)\n _vmax = vmax or max(data)\n cNorm = colors.normalize(vmin=_vmin, vmax=_vmax)\n\n scalarMap = plt.cm.ScalarMappable(norm=cNorm, cmap=cmap)\n colors = map(scalarMap.to_rgba, data)\n return colors, scalarMap", "def cmap(num,cmap = plt.cm.gist_earth_r):\n return cmap(np.linspace(0, 1, num))", "def colorify(data, vmin=None, vmax=None, cmap=plt.cm.Spectral):\n try:\n from matplotlib.colors import Normalize\n except ImportError:\n # old mpl\n\n from matplotlib.colors import normalize as Normalize\n\n _vmin = vmin or min(data)\n _vmax = vmax or max(data)\n cNorm = Normalize(vmin=_vmin, vmax=_vmax)\n\n scalarMap = plt.cm.ScalarMappable(norm=cNorm, cmap=cmap)\n try:\n colors = scalarMap.to_rgba(data)\n except:\n colors = list(map(scalarMap.to_rgba, data))\n return colors, scalarMap", "def generate_colormap(scale_range=(0.0, 1.0), hue_range=(0.8, 0.0),\n saturation_range=(1.0, 1.0), value_range=(0.8, 0.8),\n nan_color=(0.2, 0.2, 0.2, 1.0)):\n lookup_table = vtk.vtkLookupTable()\n lookup_table.SetRange(scale_range)\n\n lookup_table.SetHueRange(hue_range)\n lookup_table.SetSaturationRange(saturation_range)\n lookup_table.SetValueRange(value_range)\n lookup_table.SetNanColor(nan_color)\n lookup_table.Build()\n return lookup_table", "def colorManagementConvert(*args, toDisplaySpace: List[float, float, float]=None,\n **kwargs)->None:\n pass", "def colormapping(data, legend_kwargs: Dict = {}, ax=None):\n cmap, data_family = encodings.data_cmap(data)\n if ax is None:\n ax = plt.gca()\n if data_family == \"continuous\":\n norm = Normalize(vmin=data.min(), vmax=data.max())\n scalarmap = ScalarMappable(\n cmap=cmap,\n norm=norm,\n )\n fig = plt.gcf()\n fig.colorbar(scalarmap)\n else:\n labels = data.drop_duplicates().sort_values()\n cfunc = encodings.color_func(data)\n colors = labels.apply(cfunc)\n patchlist = []\n for color, label in zip(colors, labels):\n data_key = Patch(color=color, label=label)\n patchlist.append(data_key)\n kwargs = dict(\n loc=\"best\",\n ncol=int(len(labels) / 2),\n # bbox_to_anchor=(0.5, -0.05),\n )\n kwargs.update(legend_kwargs)\n legend = plt.legend(handles=patchlist, **kwargs)\n ax.add_artist(legend)", "def _linear_cmap(a, b):\n a = matplotlib.colors.colorConverter.to_rgb(a)\n b = matplotlib.colors.colorConverter.to_rgb(b)\n a_linear = _gamma_expand(a)\n b_linear = _gamma_expand(b)\n 
color_diff = a_linear - b_linear\n palette = (np.linspace(0, 1, 256).reshape((-1, 1))\n * color_diff.reshape((1, -1)))\n palette += b_linear\n palette = _gamma_compress(palette)\n return matplotlib.colors.ListedColormap(palette)", "def map_colors(colors, cmap=None, lut=None, mode='hexs', **norm_kw):\n modes = ['hexs', 'tuples', 'arrays']\n if mode not in modes:\n raise ValueError('mode must be one of %s, but got %s'\n % (modes, mode))\n if not isinstance(cmap, Colormap):\n cmap = cm.get_cmap(cmap, lut=lut)\n rgba_arrays = cmap(Normalize(**norm_kw)(colors))\n rgb_arrays = rgba_arrays[:, :-1] #without alpha\n if mode == 'arrays':\n return rgb_arrays\n elif mode == 'tuples':\n return list(imap(tuple, rgb_arrays))\n else: # mode == 'hexs':\n return list(imap(rgb2hex, rgb_arrays))", "def color_mapper(cmap: str, vmin: float=0, vmax: float=1, alpha: float=1):\n\n alpha = int(255 * alpha)\n\n cmap = plt.get_cmap(cmap)\n cNorm = Normalize(vmin=vmin, vmax=vmax)\n scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cmap)\n\n def mapper(value):\n \"\"\"\n This is the function that gets returned\n\n Parameters\n ----------\n value A number between vmin and vmax\n\n Returns\n -------\n An RGB color\n\n \"\"\"\n\n out = scalarMap.to_rgba(value)\n\n if isinstance(out, tuple):\n return tuple([255 * out[i] for i in range(3)] + [alpha])\n\n elif isinstance(out, np.ndarray):\n out[:, :-1] *= 255\n out[:, 3] = alpha\n return out\n\n return mapper", "def make_colormap(colors):\n#-------------------------\n from matplotlib.colors import LinearSegmentedColormap, ColorConverter\n from numpy import sort\n z = sort(colors.keys())\n n = len(z)\n z1 = min(z)\n zn = max(z)\n x0 = (z - z1) / (zn - z1)\n CC = ColorConverter()\n R = []\n G = []\n B = []\n for i in range(n):\n #i'th color at level z[i]:\n Ci = colors[z[i]]\n if type(Ci) == str:\n # a hex string of form '#ff0000' for example (for red)\n RGB = CC.to_rgb(Ci)\n else:\n # assume it's an RGB triple already:\n RGB = Ci\n R.append(RGB[0])\n G.append(RGB[1])\n B.append(RGB[2])\n cmap_dict = {}\n cmap_dict['red'] = [(x0[i],R[i],R[i]) for i in range(len(R))]\n cmap_dict['green'] = [(x0[i],G[i],G[i]) for i in range(len(G))]\n cmap_dict['blue'] = [(x0[i],B[i],B[i]) for i in range(len(B))]\n mymap = LinearSegmentedColormap('mymap',cmap_dict)\n return mymap", "def create_label_colormap():\n colormap = np.array([\n [128, 64, 128],\n [244, 35, 232],\n [ 70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [ 70, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [ 0, 0, 142],\n [ 0, 0, 70],\n [ 0, 60, 100],\n [ 0, 80, 100],\n [ 0, 0, 230],\n [119, 11, 32],\n [ 0, 0, 0]], dtype=np.uint8)\n return colormap", "def get_mpl_colormap(self):\n return mpl.colors.ListedColormap(self.get_colors().astype(float) / 255.0)", "def create_colormap(seg_map):\n\tcolormap = np.zeros((256, 3), dtype=int)\n\tind = np.arange(256, dtype=int)\n\tfor shift in reversed(range(8)):\n\t\tfor channel in range(3):\n\t\t\tcolormap[:, channel] |= ((ind >> channel) & 1) << shift \n\t\tind >>= 3\n\treturn colormap[seg_map]", "def _create_colormap(self):\n\n max_count = max(self.data['rects'], key=lambda r: r['count'])['count']\n ranges_list = [\n [\n int(max_count/x[0])+1,\n int(max_count/x[1])+1\n ] for x in [[6, 3], [3, 2], [2, 1]]\n ]\n ranges_list = [[0, 1]] + [[1, ranges_list[0][0]]] + ranges_list\n\n self.data['colormap'] = [\n {\n 'fill': self._get_fill(\n i,\n range(\n ranges_list[i][0],\n ranges_list[i][1]\n )\n ),\n 
'range': range(\n ranges_list[i][0],\n ranges_list[i][1]\n )\n } for i in range(0, 5)\n ]", "def _get_colors(num_colors):\n cmap = plt.get_cmap()\n return [cmap(1. * i / num_colors) for i in range(num_colors)]", "def display_cmap_color_range(cmap_style='rainbow'):\n cmap = plt.get_cmap(cmap_style)\n for c in range(256):\n plt.scatter([c], [0], s=500, c=cmap(c), lw=0)\n plt.show()", "def _map_to_2d_cmap(data1, data2, vmin, vmax, vmin2, vmax2, cmap):\n # load 2D cmap image\n cmap_directory = cortex.options.config.get(\"webgl\", \"colormaps\")\n cmap_image = plt.imread(os.path.join(cmap_directory, \"%s.png\" % cmap))\n\n # Normalize the data\n dim1 = np.clip(Normalize(vmin, vmax)(data1), 0, 1)\n dim2 = np.clip(1 - Normalize(vmin2, vmax2)(data2), 0, 1)\n\n # 2D indices of the data on the 2D cmap\n dim1 = np.round(dim1 * (cmap_image.shape[1] - 1))\n dim1 = np.nan_to_num(dim1).astype(np.uint32)\n dim2 = np.round(dim2 * (cmap_image.shape[0] - 1))\n dim2 = np.nan_to_num(dim2).astype(np.uint32)\n\n mapped_rgba = cmap_image[dim2.ravel(), dim1.ravel()]\n\n # Preserve nan values with alpha = 0\n nans = np.logical_or(np.isnan(data1), np.isnan(data2))\n mapped_rgba[nans, 3] = 0\n\n return mapped_rgba, cmap_image", "def c_map(val):\n return int(remap(val, -1, 1, 0, 255))", "def color(self, data):\n\n red = np.interp(data, self.range, self.r)\n blue = np.interp(data, self.range, self.b)\n green = np.interp(data, self.range, self.g)\n # Style plot to return a grey color when value is 'nan'\n red[np.isnan(red)] = 240\n blue[np.isnan(blue)] = 240\n green[np.isnan(green)] = 240\n colors = np.dstack([red.astype(np.uint8),\n green.astype(np.uint8),\n blue.astype(np.uint8),\n np.full_like(data, 255, dtype=np.uint8)])\n #return colors.view(dtype=np.uint32).reshape(data.shape)\n c=[]\n for i in range(len(data)):\n c.append([red[i],green[i],blue[i]])\n return c", "def create_colormap(color_list: Sequence[str], n_colors: int) -> NDArrayFloat:\n cmap = LinearSegmentedColormap.from_list(name=\"dummy_name\", colors=color_list)\n colorscale: NDArrayFloat = np.array(\n [cmap(k * 1 / n_colors) for k in range(n_colors)]\n )\n # ignore the 4th alpha channel\n return colorscale[:, :3]", "def _create_color_map(self, hue, data):\n if hue in self.categorical_columns:\n # adding suffix to column name to get unique str values\n factors = sorted(set(data[hue + self.categorical_suffix]))\n if len(factors) <= self._max_categories_internal_limit:\n cmap = factor_cmap(\n hue + self.categorical_suffix,\n palette=self._categorical_palette[len(factors)],\n factors=factors\n )\n else:\n # If there is too many categories, None is returned\n cmap = None\n else:\n values = data[hue]\n cmap = linear_cmap(\n hue,\n palette=self.plot_design.contrary_color_linear_palette,\n low=min(values),\n high=max(values)\n )\n\n return cmap", "def map_face2color(face, colormap, scale, vmin, vmax):\n if vmin >= vmax:\n raise exceptions.PlotlyError(\n \"Incorrect relation between vmin \"\n \"and vmax. 
The vmin value cannot be \"\n \"bigger than or equal to the value \"\n \"of vmax.\"\n )\n if len(colormap) == 1:\n # color each triangle face with the same color in colormap\n face_color = colormap[0]\n face_color = clrs.convert_to_RGB_255(face_color)\n face_color = clrs.label_rgb(face_color)\n return face_color\n if face == vmax:\n # pick last color in colormap\n face_color = colormap[-1]\n face_color = clrs.convert_to_RGB_255(face_color)\n face_color = clrs.label_rgb(face_color)\n return face_color\n else:\n if scale is None:\n # find the normalized distance t of a triangle face between\n # vmin and vmax where the distance is between 0 and 1\n t = (face - vmin) / float((vmax - vmin))\n low_color_index = int(t / (1.0 / (len(colormap) - 1)))\n\n face_color = clrs.find_intermediate_color(\n colormap[low_color_index],\n colormap[low_color_index + 1],\n t * (len(colormap) - 1) - low_color_index,\n )\n\n face_color = clrs.convert_to_RGB_255(face_color)\n face_color = clrs.label_rgb(face_color)\n else:\n # find the face color for a non-linearly interpolated scale\n t = (face - vmin) / float((vmax - vmin))\n\n low_color_index = 0\n for k in range(len(scale) - 1):\n if scale[k] <= t < scale[k + 1]:\n break\n low_color_index += 1\n\n low_scale_val = scale[low_color_index]\n high_scale_val = scale[low_color_index + 1]\n\n face_color = clrs.find_intermediate_color(\n colormap[low_color_index],\n colormap[low_color_index + 1],\n (t - low_scale_val) / (high_scale_val - low_scale_val),\n )\n\n face_color = clrs.convert_to_RGB_255(face_color)\n face_color = clrs.label_rgb(face_color)\n return face_color", "def colorscale_to_colors(colorscale):\n color_list = []\n for color in colorscale:\n color_list.append(color[1])\n return color_list", "def assigning_colors():\n rgb_colors = {}\n for name, hex in matplotlib.colors.cnames.items():\n color = []\n # So the values are from 0-255 and not 0-1\n for i in matplotlib.colors.to_rgb(hex):\n color.append(int(i * 255))\n\n color = tuple(color)\n rgb_colors[name] = color\n\n return rgb_colors", "def plot_cmap(cmap):\n gradient = np.linspace(0, 1, 256)\n gradient = np.vstack((gradient, gradient))\n pylab.imshow(gradient, aspect='auto', cmap=cmap)\n pylab.show()", "def get_colors(self):\n df = self.reindexed_dataframe()\n\n colormap = cm.get_cmap(self.cmap)\n norm = Normalize(vmin=df.min(), vmax=df.max())\n\n sm = cm.ScalarMappable(norm=norm, cmap=colormap)\n\n return ['rgba' + str(sm.to_rgba(m, bytes = True, alpha = 0.8)) if not np.isnan(m) else 'rgba(128,128,128,1)' for m in df.values]", "def get_density_cmap():\n # Add completely white color to Reds colormap in Matplotlib\n list_colors = plt.cm.datad['Reds']\n list_colors = list(list_colors)\n list_colors.insert(0, (1, 1, 1))\n list_colors.insert(0, (1, 1, 1))\n lscm = matplotlib.colors.LinearSegmentedColormap.from_list(\"my_Reds\", list_colors)\n return lscm", "def __init__(self, vmin, vmax, cmap=\"Blues\", kind=\"hex\"):\n assert vmin < vmax, \"vmin must be smaller than vmax\"\n assert kind.lower() in [\"hex\", \"rgba\"], 'kind must be one of [\"hex\", \"rgba\"]'\n self.vmin = vmin\n self.vmax = vmax\n self.cmap = cmap\n self.kind = kind\n\n # Normalize range of vals\n norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=True)\n self.mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)\n # def create_continuous_colormapper(vmin, vmax, cmap=\"Greys_r\"):", "def test_density_colormap(self):\n cmap = matplotlib.cm.get_cmap('density')\n np.testing.assert_allclose(cmap(0.0), [0.214, 0.152, 0.535, 1], 
atol=0.001)\n np.testing.assert_allclose(cmap(1.0), [0.988, 0.978, 0.042, 1], atol=0.001)", "def make_colormap(colormap = 'rainbow_r', bins = 256, add_alpha = True, invert_alpha = False, cmap_name = 'costum',\n discrete = False, return_cmap = False):\n \n if isinstance(colormap, str): # if input is string (so existent colormap)\n\n # get colormap\n cmap = cm.get_cmap(colormap)\n\n else: # is list of strings\n cvals = np.arange(len(colormap))\n norm = plt.Normalize(min(cvals),max(cvals))\n tuples = list(zip(map(norm,cvals), colormap))\n cmap = colors.LinearSegmentedColormap.from_list(\"\", tuples)\n \n if discrete == True: # if we want a discrete colormap from list\n cmap = colors.ListedColormap(colormap)\n bins = int(len(colormap))\n\n # convert into array\n cmap_array = cmap(range(bins))\n\n # reshape array for map\n new_map = []\n for i in range(cmap_array.shape[-1]):\n new_map.append(np.tile(cmap_array[...,i],(bins,1)))\n\n new_map = np.moveaxis(np.array(new_map), 0, -1)\n \n if add_alpha: \n # make alpha array\n if invert_alpha == True: # in case we want to invert alpha (y from 1 to 0 instead pf 0 to 1)\n _, alpha = np.meshgrid(np.linspace(0, 1, bins, endpoint=False), 1-np.linspace(0, 1, bins))\n else:\n _, alpha = np.meshgrid(np.linspace(0, 1, bins, endpoint=False), np.linspace(0, 1, bins, endpoint=False))\n\n # add alpha channel\n new_map[...,-1] = alpha\n cmap_ext = (0,1,0,1)\n else:\n new_map = new_map[:1,...].copy() \n cmap_ext = (0,100,0,1)\n \n fig = plt.figure(figsize=(1,1))\n ax = fig.add_axes([0,0,1,1])\n # plot \n plt.imshow(new_map,\n extent = cmap_ext,\n origin = 'lower')\n ax.axis('off')\n\n if add_alpha: \n rgb_fn = op.join(op.split(cortex.database.default_filestore)[\n 0], 'colormaps', cmap_name+'_alpha_bins_%d.png'%bins)\n else:\n rgb_fn = op.join(op.split(cortex.database.default_filestore)[\n 0], 'colormaps', cmap_name+'_bins_%d.png'%bins)\n #misc.imsave(rgb_fn, new_map)\n plt.savefig(rgb_fn, dpi = 200,transparent=True)\n\n if return_cmap:\n return cmap\n else:\n return rgb_fn", "def map_channels(self, map_function):\n return ScreenColor(map_function(self.red), map_function(self.green), map_function(self.blue))", "def to_color(arr,pmin=1,pmax=99.8,gamma=1.,colors=((0,1,0),(1,0,1),(0,1,1))):\n \n if not arr.dim in (2,3):\n raise ValueError(\"only 2d or 3d arrays supported\")\n \n if arr.ndim==2:\n arr=arr[np.newaxis]\n #get the grey channel\n ind_min=np.argmin(arr.shape)\n arr =np.moveaxis(arr,ind_min,0).astype(np.float32)\n \n out =np.zeros(arr.shape[1:]+(3,))\n \n eps=1.e-20\n \n if pmin>=0:\n mi=np.percentile(arr,pmin,axis=(1,2),keepdims=True)\n else:\n mi=0\n \n if pmax>=0:\n ma=np.percentile(arr,pmax,axis=(1,2),keepdims=True)\n else:\n ma=1.+eps\n \n arr_norm=(1.*arr-mi)/(ma-mi+eps)\n #RGB 3channel\n for i_stack,col_stack in enumerate(colors):\n if i_stack >= len(arr):\n break\n for j,c in enumerate(col_stack):\n out[...,j]+= c*arr_norm[i_stack]\n \n return np.clip(out,0,1)", "def mapper(value):\n\n out = scalarMap.to_rgba(value)\n\n if isinstance(out, tuple):\n return tuple([255 * out[i] for i in range(3)] + [alpha])\n\n elif isinstance(out, np.ndarray):\n out[:, :-1] *= 255\n out[:, 3] = alpha\n return out", "def _get_csi_colour_scheme():\n\n this_colour_map_object = pyplot.cm.Blues\n this_colour_norm_object = matplotlib.colors.BoundaryNorm(\n LEVELS_FOR_CSI_CONTOURS, this_colour_map_object.N)\n\n rgba_matrix = this_colour_map_object(this_colour_norm_object(\n LEVELS_FOR_CSI_CONTOURS\n ))\n\n colour_list = [\n rgba_matrix[i, ..., :-1] for i in 
range(rgba_matrix.shape[0])\n ]\n\n colour_map_object = matplotlib.colors.ListedColormap(colour_list)\n colour_map_object.set_under(numpy.full(3, 1.))\n colour_norm_object = matplotlib.colors.BoundaryNorm(\n LEVELS_FOR_CSI_CONTOURS, colour_map_object.N)\n\n return colour_map_object, colour_norm_object", "def get_color_map(n):\n jet = plt.get_cmap('jet')\n cNorm = colors.Normalize(vmin=0, vmax=n-1)\n scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)\n outmap = []\n for i in range(n):\n outmap.append( scalarMap.to_rgba(i) )\n return outmap", "def linkcolormap(self, linkcolors=\"viridis\"):\n\n if isinstance(linkcolors, list) and len(linkcolors) == self.n:\n # provided a list of color names\n return colors.ListedColormap(linkcolors)\n else:\n # assume it is a colormap name\n return cm.get_cmap(linkcolors, 6)", "def create_cityscapes_label_colormap():\r\n colormap = np.zeros((256, 3), dtype=np.uint8)\r\n colormap[0] = [128, 64, 128]\r\n colormap[1] = [244, 35, 232]\r\n colormap[2] = [70, 70, 70]\r\n colormap[3] = [102, 102, 156]\r\n colormap[4] = [190, 153, 153]\r\n colormap[5] = [153, 153, 153]\r\n colormap[6] = [250, 170, 30]\r\n colormap[7] = [220, 220, 0]\r\n colormap[8] = [107, 142, 35]\r\n colormap[9] = [152, 251, 152]\r\n colormap[10] = [70, 130, 180]\r\n colormap[11] = [220, 20, 60]\r\n colormap[12] = [255, 0, 0]\r\n colormap[13] = [0, 0, 142]\r\n colormap[14] = [0, 0, 70]\r\n colormap[15] = [0, 60, 100]\r\n colormap[16] = [0, 80, 100]\r\n colormap[17] = [0, 0, 230]\r\n colormap[18] = [119, 11, 32]\r\n return colormap", "def _get_cmap_colors(cmap, n, cc = 0, callfn = None):\r\n if callfn is None: callfn = _get_cmap_colors.__name__\r\n # sanity checks\r\n if not isinstance(cmap, str):\r\n raise TypeError(\"{0}: error: cmap must be a string matplotlib color \"\r\n \"map\".format(callfn))\r\n if not isinstance(n, int):\r\n raise TypeError(\"{0}: error: n must be a positive integer\"\r\n \"\".format(callfn))\r\n if n < 1:\r\n raise ValueError(\"{0}: error: int n must be positive\".format(callfn))\r\n if (not isinstance(cc, float)) and (not isinstance(cc, int)):\r\n raise TypeError(\"{0}: error: cc must be a float in range [0, 1)\"\r\n \"\".format(callfn))\r\n if (cc < 0) or (cc >= 1):\r\n raise ValueError(\"{0}: error: float cc outside range [0, 1)\"\r\n \"\".format(callfn))\r\n # take range [0.5 * cc, 1 - 0.5 * cc] and split it into n pieces; the\r\n # collected points are midpoints of each interval. 
reduces color contrast.\r\n colors = [0.5 * cc + (1 - cc) * (i + 0.5) / n for i in range(n)]\r\n # try to get the colormap\r\n try: cmap = getattr(cm, cmap)\r\n except AttributeError as ae:\r\n ae.args = [\"{0}: error: unknown color map \\\"{1}\\\"\".format(callfn, cmap)]\r\n raise ae\r\n # if cmap is not a Colormap, raise a TypeError\r\n if not isinstance(cmap, Colormap):\r\n raise TypeError(\"{0}: error: {1} is not a valid Colormap\"\r\n \"\".format(callfn, cmap))\r\n # retrieve colors using color points and return\r\n for i in range(n): colors[i] = cmap(colors[i])\r\n # return ListedColormap from colors\r\n return ListedColormap(colors, name = cmap.name + \"_listed\", N = n)", "def XYZ_to_RGB_matrix(self, value):\n\n if value is not None:\n value = np.asarray(value)\n self._XYZ_to_RGB_matrix = value", "def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)", "def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)", "def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)", "def get_cmap(n, name='jet'):\n return plt.cm.get_cmap(name, n)", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour,mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def color_vals(val, threshl=[0.15, 0.30, 0.50]):\n colormap = ['red', 'black', 'blue', 'green']\n color = colormap[-1]\n for i, thresh in enumerate(threshl):\n if val < thresh:\n color = colormap[i]\n break\n return 'color: %s' % color", "def create_range_map(points_xyz: NDArrayFloat) -> NDArrayByte:\n range = points_xyz[..., 2]\n range = np.round(range).astype(int)\n color = plt.get_cmap(\"turbo\")(np.arange(0, range.max() + 1))\n color = color[range]\n range_cmap: NDArrayByte = (color * 255.0).astype(np.uint8)\n return range_cmap", "def get_cmap(n, name='hsv'):\n return plt.cm.get_cmap(name, n)", "def get_cmap(n, name='hsv'):\n return plt.cm.get_cmap(name, n)", "def create_cycler_colors(color_scheme):\n cmap = cm.get_cmap(color_scheme) # PiYG\n cycler_colors = []\n\n for i in range(cmap.N):\n rgba = cmap(i)\n # rgb2hex accepts rgb or rgba\n cycler_colors.append(matplotlib.colors.rgb2hex(rgba)) \n \n return cycler_colors", "def get_colors(self):\n colors = [\"#244486\", \"#A6A6A6\", \"#B12122\"]\n cmap = LinearSegmentedColormap.from_list(\"mycmap\", colors)\n\n color_palette=[cmap(i) for i in np.linspace(0, 1, len(set(self.nodes_list)))]\n return dict(zip(list(set(self.nodes_list)), color_palette))", "def get_cmap_levels(colormap,levels):\n \n #Matplotlib colormap name\n if isinstance(colormap,str):\n cmap = mlib.cm.get_cmap(colormap)\n \n #User defined list of colors\n elif isinstance(colormap,list):\n cmap = mcolors.ListedColormap(colormap)\n \n #Dictionary\n elif isinstance(colormap,dict):\n cmap = make_colormap(colormap)\n \n #Otherwise, a cmap was passed\n else:\n cmap = colormap\n \n #Normalize colors relative to levels\n norm = mcolors.Normalize(vmin=0, vmax=len(levels)-1)\n \n #If more than 2 levels were passed, use those for the contour levels\n if len(levels) > 2:\n colors = cmap(norm(np.arange(len(levels)-1)+.5))\n cmap = mcolors.ListedColormap(colors)\n \n #Otherwise, create a list of colors based on levels\n else:\n colors = 
cmap(norm(np.linspace(0,1,256)))\n cmap = mcolors.LinearSegmentedColormap.from_list('my_colormap',colors)\n \n y0 = min(levels)\n y1 = max(levels)\n dy = (y1-y0)/16\n scalemag = int(np.round_(np.log(dy)/np.log(8),decimals=0))\n dy_scaled = dy*10**-scalemag\n dc = min([1,2,5,10], key=lambda x:abs(x-dy_scaled))\n dc = np.round_(dc*10**scalemag,decimals=-scalemag+1)\n c0 = np.round_(y0/dc,decimals=0)*dc\n c1 = np.round_(y1/dc,decimals=0)*dc\n levels = np.arange(c0,c1+dc*.5,dc)\n \n if scalemag > 0:\n levels = levels.astype(int)\n else:\n levels = np.round_(levels,decimals=-scalemag+1)\n\n #Return colormap and levels\n return cmap, levels", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour, mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (\n maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def example_SegmentedColorMapping(min_value, max_value):\n \n colmap1 = ColorMapper(\"red2\")\n colmap1.exponent = 0.7\n \n colmap2 = ColorMapper(\"green\")\n \n colmap3 = ColorMapper(\"green\")\n colmap3.invert = True\n \n colmap4 = ColorMapper(\"blue2\")\n colmap4.invert = True\n colmap4.exponent = 0.5\n \n colmap = SegmentedColorMapping([ (-4.0, -2.0, colmap1), (-2.0, 0.0, colmap2),\n (0.0, 2.0, colmap3), (2.0, 4.0, colmap4)],\n min_value, max_value)\n \n return colmap", "def _generate_mappable(\n self, mappable, values=None, *, orientation=None,\n locator=None, formatter=None, norm=None, norm_kw=None, rotation=None,\n):\n # A colormap instance\n # TODO: Pass remaining arguments through Colormap()? This is really\n # niche usage so maybe not necessary.\n orientation = _not_none(orientation, 'horizontal')\n if isinstance(mappable, mcolors.Colormap):\n # NOTE: 'Values' makes no sense if this is just a colormap. Just\n # use unique color for every segmentdata / colors color.\n cmap = mappable\n values = np.linspace(0, 1, cmap.N)\n\n # List of colors\n elif np.iterable(mappable) and all(\n isinstance(obj, str) or (np.iterable(obj) and len(obj) in (3, 4))\n for obj in mappable\n ):\n cmap = mcolors.ListedColormap(list(mappable), '_no_name')\n if values is None:\n values = np.arange(len(mappable))\n locator = _not_none(locator, values) # tick *all* values by default\n\n # List of artists\n # NOTE: Do not check for isinstance(Artist) in case it is an mpl collection\n elif np.iterable(mappable) and all(\n hasattr(obj, 'get_color') or hasattr(obj, 'get_facecolor')\n for obj in mappable\n ):\n # Generate colormap from colors and infer tick labels\n colors = []\n for obj in mappable:\n if hasattr(obj, 'get_color'):\n color = obj.get_color()\n else:\n color = obj.get_facecolor()\n if isinstance(color, np.ndarray):\n color = color.squeeze() # e.g. 
scatter plot\n if color.ndim != 1:\n raise ValueError(\n 'Cannot make colorbar from list of artists '\n f'with more than one color: {color!r}.'\n )\n colors.append(to_rgb(color))\n\n # Try to infer tick values and tick labels from Artist labels\n cmap = mcolors.ListedColormap(colors, '_no_name')\n if values is None:\n # Get object labels and values\n labels = []\n values = []\n for obj in mappable:\n label = _get_label(obj) # could be None\n try:\n value = float(label) # could be float(None)\n except (TypeError, ValueError):\n value = None\n labels.append(label)\n values.append(value)\n\n # Use default values if labels are non-numeric (numeric labels are\n # common when making on-the-fly colorbars). Try to use object labels\n # for ticks with default vertical rotation, like datetime axes.\n if any(value is None for value in values):\n values = np.arange(len(mappable))\n if formatter is None and any(label is not None for label in labels):\n formatter = labels # use these fixed values for ticks\n if orientation == 'horizontal':\n rotation = _not_none(rotation, 90)\n locator = _not_none(locator, values) # tick *all* values by default\n\n else:\n raise ValueError(\n 'Input mappable must be a matplotlib artist, '\n 'list of objects, list of colors, or colormap. '\n f'Got {mappable!r}.'\n )\n\n # Build ad hoc ScalarMappable object from colors\n if np.iterable(mappable) and len(values) != len(mappable):\n raise ValueError(\n f'Passed {len(values)} values, but only {len(mappable)} '\n f'objects or colors.'\n )\n norm, *_ = _build_discrete_norm(\n self,\n cmap=cmap,\n norm=norm,\n norm_kw=norm_kw,\n extend='neither',\n values=values,\n )\n mappable = mcm.ScalarMappable(norm, cmap)\n\n return mappable, rotation", "def get_colors(n, cmap=\"viridis\", start=0.0, stop=1.0, alpha=1.0, return_hex=False):\n colors = [cm.get_cmap(cmap)(x) for x in np.linspace(start, stop, n)]\n colors = [(r, g, b, alpha) for r, g, b, _ in colors]\n if return_hex:\n colors = _rgb_color_list_to_hex(colors)\n return colors", "def get_colors(nlevels, colormap=None):\n if colormap is None:\n from matplotlib.pyplot import cm\n colormap = cm.rainbow\n return colormap(np.linspace(0, 1, nlevels))", "def cmap_idl4():\n r=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 7 15 22 30 37 45 52 60 67 75 82 90 97 105 112 120 125 130 135 140 145 150 155 160 165 170 175 180 185 190 195 200 200 201 201 202 202 203 203 204 204 205 205 206 206 207 207 208 208 209 209 210 210 211 211 212 212 213 213 214 214 215 215 216 216 217 217 218 218 219 219 220 220 221 221 222 222 223 223 224 224 225 225 226 226 227 227 228 228 229 229 230 230 231 231 232 232 233 233 234 234 235 235 236 236 237 237 238 238 239 239 240 240 241 241 242 242 243 243 244 244 245 245 246 246 247 247 248 248 249 249 250 250 251 251 252 252 253 253 254 254 255 255\"\n g=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 6 9 12 15 18 21 25 28 31 34 37 40 43 46 50 53 56 59 62 65 68 71 75 78 81 84 87 90 93 96 100 103 106 109 112 115 118 121 125 128 131 134 137 140 143 146 150 150 150 150 150 150 150 150 150 150 150 150 150 150 150 150 150 149 148 148 147 146 146 145 145 144 143 143 142 141 141 140 140 137 135 132 130 127 125 122 120 117 115 112 110 107 105 102 100 93 87 81 75 68 62 56 50 43 37 31 25 18 12 6 0 2 4 6 9 11 13 16 18 20 23 25 27 29 32 34 36 39 41 43 46 48 50 53 55 57 59 62 64 
66 69 71 73 76 78 80 83 85 87 89 92 94 96 99 101 103 106 108 110 113 115 117 119 122 124 126 129 131 133 136 138 140 142 145 147 149 152 154 156 159 161 163 166 168 170 172 175 177 179 182 184 186 189 191 193 196 198 200 202 205 207 209 212 214 216 219 221 223 226 228 230 232 235 237 239 242 244 246 249 251 253 255\"\n b=\"0 2 4 6 8 10 12 14 16 18 20 22 25 27 29 31 33 35 37 39 41 43 45 47 50 52 54 56 58 60 62 64 66 68 70 72 75 77 79 81 83 85 87 89 91 93 95 97 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 96 93 90 87 84 81 78 75 71 68 65 62 59 56 53 50 46 43 40 37 34 31 28 25 21 18 15 12 9 6 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\"\n rm = [tuple([i/255,]+[int(x)/255,]*2) for i,x in enumerate(r.split())]\n gm = [tuple([i/255,]+[int(x)/255,]*2) for i,x in enumerate(g.split())]\n bm = [tuple([i/255,]+[int(x)/255,]*2) for i,x in enumerate(b.split())]\n cdict = {'red':rm, 'green':gm, 'blue':bm}\n cmap = plt.matplotlib.colors.LinearSegmentedColormap('idl4',cdict,256)\n return cmap", "def scale(self, vmin=0.0, vmax=1.0, max_labels=10):\n return LinearColormap(\n self.colors,\n index=[\n vmin + (vmax - vmin) * (x - self.vmin) * 1.0 / (self.vmax - self.vmin)\n for x in self.index\n ], # noqa\n vmin=vmin,\n vmax=vmax,\n caption=self.caption,\n max_labels=max_labels,\n )", "def colors_for_labels():\n colors = [(i * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) % 255).astype(np.uint8) for i in range(len(CATEGORY))]\n #colors = np.array(range(len(COCO_INSTANCE_CATEGORY_NAMES))) * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n #colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def p_to_color_seq(p):\n cmap = cm.get_cmap('Reds')\n# help(cmap)\n return cmap(p)", "def colorize_label_map(label):\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label. 
Got {}'.format(label.shape))\n\n colormap = colormap_ade20k\n label_mod = np.mod(label, len(colormap))\n return colormap[label_mod].astype(np.uint8)", "def truncate_colormap(cmap_str, minval=0.0, maxval=1.0, n=100):\n cmap = plt.get_cmap(cmap_str)\n new_cmap = colors.LinearSegmentedColormap.from_list(\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),\n cmap(np.linspace(minval, maxval, n)))\n return new_cmap", "def scale(self, vmin=0.0, vmax=1.0, max_labels=10):\n return StepColormap(\n self.colors,\n index=[\n vmin + (vmax - vmin) * (x - self.vmin) * 1.0 / (self.vmax - self.vmin)\n for x in self.index\n ], # noqa\n vmin=vmin,\n vmax=vmax,\n caption=self.caption,\n max_labels=max_labels,\n )", "def create_pascal_label_colormap():\r\n colormap = np.zeros((256, 3), dtype = int)\r\n ind = np.arange(256, dtype=int)\r\n\r\n for shift in reversed(range(8)):\r\n for channel in range(3):\r\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\r\n ind >>= 3\r\n\r\n return colormap", "def discrete_cmap(N, base_cmap=None):\n base = plt.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def test_colormap_discrete():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=Colormap(colors=['r', 'g', 'b'],\n interpolation='zero'),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_rgb.png\")", "def compute_colors_for_labels(self, labels):\n colors = labels[:, None] * self.palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def compute_colors_for_labels(self, labels):\n colors = labels[:, None] * self.palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def colourscale(plotdata):\n M = np.nanmax(plotdata)\n m = np.nanmin(plotdata)\n if M >= abs(m):\n ctrs1 = np.arange(-M, 0, .1*M)\n ctrs2 = np.arange(0.1*M, 1.09*M, .1*M)\n ctrs = np.concatenate((ctrs1, ctrs2))\n caxismin = -M\n caxismax = M\n else:\n m = -m\n ctrs1 = np.arange(-m, 0, .1*m)\n ctrs2 = np.arange(0.1*m, 1.09*m, .1*m)\n ctrs = np.concatenate((ctrs1, ctrs2))\n caxismin = -m\n caxismax = m\n # function will not work if there exist no positive max or negative min\n return caxismin, caxismax, ctrs", "def cmap(self):\n return self.pixels.get_cmap()", "def get_colors(color_map, count):\n cols = plt.get_cmap(color_map, count + 1) # +1 to prevent wrapping, where col 0 is same as col -1\n cols = cols(range(count + 1)).tolist() # Create a list of colours\n return cols[:-1] # Remove overlapping colour and return", "def apply_cmap(\n self, *args,\n cmap=None, cmap_kw=None, norm=None, norm_kw=None,\n extend='neither', levels=None, N=None, values=None,\n vmin=None, vmax=None, locator=None, locator_kw=None,\n symmetric=False, positive=False, negative=False, nozero=False,\n discrete=None, edgefix=None, labels=False, labels_kw=None, fmt=None, precision=2,\n inbounds=True, colorbar=False, colorbar_kw=None, **kwargs\n):\n method = kwargs.pop('_method')\n name = method.__name__\n contour = name in ('contour', 'tricontour')\n contourf = name in ('contourf', 'tricontourf')\n pcolor = name in ('pcolor', 'pcolormesh', 'pcolorfast')\n hexbin = name in ('hexbin',)\n hist2d = name in ('hist2d',)\n imshow = name in ('imshow', 'matshow', 'spy')\n parametric = name in ('parametric',)\n discrete = _not_none(\n getattr(self, 
'_image_discrete', None),\n discrete,\n rc['image.discrete'],\n not hexbin and not hist2d and not imshow\n )\n\n # Parse keyword args\n # NOTE: For now when drawing contour or contourf plots with no colormap,\n # cannot use 'values' to specify level centers or level center count.\n # NOTE: For now need to duplicate 'levels' parsing here and in\n # _build_discrete_norm so that it works with contour plots with no cmap.\n cmap_kw = cmap_kw or {}\n norm_kw = norm_kw or {}\n labels_kw = labels_kw or {}\n locator_kw = locator_kw or {}\n colorbar_kw = colorbar_kw or {}\n norm_kw = norm_kw or {}\n edgefix = _not_none(edgefix, rc['image.edgefix'])\n props = _pop_props(kwargs, 'fills')\n linewidths = props.get('linewidths', None)\n linestyles = props.get('linestyles', None)\n colors = props.get('colors', None)\n levels = _not_none(\n N=N,\n levels=levels,\n norm_kw_levels=norm_kw.pop('levels', None),\n default=rc['image.levels'] if discrete else None\n )\n\n # Get colormap, but do not use cmap when 'colors' are passed to contour()\n # or to contourf() -- the latter only when 'linewidths' and 'linestyles'\n # are also *not* passed. This wrapper lets us add \"edges\" to contourf\n # plots by calling contour() after contourf() if 'linewidths' or\n # 'linestyles' are explicitly passed, but do not want to disable the\n # native matplotlib feature for manually coloring filled contours.\n # https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.contourf\n add_contours = contourf and (linewidths is not None or linestyles is not None)\n use_cmap = colors is None or (not contour and (not contourf or add_contours))\n if not use_cmap:\n if cmap is not None:\n warnings._warn_proplot(\n f'Ignoring input colormap cmap={cmap!r}, using input colors '\n f'colors={colors!r} instead.'\n )\n cmap = None\n if contourf:\n kwargs['colors'] = colors # this was not done above\n colors = None\n else:\n if cmap is None:\n if name == 'spy':\n cmap = pcolors.ListedColormap(['w', 'k'], '_binary')\n else:\n cmap = rc['image.cmap']\n cmap = constructor.Colormap(cmap, **cmap_kw)\n if getattr(cmap, '_cyclic', None) and extend != 'neither':\n warnings._warn_proplot(\n 'Cyclic colormap requires extend=\"neither\". '\n f'Overriding user input extend={extend!r}.'\n )\n extend = 'neither'\n\n # Translate standardized keyword arguments back into the keyword args\n # accepted by native matplotlib methods. Also disable edgefix if user want\n # to customize the \"edges\".\n styles = STYLE_ARGS_TRANSLATE.get(name, None)\n for idx, (key, value) in enumerate((\n ('colors', colors), ('linewidths', linewidths), ('linestyles', linestyles)\n )):\n if value is None or add_contours:\n continue\n if not styles: # no known conversion table, e.g. 
for imshow() plots\n raise TypeError(f'{name}() got an unexpected keyword argument {key!r}')\n edgefix = False # disable edgefix when specifying borders!\n kwargs[styles[idx]] = value\n\n # Build colormap normalizer\n # NOTE: This ensures contour() and tricontour() use the same default levels\n # whether or not colormap is active.\n kw = dict(\n norm=norm, norm_kw=norm_kw,\n extend=extend, vmin=vmin, vmax=vmax, locator=locator, locator_kw=locator_kw,\n symmetric=symmetric, positive=positive, negative=negative, nozero=nozero,\n inbounds=inbounds, centers=pcolor, counts=hexbin or hist2d,\n )\n ticks = None\n if levels is None:\n if norm is not None:\n norm = constructor.Norm(norm, **norm_kw)\n elif not use_cmap:\n levels, _ = _auto_levels_locator(self, *args, N=levels, **kw)\n else:\n kw.update(levels=levels, values=values, cmap=cmap, minlength=2 - int(contour))\n norm, cmap, levels, ticks = _build_discrete_norm(self, *args, **kw)\n\n # Call function with correct keyword args\n if cmap is not None:\n kwargs['cmap'] = cmap\n if norm is not None:\n kwargs['norm'] = norm\n if parametric:\n kwargs['values'] = values\n if levels is None: # i.e. no DiscreteNorm was used\n kwargs['vmin'] = vmin\n kwargs['vmax'] = vmax\n if contour or contourf:\n kwargs['levels'] = levels\n kwargs['extend'] = extend\n with _state_context(self, _image_discrete=False):\n obj = method(self, *args, **kwargs)\n if not isinstance(obj, tuple): # hist2d\n obj._colorbar_extend = extend # used by proplot colorbar\n obj._colorbar_ticks = ticks # used by proplot colorbar\n\n # Possibly add solid contours between filled ones or fix common \"white lines\"\n # issues with vector graphic output\n if add_contours:\n colors = _not_none(colors, 'k')\n self.contour(\n *args, levels=levels, linewidths=linewidths,\n linestyles=linestyles, colors=colors\n )\n if edgefix:\n _fix_white_lines(obj)\n\n # Apply labels\n # TODO: Add quiverkey to this!\n if labels:\n fmt = _not_none(labels_kw.pop('fmt', None), fmt, 'simple')\n fmt = constructor.Formatter(fmt, precision=precision)\n if contour or contourf:\n _labels_contour(self, obj, *args, fmt=fmt, **labels_kw)\n elif pcolor:\n _labels_pcolor(self, obj, fmt=fmt, **labels_kw)\n else:\n raise RuntimeError(f'Not possible to add labels to {name!r} pplt.')\n\n # Optionally add colorbar\n if colorbar:\n m = obj\n if hist2d:\n m = obj[-1]\n if parametric and values is not None:\n colorbar_kw.setdefault('values', values)\n self.colorbar(m, loc=colorbar, **colorbar_kw)\n\n return obj", "def create_pascal_label_colormap():\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arrange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>=3\n\n return colormap", "def create_pascal_label_colormap():\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n\n return colormap", "def create_pascal_label_colormap():\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n\n return colormap", "def to_color(arr, pmin=1, pmax=99.8, gamma=1., colors=((0, 1, 0), (1, 0, 1), (0, 1, 1))):\n if not arr.ndim in (2,3):\n raise ValueError(\"only 2d or 3d arrays supported\")\n\n if arr.ndim ==2:\n arr = arr[np.newaxis]\n\n 
ind_min = np.argmin(arr.shape)\n arr = np.moveaxis(arr, ind_min, 0).astype(np.float32)\n\n out = np.zeros(arr.shape[1:] + (3,))\n\n eps = 1.e-20\n if pmin>=0:\n mi = np.percentile(arr, pmin, axis=(1, 2), keepdims=True)\n else:\n mi = 0\n\n if pmax>=0:\n ma = np.percentile(arr, pmax, axis=(1, 2), keepdims=True)\n else:\n ma = 1.+eps\n\n arr_norm = (1. * arr - mi) / (ma - mi + eps)\n\n\n for i_stack, col_stack in enumerate(colors):\n if i_stack >= len(arr):\n break\n for j, c in enumerate(col_stack):\n out[..., j] += c * arr_norm[i_stack]\n\n return np.clip(out, 0, 1)", "def discrete_cmap(n_colors: int, base_cmap: str) -> Colormap:\r\n # https://gist.github.com/jakevdp/91077b0cae40f8f8244a\r\n base = plt.cm.get_cmap(base_cmap)\r\n color_list = base(np.linspace(0, 1, n_colors))\r\n cmap_name = base.name + str(n_colors)\r\n\r\n return base.from_list(cmap_name, color_list, n_colors)", "def discrete_cmap(N, base_cmap=None):\n\t# Note that if base_cmap is a string or None, you can simply do\n\t# return plt.cm.get_cmap(base_cmap, N)\n\t# The following works for string, None, or a colormap instance:\n\tbase = plt.cm.get_cmap(base_cmap)\n\tcolor_list = base(np.linspace(0, 1, N))\n\tcmap_name = base.name + str(N)\n\treturn base.from_list(cmap_name, color_list, N)", "def SetColorMap(self, colormap, contrast=None, bias=None):\n extra = ''\n if contrast is not None:\n extra += '%f,' % contrast\n if bias is not None:\n extra += '%f,' % bias\n fmt = dict(wid=self.wid,cmap=colormap, extra=extra, suffix=self.suffix)\n command = \"JS9.SetColormap('{cmap}', {extra} {{display:'{wid}{suffix}'}});\".format(**fmt)\n get_ipython().run_cell_magic('javascript', '', command)", "def discrete_cmap(N, base_cmap=None):\n # see https://gist.github.com/jakevdp/91077b0cae40f8f8244a\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def set_cmap_cb(self, w, index):\n old_cmap_name = self._cmap_name\n name = cmap.get_names()[index]\n self.cmap_name = name\n self.pipeline.push(StageAction(self,\n dict(cmap_name=old_cmap_name),\n dict(cmap_name=self._cmap_name),\n descr=\"rgbmap / change cmap\"))\n\n self.pipeline.run_from(self)", "def create_pascal_label_colormap():\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n\n return colormap", "def create_pascal_label_colormap():\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n\n return colormap", "def discrete_cmap(N, base_cmap=None):\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def discrete_cmap(N, base_cmap=None):\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "def 
discrete_cmap(N, base_cmap=None):\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)" ]
[ "0.7223537", "0.70907754", "0.6728139", "0.6724819", "0.66896755", "0.66573834", "0.65715116", "0.64982826", "0.6465033", "0.6465033", "0.6465033", "0.6465033", "0.6443659", "0.6338098", "0.63217264", "0.6303877", "0.6251594", "0.6230061", "0.62153965", "0.6132751", "0.6093069", "0.60640544", "0.5959021", "0.5958537", "0.59533674", "0.5944029", "0.59233356", "0.5919306", "0.5917612", "0.58996475", "0.5886292", "0.58770007", "0.586723", "0.5842137", "0.583624", "0.58313096", "0.5829404", "0.5817696", "0.5770981", "0.5769393", "0.5762901", "0.57548654", "0.5754176", "0.57493824", "0.57398325", "0.5706624", "0.570619", "0.56975", "0.56929034", "0.5689232", "0.567932", "0.56750315", "0.5646683", "0.56253165", "0.56253165", "0.56253165", "0.5623449", "0.561764", "0.5617523", "0.5608052", "0.5605953", "0.5605953", "0.56049776", "0.559097", "0.5587021", "0.55720615", "0.5565629", "0.55646706", "0.55624485", "0.5557444", "0.5554996", "0.55539066", "0.55507535", "0.5549038", "0.55480516", "0.55253494", "0.5525137", "0.55116606", "0.5511525", "0.5510667", "0.5508535", "0.5508535", "0.55043364", "0.55030227", "0.5495327", "0.54868925", "0.5486833", "0.54855", "0.54855", "0.54787076", "0.54775953", "0.54724646", "0.5468418", "0.5465241", "0.54632545", "0.5450189", "0.5450189", "0.54421157", "0.54421157", "0.54421157" ]
0.6442322
13
Plots the cell trajectories in 2D as a line and a point at the last coordinate.
def plot_trajectories_2d(trajectories: pd.DataFrame, ax: Optional[plt.Axes] = None):
    if ax is None:
        fig, ax = plt.subplots()
    for cell in trajectories:
        ax.plot(cell["position_x"].values, cell["position_y"].values)
        ax.scatter(
            cell["position_x"].values[-1], cell["position_y"].values[-1], marker="o"
        )
    return ax
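A minimal usage sketch, not part of the original record: although the annotation reads pd.DataFrame, the body iterates over per-cell frames with position_x/position_y columns, so this sketch passes a list of hypothetical per-cell DataFrames accordingly (pandas, matplotlib and the function above are assumed to be imported/defined).

import pandas as pd
import matplotlib.pyplot as plt

# Two made-up cell tracks; column names follow the function above.
cells = [
    pd.DataFrame({"position_x": [0.0, 1.0, 2.0], "position_y": [0.0, 0.5, 1.5]}),
    pd.DataFrame({"position_x": [1.0, 1.2, 0.8], "position_y": [2.0, 1.0, 0.3]}),
]
ax = plot_trajectories_2d(cells)  # each track drawn as a line, last point marked
plt.show()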
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_lines(self):\n self.plot(3)", "def trajectoire(self):\n trajx = []\n trajy = []\n for i in range(0, len(self.pos)):\n trajx.append(self.pos[i].x)\n trajy.append(self.pos[i].y)\n plt.plot(trajx, trajy) # color=self.color)\n plt.show()", "def plot_scatter_points_lines(self):\n self.plot(2)", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "def draw(self,ax):\n # remove cell lines if thery are on the plot\n # (if new axes are created the cell lines will be not there)\n for line in self.cell_lines:\n try:\n ax.lines.remove(line)\n except ValueError:\n pass\n # create new list with cell boundaries\n ylims = ax.get_ylim()\n self.cell_lines = [ ax.plot(xx,ylims,'k:')[0] \n for xx in zip(self.xx_cells,self.xx_cells) ]", "def plt_connecting_lines():\n\n for i in range(0, Molecule.connection_count):\n tmp1 = Molecule.right_endpt[Molecule.left_connection[i] - 1]\n tmp2 = Molecule.left_endpt[Molecule.right_connection[i] - 1]\n tmp3 = Molecule.energy[Molecule.left_connection[i] - 1]\n tmp4 = Molecule.energy[Molecule.right_connection[i] - 1]\n\n plt.plot([tmp1, tmp2], [tmp3, tmp4], color=PlotParameter.connection_line_color,\n lw=PlotParameter.connection_line_width, linestyle='--')\n\n return None", "def visualizeTrajectory(y, g):\n visualizeObs()\n x = np.linspace(-1.5, 1.5, 13)[1:-1]\n plt.plot(np.concatenate(([-1.5],x,[1.5])), np.concatenate(([0],y,[0])), color='black', marker='+')\n if g is not None:\n for i in range(y.size):\n plt.arrow(x[i], y[i], 0, -0.5*g[i], color='blue', head_width=0.05)", "def _plot(self, **kwargs):\n XY = self.positions\n plt.plot(XY[0,:], XY[1,:], 'o')\n plt.gca().set_aspect('equal')\n SS = np.abs(self.S)\n SS /= SS.max()\n\n for i in range(self.N):\n for j in range(self.N):\n if i == j or SS[i,j] < 1e-2:\n continue\n clr = 'r' if self.S[i,j]<0 else 'b'\n x, y = XY[:,i]\n r = XY[:,j] - XY[:,i]\n dx, dy = r\n rhat = r / np.sqrt((r**2).sum())\n ofsx, ofsy = 0.03 * rhat\n perpx, perpy = 0.005 * np.array([-rhat[1], rhat[0]])\n plt.arrow(x + ofsx + perpx, y + ofsy + perpy,\n r[0] - 2*ofsx, r[1] - 2*ofsy, color=clr,\n shape='right', width=0.01*SS[i,j],\n length_includes_head=True, head_width=0.02,\n linewidth=0, **kwargs)", "def line_layer(self):\n screen_origin = self.ids.mapview.get_window_xy_from(lat1, lon1, self.ids.mapview.zoom)\n screen_destination = self.ids.mapview.get_window_xy_from(lat2, lon2, self.ids.mapview.zoom)\n point_list = [screen_origin[0], screen_origin[1], screen_destination[0], screen_destination[1]]\n\n with self.ids.line.canvas:\n self.ids.line.canvas.clear()\n\n Color(0, 0, 0, .6)\n Line(points=point_list, width=3, joint=\"bevel\")", "def plot_single_trajectory(self):\n\n plt.plot(self.trip_centroids['lon'], self.trip_centroids['lat'], '-o')", "def coordPlt(grid, buffer=10, step=5):\n plt.cla()\n\n plt.plot(grid[1][0::step, 0::step],\n grid[0][0::step, 0::step],\n '.-b' )\n\n plt.plot(grid[1][0::step, 0::step].T,\n grid[0][0::step, 0::step].T,\n '.-b' )\n\n plt.axis( [ grid[1].max() + buffer,\n grid[1].min() - buffer,\n grid[0].max() + buffer,\n grid[0].min() - buffer],\n )\n plt.axis('off')\n plt.grid()", "def plot_points(self, tour_tuples):\n data_in_array = np.array(tour_tuples)\n transposed = data_in_array.T\n x, y = 
transposed\n plt.ion()\n # self.f, self.a = plt.subplots(1, 1)\n self.f = Figure(figsize=(8, 6), dpi=100)\n self.a = self.f.add_subplot(111, navigate=True)\n self.a.plot(x, y, 'ro')\n # self.a.plot(x, y, 'b-')\n self.a.set_title('Current best tour')\n self.a.set_xlabel('X axis coordinates')\n self.a.set_ylabel('Y axis coordinates')\n self.a.grid(True)\n self.canvas = FigureCanvasTkAgg(self.f, master=root)\n self.canvas.mpl_connect('motion_notify_event', on_move)\n self.canvas.get_tk_widget().grid(row=1, column=1, sticky=W)\n self.canvas.draw()", "def show_obstacle(plot, points):\n for p1, p2 in zip(points, [points[-1]] + list(points)):\n plot.plot([p1[0], p2[0]], [p1[1], p2[1]], 'b')", "def plot_tour(self, tour_tuples):\n tour_tuples.append(tour_tuples[0])\n data_in_array = np.array(tour_tuples)\n transposed = data_in_array.T\n x, y = transposed\n plt.ion()\n self.a.cla()\n # self.f, self.a = plt.subplots(1, 1)\n # self.f = Figure(figsize=(8, 6), dpi=100)\n # self.a = self.f.add_subplot(111, navigate=True)\n self.a.plot(x, y, 'ro')\n self.a.plot(x, y, 'b-')\n # self.a.set_title('Current best tour')\n # self.a.set_xlabel('X axis coordinates')\n # self.a.set_ylabel('Y axis coordinates')\n # self.a.grid(True)\n # self.canvas = FigureCanvasTkAgg(self.f, master=root)\n # self.canvas.mpl_connect('motion_notify_event', on_move)\n # self.canvas.get_tk_widget().grid(row=1, column=1, sticky=W)\n self.canvas.draw()\n # self.canvas.show()", "def show_path_2D(start, end, coordinates, polygons, clear = True):\n global L, N, delta_t\n\n # start interactive mode\n plt.ion()\n\n # crete eempty figure on which data will go and first subplot\n fig = plt.figure()\n\n # get into the correct time step\n for time_step in range(start, end):\n # list of colours used for animation\n colours = cm.rainbow(np.linspace(0, 1, N))\n\n # loop over each particle and colour\n for i in range(N):\n # plot x, y poistion of particle in a given colour and set axis to size of box\n plt.scatter(coordinates[time_step][i][0], coordinates[time_step][i][1], s = 1, color = 'r')\n\n # plot the object\n if i < M:\n polygon = np.array(polygons[time_step][i])\n # get the points of the polygon to plot it\n x, y = polygon.T\n\n # print(x, y)\n\n x = np.append(x, x[0])\n y = np.append(y, y[0])\n\n # print(x, y)\n\n # plot the polygon\n plt.plot(x , y)\n # plt.scatter(polygons_com[time_step][i][0], polygons_com[time_step][i][1], s = 5, color = 'g')\n\n if bound_cond == True:\n plt.axis([0, L, 0, L])\n plt.axis([0, L, 0, L])\n # plt.axis([-L*2, L*2, -L*2, L*2])\n\n # show graph\n plt.show()\n plt.pause(time_pause)\n\n # decide if you want to clear\n if clear == True:\n plt.clf()\n\n return None", "def draw_grid(plt):\n x0, x1, x2, x3 = 0, 3057, 6508, 9860\n y0, y1, y2, y3, y4, y5, y6, y7, y8 = 0, 1535, 2041, 2547, 3053, 3559, 4257, 5303, 6978\n alpha, linewidth = 0.3, 0.5\n\n # Vertical Lines\n plt.plot((x0, x0), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x1, x1), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x2, x2), (y0, y5), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x3, x3), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n\n # Horizontal Lines\n plt.plot((x0, x3), (y0, y0), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y1, y1), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y2, y2), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y3, y3), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y4, y4), 'black', 
alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y5, y5), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x1), (y6, y6), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x1, x3), (y7, y7), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y8, y8), 'black', alpha=alpha, linewidth=linewidth)", "def plot(self):\n pass", "def drawLines(self):\n\t\tintersections = [[], []]\n\t\tfor l in self.lines:\n\t\t\tif l.direction == 'v':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + int((self.width - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.width / 100) if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[0].append(position)\n\t\t\t\tfor yPos in range(1, self.height - 2):\n\t\t\t\t\tself.wts(yPos, position, '│', self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(0, position, '┬',self._borderColor)\n\t\t\t\tself.wts(self.height - 2, position, '┴', self._borderColor)\n\t\t\telif l.direction == 'h':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + ((self.height - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.height / 100) - 1 if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[1].append(position)\n\t\t\t\tself.wts(position, 1, '─' * (self.width - 2), self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(position, 0, '├', self._borderColor)\n\t\t\t\tself.wts(position, self.width - 1, '┤', self._borderColor)\n\t\t# draw intersections\n\t\tfor x in intersections[1]:\n\t\t\tfor y in intersections[0]:\n\t\t\t\tself.wts(x, y, '┼', self._borderColor)\n\t\tself.verticalBoundaries = intersections[0]\n\t\tif self.screenBorder:\n\t\t\tself.verticalBoundaries.append(self.width)", "def test_line_plot(self):\n clf()\n filename = 'lines_plot.png'\n N = 10\n lines = GeoSeries([LineString([(0, i), (9, i)]) for i in xrange(N)])\n ax = lines.plot()\n self._compare_images(ax=ax, filename=filename)", "def plot_current_map(inputs):\n # plot it each epoch\n mp = construct_map_with_sliders(inputs, extvar=extvar)\n # to make it clearer, add the start pos\n npa = np.concatenate([[np.concatenate(\n [extvar[\"start_pos\"] / np.array([512, 384]), [0, 0]])], tf.stack(mp).numpy().squeeze()])\n fig, ax = plt.subplots()\n x, y = np.transpose(npa)[0:2]\n #x, y = np.random.rand(2, 20)\n line = MyLine(x, y, mfc='red', ms=12)\n line.text.set_color('red')\n line.text.set_fontsize(16)\n ax.add_line(line)\n plt.show()", "def plot(self, x, y, color=\"black\"):\n self.__checkOpen()\n xs,ys = self.toScreen(x,y)\n #self.create_line(xs,ys,xs+1,ys, fill=color)\n _tkExec(self.create_line,xs,ys,xs+1,ys,fill=color,tag=\"line\")\n self.__autoflush()", "def replot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n line.set_xdata(self.data[i].x)\n for line in self.lines: \n ax.draw_artist(line)", "def plot_traj2D_NEA(r_NEA,t_pos,n_arrows,arrow_size,dest_folder=None):\n \n import plotting_utilities as plut\n\n fig = plt.figure(figsize=(12.3,10))\n\n # View from above\n ax1 = fig.add_subplot(2,1,1)\n traj2D = ax1.plot(r_NEA[1,:], r_NEA[0,:], ls='solid', color='#006633', label='') # trajectory\n ax1.plot(r_NEA[1,0], r_NEA[0,0], ls='solid', color='#006633', # start marker\n marker='o', markersize=4*arrow_size, markerfacecolor='#006633', markeredgecolor='#006633') \n ax1.plot(r_NEA[1,-1], r_NEA[0,-1], ls='solid', color='#006633', # end marker\n marker='o', markersize=4*arrow_size, markerfacecolor='w', markeredgecolor='#006633') \n plut.add_arrow_to_line2D(ax1,traj2D, # 
arrows\n arrow_locs=np.linspace(1/(n_arrows+1),n_arrows/(n_arrows+1),n_arrows),\n arrowsize=arrow_size) \n\n ax1.set_title(\"Ground Track, to scale\") \n ax1.set_xlabel('East (m)')\n ax1.set_ylabel('North (m)')\n ax1.minorticks_on()\n\n plt.axis('equal')\n plt.axis([1.02*min(r_NEA[1,:]), 1.02*max(r_NEA[1,:]), 1.02*min(r_NEA[0,:]), 1.02*max(r_NEA[0,:])])\n plt.tight_layout()\n\n # Altitude\n ax2 = fig.add_subplot(2,1,2)\n hhist = ax2.plot(t_pos, r_NEA[2,:], ls='solid', color='#006633', label='') # altitude history\n ax2.plot(t_pos[0], r_NEA[2,0], ls='solid', color='#006633', # start marker\n marker='o', markersize=4*arrow_size, markerfacecolor='#006633', markeredgecolor='#006633') \n ax2.plot(t_pos[-1], r_NEA[2,-1], ls='solid', color='#006633', # end marker\n marker='o', markersize=4*arrow_size, markerfacecolor='w', markeredgecolor='#006633') \n plut.add_arrow_to_line2D(ax2,hhist, # arrows\n arrow_locs=np.linspace(1/(n_arrows+1),n_arrows/(n_arrows+1),n_arrows),\n arrowsize=arrow_size) \n\n ax2.set_title(\"Altitude history\") \n ax2.set_xlabel(r'$t$ (s)')\n ax2.set_ylabel(r'$h_\\mathrm{SL}$ (m)')\n ax2.minorticks_on()\n\n ax2.set_xlim([1.02*min(t_pos), 1.02*max(t_pos)])\n plt.tight_layout()\n \n # Export\n if dest_folder != None:\n plt.savefig(dest_folder+'plot_Traj2D_NEA.pdf')", "def plot(self):\n\t\tself.plotOfXray().plot()", "def plot_trajectories_XYZ(t_start,t_stop):\n \n time, ankle_l_trajectory, ankle_r_trajectory,foot_l_contact,foot_r_contact,muscle_lh_activations, muscle_rh_activations,muscle_lh_forces,muscle_rh_forces,joint_lh_positions,joint_rh_positions = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time = time[index_start:index_end+1]\n ankle_l_trajectory = ankle_l_trajectory[index_start:index_end+1,:]\n ankle_r_trajectory = ankle_r_trajectory[index_start:index_end+1,:]\n \n #time=np.linspace(1,len(ankle_l_trajectory[:,0]),len(ankle_l_trajectory[:,0]));\n \n plt.figure('Trajectories')\n plt.subplot(311)\n plt.plot(time,ankle_l_trajectory[:,0])\n plt.plot(time,ankle_r_trajectory[:,0])\n #plt.title('Trajectory of the X component')\n plt.xlabel('Time [s]')\n plt.ylabel('X Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(312)\n plt.plot(time,ankle_l_trajectory[:,1])\n plt.plot(time,ankle_r_trajectory[:,1])\n #plt.title('Trajectory of the Y component')\n plt.xlabel('Time [s]')\n plt.ylabel('Y Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(313)\n plt.plot(time,ankle_l_trajectory[:,2])\n plt.plot(time,ankle_r_trajectory[:,2])\n #plt.title('Trajectory of the Z component')\n plt.xlabel('Time [s]')\n plt.ylabel('Z Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n# plt.suptitle('Decomposition of the trajectories of the hind feet')\n return", "def plot_xy(self, xpts, ypts):\n self.plot(np.asarray((xpts, ypts)).T)", "def plotpy (tvec=tvec, ind=ind, synclines=True):\n plt.scatter(tvec,ind) \n if synclines:\n for spkt in np.array(tvec): plt.plot((spkt, spkt), (0, ncells), 'r-', linewidth=0.1)", "def to_cell_coordinates(self):\n self.plotter.to_cell_coordinates(self.ax)\n self.plotter.replot(self.ax)\n self.plotter.cells.draw(self.ax)\n self.x_label.set_text(self.plotter.plot_xlabel)\n self.fig.canvas.draw()", "def plotTrace(trace):\n for t in trace:\n plt.plot(range(len(t)),t,alpha=0.5)\n plt.ylabel(\"Trace\")\n plt.xlabel(\"Step\")\n\n return", "def plot_chain(chain):\n\n\tlabels = ['a', 
'b']\n\tplt.figure(figsize=(20,6))\n\tfor i_dim in range(2):\n\t\tplt.subplot(2,1,i_dim+1)\n\t\tplt.ylabel(labels[i_dim])\n\n\t\tfor i in range(100):\n\t\t\tplt.plot(chain[i,:,i_dim],color='black', alpha=0.5)\n \n\tplt.show()", "def plot(self) :\r\n pos = np.nonzero(self.y > 0) # matlab: find(y > 0)\r\n neg = np.nonzero(self.y < 0) # matlab: find(y < 0)\r\n plt.plot(self.X[pos,0], self.X[pos,1], 'b+', markersize=5)\r\n plt.plot(self.X[neg,0], self.X[neg,1], 'ro', markersize=5)\r\n plt.show()", "def __plot_rank_line(self):\n numbers = [i for i in range(self.__rank_length)]\n\n fig = plt.figure()\n ax = fig.add_subplot(211)\n ax.plot(numbers, self.__motion, label=\"Motion\", color='r')\n ax.plot(numbers, self.__blur, label=\"Blur\", color='g')\n ax.plot(numbers, self.__audio, label=\"Audio\", color='c')\n ax.plot(numbers, self.__text, label=\"Text\", color='m')\n ax.set_title(\"rankings for all features\")\n ax.set_ylim(-1)\n plt.legend(loc=2).set_draggable(True)\n\n ax = fig.add_subplot(212)\n for start, end, in self.__timestamps:\n ax.plot([start, start], [0, 10], color='red', linestyle='dashed', linewidth=1.5)\n ax.plot([end, end], [0, 10], color='green', linestyle='dashed', linewidth=1.5)\n\n custom_lines = [Line2D([0], [0], color='red', linestyle='dashed', linewidth=1.5),\n Line2D([0], [0], color='green', linestyle='dashed', linewidth=1.5)]\n\n ax.plot([i for i in range(self.__rank_length)], self.__ranks)\n ax.set_ylim(0)\n ax.set_title(\"sum of all rankings\")\n ax.legend(custom_lines, ['start time', 'end time'], loc=0).set_draggable(True)\n\n plt.tight_layout()\n plt.show()", "def make_plot(x,y):", "def _display_from_tsne(self, x, y):\n\n # Find the closest 9\n inds = np.argsort(np.sum( (self._Y_tsne-np.array([x, y]))**2, axis=1))\n print(inds[:10])\n\n # Plot the green circles on the tsne plot\n self._display_tsne()\n self._tsne_window.plot(self._Y_tsne[inds[:9],0], self._Y_tsne[inds[:9],1], 'yo')\n\n # Now run through the 9 sub axes and display the image data and cutout location.\n self._sub_window_filenames = []\n for ii, axis in enumerate(self._sub_windows):\n axis.clear()\n\n fits_filename, filename, sliceno, middle = self._process_result_filename_cutout_number[inds[ii]]\n print('display from tsne fits: {} filename: {}'.format(fits_filename, filename))\n\n # So, the filename actually contains the wrong path on it so we\n # need to take it off and use the proper path.\n pf = pickle.load(open(os.path.join(self._cutouts_directory, filename), 'rb'))\n ff = list(glob.iglob('{}/**/{}'.format(self._data_directory, pf['filename'].split('/')[-1])))[0]\n\n print(ff)\n self._display_window(axis, ff)\n self._sub_window_filenames.append(fits_filename)\n\n # Draw the line\n axis.plot([middle[0]-112, middle[0]-112], [middle[1]-112, middle[1]+112], 'y')\n axis.plot([middle[0]+112, middle[0]+112], [middle[1]-112, middle[1]+112], 'y')\n axis.plot([middle[0]-112, middle[0]+112], [middle[1]-112, middle[1]-112], 'y')\n axis.plot([middle[0]-112, middle[0]+112], [middle[1]+112, middle[1]+112], 'y')\n\n plt.figure(1).show()\n plt.figure(1).canvas.draw()", "def simple_line():\n\n # Make two datasets\n dataset_a = DataSet(sine)\n dataset_b = DataSet(cosine)\n\n # Make plot and add data\n plot = Plot()\n plot.set_text()\n plot.add_dataset(dataset_a)\n plot.add_dataset(dataset_b)\n\n # Plot graph and display\n plot.plot()\n plot.save(name='./figures/2d_simple_line',fmt='png')\n plot.display()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def plotTrajectory(arg, color = sf.cyan, xyRate=True, 
radiusRate = 80.0\n , blAxes = True):\n if not(hasattr(arg, '__getitem__')) and hasattr(arg, '__iter__'):\n arg = list(arg)\n\n vs = sf.vs_()\n\n color = tuple(color) # color argment may be list/vector\n if isinstance(arg,list) or isinstance(arg,tuple) or isinstance(\n arg,type(sf.sc.array([0,]))):\n from octnOp import ClOctonion\n if not(hasattr(arg[0],'__len__')) and isinstance(arg[0], complex):\n arg = [ (x.real, x.imag) for x in arg]\n elif not(hasattr(arg[0],'__len__')) and isinstance(arg[0], ClOctonion):\n arg = [ x[1:4] for x in arg]\n\n if len(arg[0])==2:\n import visual.graph as vg\n global __obj2dDisplayGeneratedStt\n\n maxX = max([abs(elm[0]) for elm in arg])\n maxY = max([abs(elm[1]) for elm in arg])\n\n print \"maxX:\",maxX, \" maxY:\",maxY\n\n if (__obj2dDisplayGeneratedStt == None):\n if xyRate == True: # 11.01.16 to \n maxAt = max(maxX, maxY)\n __obj2dDisplayGeneratedStt = vg.gdisplay(\n width=600*maxX/maxAt,height=600*maxY/maxAt)\n else:\n __obj2dDisplayGeneratedStt = vg.gdisplay(\n width=600,height=600)\n #__bl2dDisplayGeneratedStt = True\n grphAt = vg.gcurve(color = color)\n for i in range(len(arg)):\n assert len(arg[i])==2, \"unexpeted length data:\"+str(arg[i])\n grphAt.plot(pos = arg[i])\n\n #return __obj2dDisplayGeneratedStt\n #import pdb; pdb.set_trace()\n #print \"debug:\",grphAt.gcurve.pos\n\n # plot start mark\n grphSqAt = vg.gcurve(color = color)\n pos0At = grphAt.gcurve.pos[0,:][:2]\n rateAt = 50\n for x,y in sf.mitr([-maxX/rateAt, maxX/rateAt]\n , [-maxY/rateAt, maxY/rateAt]):\n grphSqAt.plot(pos = pos0At+[x,y])\n \n grphSqAt.plot(pos = pos0At+[-maxX/rateAt,-maxY/rateAt])\n\n return grphAt # 09.02.04 to animate graph\n elif len(arg[0])==3:\n vs.scene.forward=(-1,+1,-1)\n vs.scene.up=(0,0,1)\n\n c = vs.curve( color = color )\n\n maxX, maxY, maxZ = 0,0,0\n for i in range(len(arg)):\n if maxX < abs(arg[i][0]):\n maxX = abs(arg[i][0])\n if maxY < abs(arg[i][1]):\n maxY = abs(arg[i][1])\n if maxZ < abs(arg[i][2]):\n maxZ = abs(arg[i][2])\n c.append( arg[i] )\n #print c.pos\n print \"maxX:\",maxX, \" maxY:\",maxY, \" maxZ:\",maxZ\n maxAt = max(maxX,maxY,maxZ)\n c.radius = maxAt/radiusRate\n\n vs.sphere(pos = arg[0], radius = 3*c.radius, color = color)\n\n if blAxes == True:\n # draw axise\n vs.curve( pos=[(0,0,0), (maxAt,0,0)]\n , color=(1,0,0)\n , radius = maxAt/100 )\n vs.curve( pos=[(0,0,0), (0,maxAt,0)]\n , color=(0,1,0)\n , radius = maxAt/100 )\n vs.curve( pos=[(0,0,0), (0,0,maxAt)]\n , color=(0,1,1)\n , radius = maxAt/100 )\n #return vs.scene\n return c # 09.02.04 to animate graph\n else:\n assert False,\"unexpeted data:\"+str(arg)", "def plot(self):\n plt.cla()\n h = 0.02\n # Plot the training points\n self.ax.scatter(self.X[:, 1], self.X[:, 2], c = self.y, cmap = plt.cm.Paired)\n # Plot the separation line\n x0 = 0\n y0 = -self.w[0] / self.w[2]\n x1 = 1\n y1 = -(self.w[1] + self.w[0]) / self.w[2]\n p1 = (x0, y0)\n p2 = (x1, y1)\n newline(p1, p2)\n plt.draw()\n plt.pause(0.5)", "def _plot(self):\n\n #self.best_canvas.Clear()\n self.current_canvas.Clear()\n\n if len(self.results) > 0:\n x_max = self.results[-1][2]\n #self.best_canvas.xSpec = (0, x_max)\n self.current_canvas.xSpec = (0, x_max)\n\n # best_points = [(r.time, r.best.distance) for r in self.results\n # if r.best is not None and\n # isinstance(r.best.distance, int)]\n # best_line = PolyLine(best_points)\n # best_plot = PlotGraphics([best_line],\n # title='Best path distance over time',\n # xLabel='Time [ns]', yLabel='Distance')\n\n current_points = 
[self.TopLevelParent.solver_view.tsp_view._points[x] for x in self.results[-1][0]] if len(self.results) > 0 else []\n # current_points = [(r[2], r[0]) for r in self.results]\n if len(current_points) > 0:\n current_line = PolyLine(current_points)\n current_plot = PlotGraphics([current_line],\n title='Current path distance over time',\n xLabel='Iter', yLabel='Score')\n\n #self.best_canvas.Draw(best_plot)\n self.current_canvas.Draw(current_plot)", "def plot_grid(ax, flow, factor=10):\n grid = factor * flow[:, ::8, ::8]\n lin_range = np.linspace(0, 512, 64)\n x, y = np.meshgrid(lin_range, lin_range)\n x = x + grid[0, ...]\n y = y + grid[1, ...]\n y = y\n\n segs1 = np.stack((x, y), axis=2)\n segs2 = segs1.transpose(1, 0, 2)\n ax.add_collection(LineCollection(segs1, color='black', linewidths=0.8))\n ax.add_collection(LineCollection(segs2, color='black', linewidths=0.8))\n ax.autoscale()", "def _set_axes(self):\n self += helper.line(stroke=\"black\", x1=self.__dict__['x'], x2=self.__dict__['x'], y1=0, y2=self.__dict__['y']*2)\n self += helper.line(stroke=\"black\", x1=0, x2=self.__dict__['x']*2, y1=self.__dict__['y'], y2=self.__dict__['y'])", "def plot_outputgrid(self, scalefactor=1, **kwargs):\n\n if not (type(scalefactor) == 'int'):\n scalefactor = round(scalefactor)\n\n xx = np.arange(self.xori, self.xend, scalefactor * self.dx)\n yy = np.arange(self.yori, self.yend, scalefactor * self.dy)\n plt.hlines(yy, self.xori, self.xend, linewidth=0.2, **kwargs)\n plt.vlines(xx, self.yori, self.yend, linewidth=0.2, **kwargs)\n\n logger.debug('Adding output grid to plot')", "def plot_graph(self) -> None:", "def print(self):\n # it would be nice just to add one point instead of printing all again from scratch\n stones_player_0 = [(i, j) for i in range(self.size) for j in range(self.size) if self.board[i, j] == -1]\n stones_player_1 = [(i, j) for i in range(self.size) for j in range(self.size) if self.board[i, j] == 1]\n plt.plot([0, self.size-1, 0, self.size-1], [0, 0, self.size-1, self.size-1], marker='x', ls='')\n plt.plot(*zip(*stones_player_0), marker='o', color='r', ls='')\n plt.plot(*zip(*stones_player_1), marker='o', color='b', ls='')\n\n plt.draw()\n plt.show(block=False)", "def line_plot(array_index_start,array_index_finish):\n cv2.line(image, (int(pointstore[array_index_start, 0]), int(pointstore[array_index_start, 1])), (int(pointstore[array_index_finish, 0]), int(pointstore[array_index_finish, 1])),\n (0, 0, 0), 1)", "def plot(self):\n\t\tself.plotOfTF().plot()", "def line_graph():\n fig = plt.figure()\n ax = plt.axes()\n x = [1, 2, 3]\n y = [5, 6, 7]\n plt.plot(x, y)\n plt.show()", "def simple_plot(self):\n for i in np.arange(len(self.e2)):\n self.ax.plot(self.e2[i], 'o', label=self.labels[i])", "def show_grid(self):\n\n if not os.path.exists(self.path_to_results):\n os.mkdir(self.path_to_results)\n\n fig = plt.figure()\n\n if self.show_points == 1:\n plt.scatter(self.x_list_grid, self.y_list_grid, c='blue')\n\n plt.plot(self.x_list_main, self.y_list_main,\n 'green', label='straight path')\n plt.plot(self.x_list, self.y_list, 'red', label='first path')\n plt.plot(self.x_list_filtered, self.y_list_filtered,\n 'blue', label='filtered path')\n plt.title('Paths')\n plt.ylabel('Latitude')\n plt.xlabel('Longitude')\n # plt.legend()\n\n fig.savefig(os.path.join(self.path_to_results, 'Paths.png'))", "def show_trace_2d(f, results): #@save\n set_figsize()\n plt.plot(*zip(*results), '-o', color='#ff7f0e')\n x1, x2 = torch.meshgrid(torch.arange(-5.5, 1.0, 0.1),torch.arange(-3.0, 1.0, 0.1))\n 
plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n plt.xlabel('x1')", "def plot_traces(self, cellname, targettime, historytime, srctype, syntype):\n self.tstart = targettime - historytime\n self.istart = int(self.tstart / self.plotdt + 0.5)\n self.tend = targettime + historytime\n self.iend = int(self.tend / self.plotdt + 0.5)\n self.tseries = np.linspace(self.tstart, self.tend, \n self.iend - self.istart)\n if cellname not in self.datafile['/Vm']:\n return []\n vm = self.datafile['/Vm/' + cellname] \n plt.plot(self.tseries, \n normalize(vm[self.istart:self.iend]),\n label=cellname)\n stimdata = np.asarray(self.datafile['/stimulus/stim_bg'])\n stim_start = int(self.tstart/self.simdt+0.5)\n stim_end = int(self.tend/self.simdt+0.5)\n stimdata = stimdata[stim_start: stim_end]\n plt.plot(np.linspace(self.tstart, self.tend, len(stimdata)),\n normalize(stimdata),\n 'r--', \n label='STIMULUS')\n precells = self.plot_presynaptic(cellname, srctype, syntype)\n return precells", "def plot_endpoints( polylines, mymap ):\n map( \\\n lambda start : mymap.addpoint( start[-1][0], start[-1][1], \"#0000FF\") if start != [] else [],\n polylines)", "def plotBox(box):\n plt.plot([box.xll, box.xur, box.xur, box.xll, box.xll]\n ,[box.yll, box.yll, box.yur, box.yur, box.yll]\n , '-'\n )", "def plot_epochs(epochs, y, line):\n ep = np.arange(0, epochs)\n if hasattr(y[0], '__len__'):\n for i in range(len(y[0])):\n plt.plot(ep, [val[i] for val in y], line[i])\n else:\n plt.plot(ep, y, line)\n plt.show()", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def plot(self, routePoints=None):\n return plot(routePoints, self.profiles)", "def plot_sequence_2d(sequence: Sequence, **kwargs: Any) -> mpl.figure.Figure:\n return plot_paths_2d(sequence.prediction.get_path(), sequence.ground_truth.get_path(), **kwargs)", "def draw_line(self, coords, smooth=False, **options):\n # NOTE: Outline does not work because uses paths instead of normal line method.\n # TODO: Add volume param, containing a list of linewidths same length as line\n # or as a function that calculates the width at each node\n # Result is a flow line with varying thickness at each node\n # Have to calculate left/right xy at each node, and use symbol curveto()\n # Easy and really cool...DO IT!\n options = self._check_options(options)\n \n if not hasattr(coords[0], \"__iter__\"):\n coords = _grouper(coords, 2)\n else: coords = (point for point in coords)\n \n # get drawing tools from options\n args = []\n if options[\"fillcolor\"]:\n pen = aggdraw.Pen(options[\"fillcolor\"], options[\"fillsize\"])\n args.append(pen)\n\n if smooth:\n\n # Note: Creation of the aggdraw.Symbol object here can be\n # very slow for long lines; Path is much faster but due\n # to a bug it does not correctly render curves, hence the use\n # of Symbol\n \n 
pathstring = \"\"\n \n # begin\n coords = _pairwise(coords)\n (startx,starty),(endx,endy) = next(coords)\n pathstring += \" M%s,%s\" %(startx, starty)\n \n # draw straight line to first line midpoint\n midx,midy = (endx + startx) / 2.0, (endy + starty) / 2.0\n pathstring += \" L%s,%s\" %(midx, midy)\n oldmidx,oldmidy = midx,midy\n \n # for each line\n for line in coords:\n # curve from midpoint of first to midpoint of second\n (startx,starty),(endx,endy) = line\n midx,midy = (endx + startx) / 2.0, (endy + starty) / 2.0\n pathstring += \" Q%s,%s,%s,%s\" %(startx, starty, midx, midy)\n oldmidx,oldmidy = midx,midy\n \n # draw straight line to endpoint of last line\n pathstring += \" L%s,%s\" %(endx, endy)\n\n # make into symbol object\n symbol = aggdraw.Symbol(pathstring)\n\n # draw the constructed symbol\n self.drawer.symbol((0,0), symbol, *args)\n\n else:\n\n path = aggdraw.Path()\n \n # begin\n startx,starty = next(coords)\n path.moveto(startx, starty)\n \n # connect to each successive point\n for nextx,nexty in coords:\n path.lineto(nextx, nexty)\n\n # draw the constructed path\n self.drawer.path((0,0), path, *args)", "def draw_step(ax, line, sizes):\n x1, y1, x2, y2 = line\n\n # Clear & Resize\n ax.cla()\n size = np.sum(sizes) + 1\n g.axis([-size, size, -size, size])\n g.autoscale(False)\n\n # Plot step\n ax.plot([0, x1, x2], [0, y1, y2], lw=2, c='k')\n ax.add_patch(Circle((0, 0), 0.05, fc='k', zorder=10))\n ax.add_patch(Circle((x1, y1), 0.08, fc='b', ec='b', zorder=10))\n ax.add_patch(Circle((x2, y2), 0.08, fc='r', ec='r', zorder=10))", "def _timeseries_scatter_plot_lines(axes):\n axes.axvline(\n x=0,\n ymin=-1000,\n ymax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )\n axes.axhline(\n y=0,\n xmin=-1000,\n xmax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )", "def display(points: List[Point], color, width=1) -> None:\n x = []\n y = []\n for point in points:\n x.append(point[0])\n y.append(point[1])\n plt.plot(x, y, c=color, linewidth=width)", "def draw_tour(list_coord, canvas, color):\n if len(list_coord) >= 1:\n for i in range(0, len(list_coord) - 1):\n canvas.create_line(list_coord[i].get_x(),\n list_coord[i].get_y(),\n list_coord[i + 1].get_x(),\n list_coord[i + 1].get_y(),\n fill=color,\n width=3)\n #dash=(4, 4))\n\n canvas.create_line(list_coord[-1].get_x(),\n list_coord[-1].get_y(),\n list_coord[0].get_x(),\n list_coord[0].get_y(),\n fill=color,\n width=3)\n #dash=(4, 4))", "def tiledplot(self, stims):\r\n nstim = stims.shape[0]\r\n plotrows = int(np.sqrt(nstim))\r\n plotcols = int(np.ceil(nstim/plotrows))\r\n f, axes = plt.subplots(plotrows, plotcols, sharex=True, sharey=True)\r\n for ii in range(nstim):\r\n axes.flatten()[ii].plot(stims[ii])\r\n f.subplots_adjust(hspace=0, wspace=0)\r\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\r\n plt.setp([a.get_yticklabels() for a in f.axes[:-1]], visible=False)", "def plotsegs(x1,y1,x2,y2):\n x = np.repeat(np.nan,x1.shape[0]*3)\n y = np.repeat(np.nan,x1.shape[0]*3)\n x[0::3] = x1\n x[1::3] = x2\n y[0::3] = y1\n y[1::3] = y2\n plt.plot(x,y)", "def plotLine(self):\n minc = 0\n maxc = 500\n num = 500\n levels = np.linspace(minc,maxc,num+1)\n title = textwrap.dedent(\"\"\"\\\n Orography difference between LGM and Modern ICE-5G data\n using {0} meter contour interval\"\"\").format((maxc-minc)/num)\n plt.figure()\n plt.contour(self.difference_in_ice_5g_orography,levels=levels)\n plt.title(title)\n pts.set_ticks_to_zero()\n #if self.save:\n #plt.savefig('something')\n print(\"Line 
contour plot created\")", "def tiled_plot(self, stims):\n nstim = stims.shape[0]\n plotrows = int(np.sqrt(nstim))\n plotcols = int(np.ceil(nstim/plotrows))\n f, axes = plt.subplots(plotrows, plotcols, sharex=True, sharey=True)\n for ii in range(nstim):\n axes.flatten()[ii].plot(stims[ii])\n f.subplots_adjust(hspace=0, wspace=0)\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\n plt.setp([a.get_yticklabels() for a in f.axes[:-1]], visible=False)", "def add_square_plot(x_start, x_stop, y_start, y_stop, ax, colour = 'k'):\n \n ax.plot((x_start, x_start), (y_start, y_stop), c= colour) # left hand side\n ax.plot((x_start, x_stop), (y_stop, y_stop), c= colour) # bottom\n ax.plot((x_stop, x_stop), (y_stop, y_start), c= colour) # righ hand side\n ax.plot((x_stop, x_start), (y_start, y_start), c= colour) # top", "def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)", "def plot_2018_board():\n top_left_corner_border = plt.Polygon([[0,823], [91,823], [0,747]], fill='k', edgecolor='k')\n bottom_left_corner_border = plt.Polygon([[0,0], [0,76], [91,0]], fill='k', edgecolor='k')\n plt.gca().add_line(top_left_corner_border)\n plt.gca().add_line(bottom_left_corner_border)\n\n # Auto Line\n auto_line = plt.Line2D((305, 305), (0, 823), lw=2.5)\n plt.gca().add_line(auto_line)\n\n # Exchange Zone\n exchange_zone = plt.Rectangle((0, 442), 91, 122, fc='r')\n plt.gca().add_patch(exchange_zone)\n\n # Power Cube Zone\n power_cube_zone = plt.Rectangle((249, 354), 107, 114, fc='r')\n plt.gca().add_patch(power_cube_zone)\n\n # Switch Zone\n switch_zone = plt.Rectangle((356, 216), 142, 390, fc='grey')\n plt.gca().add_patch(switch_zone)\n\n # Power Cubes next to Switch Zone\n for i in range(0,6,1):\n cube = plt.Rectangle((498, 216+i*(33+38.4)), 33, 33, fc='yellow')\n plt.gca().add_patch(cube)\n\n # Null territory\n null_territory_top = plt.Polygon([[731.5, 581], [731.5, 823], [823, 823], [823, 581]], fill=None, edgecolor='k')\n null_territory_bottom = plt.Polygon([[731.5, 0], [731.5, 242], [823, 242], [823, 0]], fill=None, edgecolor='k')\n plt.gca().add_line(null_territory_top)\n plt.gca().add_line(null_territory_bottom)\n\n # Scale\n scale = plt.Rectangle((653.5, 242), 823-653.5, 581-242, fc='black')\n plt.gca().add_patch(scale)", "def plot(self):\n done = []\n patches = []\n\n def chain(p):\n done.append(p)\n yield p\n last = p\n finished = False\n while not finished:\n seg0 = next(filter(lambda x: (x[0] not in done and\n x[1] == last),\n self.segments),\n None)\n seg1 = next(filter(lambda x: (x[1] not in done and\n x[0] == last),\n self.segments),\n None)\n if seg0 is not None:\n done.append(seg0[0])\n yield seg0[0]\n last = seg0[0]\n elif seg1 is not None:\n done.append(seg1[1])\n yield seg1[1]\n last = seg1[1]\n else:\n finished = True\n p0 = next(filter(lambda x: x not in done,\n (s[0] for s in self.segments)),\n next(filter(lambda x: x not in done,\n (s[1] for s in self.segments)),\n None))\n assert type(p0) is tuple\n while p0 is not None:\n gen = chain(p0)\n a = np.array([[p[0], p[1]] for p in gen])\n patches.append(mplPoly(a, closed=True))\n p0 = next(filter(lambda x: x not in done,\n (s[0] for s in self.segments)),\n next(filter(lambda x: x not in done,\n (s[1] for s in self.segments)),\n None))\n fig, ax = plt.subplots()\n ax.set_xlim([min(min(p.get_xy()[:, 0]) for p in patches),\n max(max(p.get_xy()[:, 0]) for p in patches)])\n ax.set_ylim([min(min(p.get_xy()[:, 1]) for p in 
patches),\n max(max(p.get_xy()[:, 1]) for p in patches)])\n pCol = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)\n colors = 100 * np.random.rand(len(patches))\n pCol.set_array(np.array(colors))\n ax.add_collection(pCol)\n plt.show()", "def plot_coupled_chains(data: CoupledData, *, max_chains=8): # pylint: disable=too-many-locals\n chains = min(max_chains, data.chains)\n dim = data.dim\n ncols = 2 if dim == 1 else 4\n _, axes = plt.subplots(\n nrows=chains // ncols,\n ncols=ncols,\n figsize=(20, chains),\n sharex=True,\n sharey=True,\n constrained_layout=True,\n )\n\n for chain_idx, axis in enumerate(axes.ravel()):\n x_chain, y_chain = data.x[:, chain_idx, :], data.y[:, chain_idx, :]\n met = data.meeting_time[chain_idx] > 0\n iters = data.iters\n\n if met:\n meeting_time = data.meeting_time[chain_idx]\n else:\n meeting_time = iters\n\n after_meet = np.array([[], []])\n if dim == 1:\n y_line = np.vstack(\n (\n np.arange(data.lag, min(meeting_time + data.lag, iters)),\n y_chain[:meeting_time].flatten(),\n )\n ).T\n x_line = np.vstack(\n (np.arange(min(meeting_time, iters)), x_chain[:meeting_time].flatten())\n ).T\n if met:\n after_meet = np.vstack(\n (np.arange(meeting_time, iters), x_chain[meeting_time:].flatten())\n ).T\n axis.plot(meeting_time - 1, x_chain[meeting_time - 1, -1], \"rx\")\n else:\n x_line = x_chain[: meeting_time + data.lag, :2]\n y_line = y_chain[:meeting_time, :2]\n\n for pts in (x_line, y_line, after_meet):\n axis.plot(*pts.T, \"-\", lw=2)\n return axes", "def plotTI():\n min_dl = dlam[dlam != 0].min()\n S = int(0.4/min_dl)\n fig = pl.figure(figsize = (8,6))\n ax = fig.add_subplot(1,1,1)\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for k, spine in ax.spines.items():\n spine.set_zorder(12.2)\n\n xs, ndx, dx = [0], 0, 0.001\n colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']\n min_y, max_y = 0, 0\n\n lines = tuple()\n ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper\n lv_names2 = []\n for j in range(n_components):\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())\n\n for j in range(n_components):\n\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n\n # Get the coordinates.\n lj = lchange[:,j]\n x = lv[:,j][lj]\n y = y[lj]/P.beta_report\n\n if 'TI' in P.methods:\n # Plot the TI integration area.\n ss = 'TI'\n for i in range(len(x)-1):\n min_y = min(y.min(), min_y)\n max_y = max(y.max(), max_y)\n #pl.plot(x,y)\n if i%2==0:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)\n xlegend = [-100*wnum for wnum in range(len(lv_names2))]\n pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper\n\n if 'TI-CUBIC' in P.methods and not cubspl[j]==0:\n # Plot the TI-CUBIC interpolation curve.\n ss += ' and TI-CUBIC'\n xnew = numpy.arange(0, 1+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)\n\n else:\n # Plot the TI-CUBIC integration area.\n ss = 'TI-CUBIC'\n for i in range(len(x)-1):\n xnew = numpy.arange(x[i], x[i+1]+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n ynew[0], ynew[-1] = y[i], y[i+1]\n min_y = min(ynew.min(), min_y)\n max_y = 
max(ynew.max(), max_y)\n if i%2==0:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)\n\n # Store the abscissa values and update the subplot index.\n xs += (x+ndx).tolist()[1:]\n ndx += 1\n\n # Make sure the tick labels are not overcrowded.\n xs = numpy.array(xs)\n dl_mat = numpy.array([xs-i for i in xs])\n ri = range(len(xs))\n\n def getInd(r=ri, z=[0]):\n primo = r[0]\n min_dl=ndx*0.02*2**(primo>10)\n if dl_mat[primo].max()<min_dl:\n return z\n for i in r:\n for j in range(len(xs)):\n if dl_mat[i,j]>min_dl:\n z.append(j)\n return getInd(ri[j:], z)\n\n xt = [i if (i in getInd()) else '' for i in range(K)]\n pl.xticks(xs[1:], xt[1:], fontsize=10)\n pl.yticks(fontsize=10)\n #ax = pl.gca()\n #for label in ax.get_xticklabels():\n # label.set_bbox(dict(fc='w', ec='None', alpha=0.5))\n\n # Remove the abscissa ticks and set up the axes limits.\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n pl.xlim(0, ndx)\n min_y *= 1.01\n max_y *= 1.01\n pl.ylim(min_y, max_y)\n\n for i,j in zip(xs[1:], xt[1:]):\n pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')\n if ndx>1:\n lenticks = len(ax.get_ymajorticklabels()) - 1\n if min_y<0: lenticks -= 1\n if lenticks < 5:\n from matplotlib.ticker import AutoMinorLocator as AML\n ax.yaxis.set_minor_locator(AML())\n pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)\n pl.ylabel(r'$\\mathrm{\\langle{\\frac{ \\partial U } { \\partial \\lambda }}\\rangle_{\\lambda}\\/%s}$' % P.units, fontsize=20, color='#151B54')\n pl.annotate('$\\mathit{\\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')\n if not P.software.title()=='Sire':\n lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)\n for l in lege.legendHandles:\n l.set_linewidth(10)\n pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))\n pl.close(fig)\n return", "def plotTrajectory(xydata, colordata = 'time', cmap = cm.jet, size = 20, ax = None, line = True):\n \n if isinstance(colordata, basestring) and colordata in ['time']:\n c = np.linspace(0, len(xydata[:,0]), len(xydata[:,0]));\n else:\n c = colordata;\n \n if ax is None:\n ax = plt.gca();\n s = ax.scatter(xydata[:,0], xydata[:,1], c = c, cmap = cmap, s = size, marker = 'o', lw = 0);\n if isinstance(line, basestring):\n ax.plot(xydata[:,0], xydata[:,1], color = line); \n \n ax.get_figure().colorbar(s, ax = ax);\n return ax", "def plotOfSlice(self,index=0):\n\t\tj=index;\n\t\t[n,m]=_np.shape(self._data)\n\t\ty=_np.zeros(n);\n\t\tfor i in range(0,n):\n\t\t\t\ty[i]=self._data[i][j]*1e4\n\t\tp1=_plot.plot(shotno=[self.shotno],\n\t\t\t\t\t title=self.title+', t='+str(self.time[j]*1000)+'ms.')\n\t\tphi=_np.linspace(0,_np.pi*2,100)\n\t\tn1Fit=self._x[0,j]+self._x[1,j]*_np.sin(phi)+self._x[2,j]*_np.cos(phi)\n\t\tn2Fit=self._x[0,j]+self._x[3,j]*_np.sin(2*phi)+self._x[4,j]*_np.cos(2*phi)\n\t\tfitTotal=self._x[0,j]+self._x[1,j]*_np.sin(phi)+self._x[2,j]*_np.cos(phi)+self._x[3,j]*_np.sin(2*phi)+self._x[4,j]*_np.cos(2*phi)\n\n\t\t# plot\n\t\tp1.addTrace(yData=y,xData=self._phi,\n\t\t\t\t\tmarker='x',linestyle='',yLegendLabel='raw') \n\t\tp1.addTrace(yData=n1Fit,xData=phi,\n\t\t\t\t\tyLegendLabel='n=1') \n\t\tp1.addTrace(yData=n2Fit,xData=phi,\n\t\t\t\t\tyLegendLabel='n=2') 
\n\t\tp1.addTrace(yData=fitTotal,xData=phi,\n\t\t\t\t\tyLegendLabel='Superposition') \n\t\treturn p1", "def plot(self, **kwargs):\n base.plot_homline(self.line, **kwargs)", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def plot_trajectory(ax, tr):\n earth_circle = Circle((0,0), R, facecolor=(0.9,0.9,0.9))\n ax.set_facecolor('k')\n ax.add_patch(earth_circle)\n ax.plot(*tr.T, c='y')\n # Make sure our planet looks circular!\n ax.axis('equal')\n\n # Set Axes limits to trajectory coordinate range, with some padding.\n xmin, xmax = min(tr.T[0]), max(tr.T[0])\n ymin, ymax = min(tr.T[1]), max(tr.T[1])\n dx, dy = xmax - xmin, ymax - ymin\n PAD = 0.05\n ax.set_xlim(xmin - PAD*dx, xmax + PAD*dx)\n ax.set_ylim(ymin - PAD*dy, ymax + PAD*dy)", "def _plot_arm(self):\n fig, axs = plt.subplots()\n fig.show()\n axs.cla()\n axs.axis([-1, 2.5, -1, 2.5])\n axs.plot([0], [0], 'o')\n config_plots = []\n for t_step in range(0, int(self._t_sim / self._dt) + 1, 1000):\n axs.plot([0, self._x_1[t_step]], [0, self._y_1[t_step]])\n axs.plot(self._x_1[t_step], self._y_1[t_step], 'o')\n axs.plot(\n [self._x_1[t_step], self._x_2[t_step]],\n [self._y_1[t_step], self._y_2[t_step]]\n )\n axs.plot(self._x_2[t_step], self._y_2[t_step], 'o')\n axs.plot(\n [self._x_2[t_step], self._x_e[t_step]],\n [self._y_2[t_step], self._y_e[t_step]]\n )\n axs.plot(self._x_e[t_step], self._y_e[t_step], 'ro')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 0],\n self._obj_coords_plot[t_step, 1, 0], 'g+')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 1],\n self._obj_coords_plot[t_step, 1, 1], 'g.')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 2],\n self._obj_coords_plot[t_step, 1, 2], 'g.')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 3],\n self._obj_coords_plot[t_step, 1, 3], 'g.')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 4],\n self._obj_coords_plot[t_step, 1, 4], 'g.')\n plt.axis('off')\n plt.pause(1 / self._plot_fps)\n fig.canvas.draw()\n image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n config_plots.append(image.reshape(\n fig.canvas.get_width_height()[::-1] + (3, )))\n\n # Draw and create image\n return config_plots", "def plot(self):\n R = self.length\n\n plt.figure()\n for ii, car in enumerate(self.cars):\n theta = self.positions[ii] + car.position\n x = R * np.cos(theta)\n y = R * np.sin(theta)\n if ii == 0:\n plt.scatter(x, y, marker='x')\n else:\n plt.scatter(x, y)\n\n plt.axis('scaled')\n lim = (-1.2 * R, 1.2 * R)\n plt.ylim(lim)\n plt.xlim(lim)\n plt.savefig('traffic_{:d}.png'.format(self.time))\n plt.close()", "def plot(self, axes):\n if self.is_leaf:\n axes.plot([p.x for p in self.points], [p.y for p in self.points], 'bo')\n else:\n axes.plot([self.centre.x - self.size / 2, self.centre.x + self.size / 2],\n [self.centre.y, self.centre.y], '-', color='gray')\n axes.plot([self.centre.x, self.centre.x],\n [self.centre.y - self.size / 2, self.centre.y + self.size / 2],\n '-', color='gray')\n for child in self.children:\n child.plot(axes)\n axes.set_aspect(1)", "def replot(self,ax):\n self.XP_Plotter.replot(ax)\n # theoretical lines\n self.lines_theory[0].set_xdata(self.xx)\n self.lines_theory[1].set_xdata(self.xx)\n self.lines_theory[2].set_xdata(self.xx_itpl)\n for line in self.lines_theory: \n ax.draw_artist(line)", "def drawGrid(self):\n for div in range(NBCELL):\n sec = SSIZE*div\n self.can.create_line(0, sec, GSIZE, sec, width=3, 
fill=GFILL)\n self.can.create_line(sec, 0, sec, GSIZE, width=3, fill=GFILL)", "def plotroute(self, markers=True, equal_aspect=True, equal_lims=True, canvas_style=False):\n\n if markers:\n marker = 'o'\n else:\n marker = None\n\n fig, ax = plt.subplots()\n ax.plot(self.x, self.y, 'k', marker=marker)\n\n fig.tight_layout()\n\n if equal_aspect:\n ax.set_aspect('equal', 'box')\n\n # Set equal lims if chosen. If not, let matplotlib set lims automatically\n if equal_lims:\n # Determine plot limits centered on the route center point\n c = self.center()\n lim = round((max(self.size())/2) * 1.1, 0) # add approx 10% to the lims\n x_lim = [c[0] - lim, c[0] + lim]\n y_lim = [c[1] - lim, c[1] + lim]\n\n # Set lims on plot\n ax.set_xlim(x_lim)\n ax.set_ylim(y_lim)\n\n # Axis formating\n if canvas_style is False:\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.grid(True)\n\n elif canvas_style:\n ax.set_axis_off()\n\n return ax", "def plot(self, values=None):\r\n plt.cla()\r\n plt.xlim([0, self.pond_size[0]])\r\n plt.ylim([0, self.pond_size[1]])\r\n plt.xticks(np.arange(self.pond_size[0]), [])\r\n for i in range(self.pond_size[0]):\r\n plt.text(i+0.4, -0.5, str(i))\r\n plt.yticks(np.arange(self.pond_size[1]), [])\r\n for i in range(self.pond_size[1]):\r\n plt.text(-0.5, i+0.4, str(i))\r\n\r\n # Draw the trajectory\r\n t_x = np.array([t[0] for t in self.trajectory])\r\n t_y = np.array([t[1] for t in self.trajectory])\r\n plt.plot(t_x+0.5, t_y+0.5, 'r-o')\r\n\r\n # Draw currents and values\r\n for x in range(self.pond_size[0]):\r\n for y in range(self.pond_size[1]):\r\n if values is not None:\r\n plt.text(x, y, '%.1f'%values[y, x])\r\n c = self.currents[y][x]\r\n assert len(c)==4\r\n for i in range(4):\r\n if c[i] != '0':\r\n head_size = 0.15 if c[i] == '1' else 0.3\r\n d = self.current_directions[i]\r\n plt.arrow(x+0.5-0.4*d[0], y+0.5-0.4*d[1], (0.8-head_size)*d[0], (0.8-head_size)*d[1],\r\n head_width=head_size, head_length=head_size, overhang=1.0)\r\n\r\n # Draw start and end states\r\n plt.gcf().gca().add_artist(plt.Circle((self.start_state[0]+0.5, self.start_state[1]+0.5), 0.4, color='r', alpha=0.5))\r\n plt.gcf().gca().add_artist(plt.Circle((self.end_state[0]+0.5, self.end_state[1]+0.5), 0.4, color='g', alpha=0.5))\r\n plt.gcf().gca().add_artist(plt.Circle((self.current_state[0]+0.5, self.current_state[1]+0.5), 0.25, color='b', alpha=0.5))\r\n plt.grid(True)\r\n plt.pause(0.2)", "def plot_scatter_points(self):\n self.plot(1)", "def addPlot(self, X, Y, color = \"k\"):\n if color in self.colors: color = self.colors[color] \n \n path = QtGui.QPainterPath()\n path.moveTo(X[0],Y[0])\n for i in xrange(1, len(X)):\n path.lineTo(X[i],Y[i])\n self.pathItem_list.append(self.scene.addPath(path, QtGui.QPen(color)))", "def plot_edge(self, x0, x1, y0, y1):\n self.ax.plot([x0, x1], [y0, y1], '-o', color=self.color)", "def trajectory_plotter(trajectories, title = \"Trajectories\"):\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(projection=\"3d\")\r\n\r\n ax.set_xlabel('X')\r\n ax.set_ylabel('Y')\r\n ax.set_zlabel('Z')\r\n ax.set_title(title)\r\n\r\n for i in range(trajectories.shape[0]):\r\n ax.plot(trajectories[i, 0, :], trajectories[i, 1, :], trajectories[i, 2, :], label=f\"Bird {i}\")\r\n\r\n # ax.legend()\r\n\r\n return plt.show()", "def fixed_line(self, quantity, direction, coord1, coord2):\n\n coord1 = int(coord1)\n coord2 = int(coord2)\n # Get the scalar values\n # Filter out any undesired data that isn't on the planes\n data = self.get_line(quantity, direction, coord1, coord2)\n if direction == 'x':\n # z 
along rows, y along columns\n pos_data = self.X\n elif direction == 'y':\n # x along columns, z along rows\n pos_data = self.Y\n elif direction == 'z':\n # x along rows, y along columns\n pos_data = self.Z\n freq = self.conf['Simulation']['params']['frequency']\n wvlgth = (consts.c / freq) * 1E9\n title = 'Frequency = {:.4E} Hz, Wavelength = {:.2f} nm'.format(\n freq, wvlgth)\n ptype = \"%s_line_plot_%i_%i\" % (direction, coord1, coord2)\n if np.iscomplexobj(data):\n labels = ('Real Part', 'Z [um]', quantity, title)\n fig, ax = self.line_plot(pos_data, data.real, labels)\n labels = ('Imag Part', 'Z [um]', quantity, title)\n _, ax = self.line_plot(pos_data, data.imag, labels, ax=ax)\n else:\n labels = (None, 'Z [um]', quantity, title)\n fig, ax = self.line_plot(pos_data, data, labels)\n ax.legend()\n if self.conf['General']['save_plots']:\n name = labels[2] + '_' + ptype + '.png'\n sim_dir = os.path.expandvars(self.conf['General']['sim_dir'])\n path = os.path.join(sim_dir, name)\n fig.savefig(path)\n if self.conf['General']['show_plots']:\n plt.show()\n plt.close(fig)", "def draw(self):\r\n dt = m.get_instance().dt\r\n self.perception_history = m.get_instance().larvae[0].history\r\n t = np.arange(0,len(self.perception_history)*dt,dt)\r\n plt.plot(t,self.perception_history)\r\n plt.title('Perception History')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Perception (uM)')\r\n plt.show()", "def plot_trajectory_with_internal_states(self, plot='x', **kwargs):\n \n # Check if trajectory is already computed\n if self.traj == None:\n self.compute_trajectory()\n \n plotter = graphical.TrajectoryPlotter( self )\n plotter.plot( self.traj, plot, **kwargs)", "def render_lines(self, line_cells):\n for cell in line_cells:\n self.surface.set_at(cell.tuple('2D'), YELLOW)", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def graphplot(self):\n if self.binned:\n self.line.set_ydata(self.fft_bins_y)\n else:\n self.line.set_ydata(self.spec_y)\n self.line2.set_ydata(self.wave_y)\n self.ax1.draw_artist(self.ax1.patch)\n self.ax2.draw_artist(self.ax2.patch)\n self.ax1.draw_artist(self.line)\n self.ax2.draw_artist(self.line2)\n self.fig.canvas.update()\n self.fig.canvas.flush_events()", "def plot_line(self,x_0,y_0,x_1,y_1,col=\"black\",line_width=1,line_type=\"solid\"):\n self._fig.add_shape(\n go.layout.Shape(\n type=\"line\",\n x0=x_0,\n y0=y_0,\n x1=x_1,\n y1=y_1,\n line=dict(\n color=col,\n width=line_width,\n dash=line_type\n )\n )\n )", "def plot_graph(self):\n g = self.get_graph()\n plt.title(\"Our graph:\" + g.__str__())\n plt.xlabel(\"X\")\n plt.ylabel(\"-<\") # I should flip 'Y' letter so I decided to write it by a tricky way. 
:)\n for src, node in g.get_all_v().items():\n # Print the node point\n if node.location is None:\n pos = self.get_random_location() # get a elegant location\n node.location = GeoLocation(pos)\n plt.plot(node.location.x, node.location.y, marker='o', markerfacecolor='red', markersize=3, color='yellow')\n plt.text(node.location.x, node.location.y, str(node.key))\n # Print the edge line\n for dest in g.all_out_edges_of_node(src).keys():\n x1 = g.get_all_v()[src].location.x\n y1 = g.get_all_v()[src].location.y\n if g.get_all_v()[dest].location is None:\n pos = self.get_random_location()\n g.get_all_v()[dest].location = GeoLocation(pos)\n g.get_all_v()[dest].location = GeoLocation(pos)\n x2 = g.get_all_v()[dest].location.x\n y2 = g.get_all_v()[dest].location.y\n plt.arrow(x1, y1, x2 - x1, y2 - y1, width=0.00001, linewidth=0.05)\n plt.show()", "def show_tileselection(image, tile_selection, tile_dim=[200, 200]):\n fig, ax = plt.subplots()\n ax.imshow(image, cmap='gray')\n for r in np.arange(image.shape[0]+1, step=200):\n ax.plot([0, image.shape[1]], [r, r], 'r')\n for c in np.arange(image.shape[1]+1, step=200):\n ax.plot([c, c], [0, image.shape[0]], 'r') \n for tiler, tilec in zip(tile_selection[0], tile_selection[1]):\n ax.plot([tilec*tile_dim[0], tilec*tile_dim[0]], [tiler*tile_dim[0], (tiler+1)*tile_dim[0]], color=[0, 1, 0])\n ax.plot([(tilec+1)*tile_dim[0], (tilec+1)*tile_dim[0]], [tiler*tile_dim[0], (tiler+1)*tile_dim[0]], color=[0, 1, 0])\n ax.plot([tilec*tile_dim[0], (tilec+1)*tile_dim[0]], [tiler*tile_dim[0], tiler*tile_dim[0]], color=[0, 1, 0])\n ax.plot([tilec*tile_dim[0], (tilec+1)*tile_dim[0]], [(tiler+1)*tile_dim[0], (tiler+1)*tile_dim[0]], color=[0, 1, 0])\n ax.set_xlim(-5, image.shape[1]+5)\n ax.set_ylim(image.shape[0]+5, -5)\n ax.axis('off')\n return fig, ax", "def draw_tiles(self):\n db = self.double_buffer\n if db is not None:\n span_x = self.width\n span_y = self.height\n tiles_x = int(ceil(span_x/256.0))\n tiles_y = int(ceil(span_y/256.0))\n\n cc = cairo.Context(db)\n tiles = self.tile_loader.load_area(self.longitude,self.latitude,self.zoom,tiles_x,tiles_y)\n tile_number=0\n line_number=0\n\n x_center = self.width/2# - 128\n y_center = self.height/2# - 128\n offset_x,offset_y = self.tile_loader.gmap_tile_xy_from_coord(self.longitude,self.latitude,self.zoom)\n\n\n xtiles = len(tiles[0])\n ytiles = len(tiles)\n #print len(tiles),len(tiles[0])\n for line in tiles:\n for tile in line:\n x = (tile_number - int(xtiles/2)) * 256 + x_center\n y = (line_number - int(ytiles/2)) * 256 + y_center\n finalx = x - offset_x #+128\n finaly = y - offset_y #+128\n cc.set_source_surface(tile, finalx+self.dx, finaly+self.dy)\n cc.paint()\n tile_number += 1\n tile_number = 0\n line_number += 1\n\n self.draw_cross(cc,x_center,y_center)\n self.draw_points(cc)\n\n db.flush()\n\n else:\n print('Invalid double buffer')" ]
[ "0.6842867", "0.68287706", "0.65767425", "0.65242386", "0.648994", "0.6471235", "0.6305497", "0.62960494", "0.6275145", "0.6269169", "0.6093281", "0.6079582", "0.60568917", "0.6041457", "0.60041326", "0.59778225", "0.5945811", "0.5943831", "0.5938332", "0.59359473", "0.58843535", "0.5878874", "0.5878812", "0.58438206", "0.58257294", "0.58221424", "0.5809725", "0.58052903", "0.580519", "0.5791027", "0.57676595", "0.5767136", "0.57612187", "0.57598835", "0.57587355", "0.5738556", "0.57302296", "0.57302284", "0.5726543", "0.57148325", "0.5711316", "0.57039624", "0.5701005", "0.56851345", "0.56819016", "0.56662965", "0.56608814", "0.5651944", "0.5634227", "0.56330717", "0.56263167", "0.56154513", "0.56130385", "0.5612045", "0.5601593", "0.55960584", "0.55959916", "0.5586214", "0.55857134", "0.55853415", "0.5580939", "0.55700374", "0.5565583", "0.55540836", "0.55510795", "0.5550222", "0.55495554", "0.55256546", "0.5525409", "0.55011547", "0.54970497", "0.5489531", "0.5487691", "0.54847443", "0.547959", "0.54759306", "0.54751927", "0.5475119", "0.54743123", "0.5471855", "0.54681176", "0.5465413", "0.54640293", "0.5462519", "0.54619074", "0.54612035", "0.5460013", "0.54578406", "0.5453221", "0.54491705", "0.54440093", "0.5443548", "0.54433465", "0.5440253", "0.5439168", "0.54372686", "0.54352564", "0.54343003", "0.543293", "0.54261875" ]
0.6549081
3
Plots the cell trajectories in 3D as a line and a point at the last coordinate.
def plot_trajectories_3d(trajectories: pd.DataFrame, ax: Optional[plt.Axes] = None):
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(projection="3d")
    for cell in trajectories:
        ax.plot(
            cell["position_x"].values,
            cell["position_y"].values,
            cell["position_z"].values,
        )
        ax.scatter(
            cell["position_x"].values[-1],
            cell["position_y"].values[-1],
            cell["position_z"].values[-1],
            marker="o",
        )
    return ax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot3d(self):\n plot_rupture_wire3d(self)", "def plot_lines(self):\n self.plot(3)", "def plot_results_traj_3d(p_x, p_y, p_z, xmin, xmax, ymin, ymax, zmin, zmax):\n fig, ax = plt.subplots(2 , 2, figsize = (10, 10))\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n for t in np.arange(0, p_x.shape[1], step = 1): \n ax[0,0].plot(t, p_x[p, t], 'rx') \n ax[0,1].plot(t, p_y[p, t], 'gx') \n ax[1,0].plot(t, p_z[p, t], 'bx') \n ax[1,1].plot(t, p_x[p, t], 'rx') \n ax[1,1].plot(t, p_y[p, t], 'gx') \n ax[1,1].plot(t, p_z[p, t], 'bx') \n for a in ax.flat: \n a.set(xlabel = 'Time steps', ylabel = 'Position')\n ax[0,0].set_title('X (pix)') \n ax[0,0].set_ylim([xmin, xmax]) \n ax[0,1].set_title('Y (pix)') \n ax[0,1].set_ylim([ymin, ymax]) \n ax[1,0].set_title('Z (pix)') \n ax[1,0].set_ylim([zmin, zmax])\n ax[1,1].set_title('Positions combined') \n ax[1,1].set_ylim([np.array([xmin, ymin, zmin]).min(), np.array([xmax, ymax, zmax]).max()])", "def trajectoire(self):\n trajx = []\n trajy = []\n for i in range(0, len(self.pos)):\n trajx.append(self.pos[i].x)\n trajy.append(self.pos[i].y)\n plt.plot(trajx, trajy) # color=self.color)\n plt.show()", "def trajectory_plotter(trajectories, title = \"Trajectories\"):\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(projection=\"3d\")\r\n\r\n ax.set_xlabel('X')\r\n ax.set_ylabel('Y')\r\n ax.set_zlabel('Z')\r\n ax.set_title(title)\r\n\r\n for i in range(trajectories.shape[0]):\r\n ax.plot(trajectories[i, 0, :], trajectories[i, 1, :], trajectories[i, 2, :], label=f\"Bird {i}\")\r\n\r\n # ax.legend()\r\n\r\n return plt.show()", "def plot_tracks_3D(trackDF, polyline_df_xy, polyline_df_yz,\n output_fig_path=None, plot_style='white', line_color=None,\n fig_width=3, fig_height=3, fig_dpi=300,\n plot_xy_polylines=False, plot_yz_polylines=False,\n track_list=None, tracks_highlight=None, uniform_line_width=False,\n elevation=45, azimuth=60, axis_off=False,\n centering=True, draw_spot_start=False, draw_spot_end=False):\n import matplotlib as mpl\n from mpl_toolkits.mplot3d import Axes3D\n from matplotlib import cm\n # Create a colors dictionary to set same color for the daughter cells of the same cell division\n# colors = cm.jet(np.linspace(0, 1, df.cell_division_id.nunique()))\n# colors = cm.Set1(np.linspace(0, 1, df.cell_division_id.nunique()))\n colors = cm.Dark2(np.linspace(0, 1, trackDF.cell_division_id.nunique()))\n colors = [val for pair in zip(colors, colors) for val in pair]\n \n if plot_style=='white':\n AXES_COLOR = '#000000'#black\n mpl.rc('figure', facecolor='w', edgecolor=AXES_COLOR)\n mpl.rc('axes', facecolor='w', edgecolor=AXES_COLOR, labelcolor=AXES_COLOR)\n mpl.rc('xtick', color=AXES_COLOR)\n mpl.rc('ytick', color=AXES_COLOR)\n mpl.rc('grid', color='#EEEEEE')\n \n if plot_style=='dark':\n AXES_COLOR = '#FFFFFF'#white\n mpl.rc('figure', facecolor='k', edgecolor=AXES_COLOR)\n mpl.rc('axes', facecolor='k', edgecolor=AXES_COLOR, labelcolor=AXES_COLOR)\n mpl.rc('xtick', color=AXES_COLOR)\n mpl.rc('ytick', color=AXES_COLOR)\n mpl.rc('grid', color='gray')\n \n # plotting set up\n fig = plt.figure(figsize=(fig_width, fig_height), dpi=fig_dpi)\n ax = fig.add_axes([0, 0, 1.0, 1.0], projection='3d')\n \n if plot_style=='white':\n # *** Set the background color of the panes ***\n PANECOLOR = (1, 1, 1, 1.0) # white\n ax.w_xaxis.set_pane_color(PANECOLOR)\n ax.w_yaxis.set_pane_color(PANECOLOR)\n ax.w_zaxis.set_pane_color(PANECOLOR)\n # *** Set the line colors of x,y,z axes ***\n AXISCOLOR = (0, 0, 0, 1.0) # black\n ax.w_xaxis.line.set_color(AXISCOLOR)\n 
ax.w_yaxis.line.set_color(AXISCOLOR)\n ax.w_zaxis.line.set_color(AXISCOLOR)\n\n if plot_style=='dark':\n # *** Set the background color of the panes ***\n # PANECOLOR = (0.1, 0.1, 0.1, 1.0) # dark grey\n PANECOLOR = (0, 0, 0, 1.0) # black\n ax.w_xaxis.set_pane_color(PANECOLOR)\n ax.w_yaxis.set_pane_color(PANECOLOR)\n ax.w_zaxis.set_pane_color(PANECOLOR)\n # *** Set the line colors of x,y,z axes ***\n AXISCOLOR = (1.0, 1.0, 1.0, 1.0) # white\n ax.w_xaxis.line.set_color(AXISCOLOR)\n ax.w_yaxis.line.set_color(AXISCOLOR)\n ax.w_zaxis.line.set_color(AXISCOLOR)\n \n # calculate the range values and scales in each dimension\n tMin, tMax = trackDF.t.min(), trackDF.t.max()\n xMin, xMax = trackDF.x.min(), trackDF.x.max()\n yMin, yMax = trackDF.y.min(), trackDF.y.max()\n zMin, zMax = trackDF.z.min(), trackDF.z.max()\n print('x, y, z min: ', xMin, yMin, zMin)\n print('x, y, z max: ', xMax, yMax, zMax)\n if centering == False:\n shift_x, shift_y, shift_z = 0, 0, 0\n else:\n shift_x, shift_y, shift_z = np.mean([xMax, xMin]), np.mean([yMax, yMin]), np.mean([zMax, zMin])\n\n if track_list is None:\n track_list = trackDF.track_id.unique()\n for i, track in enumerate(track_list):\n temp = trackDF.loc[trackDF.track_id == track]\n temp = temp.sort_values('t', ascending=True)\n x, y, z = temp.x, temp.y, temp.z\n x, y, z = np.array(x), np.array(y), np.array(z)\n if (tracks_highlight is not None) and (track in tracks_highlight):\n if uniform_line_width:\n ax.plot(x-shift_x, y-shift_y, z-shift_z, '-', color='#FF00FF', alpha=.6, lw=.6)\n else:\n ax.plot(x-shift_x, y-shift_y, z-shift_z, '-', color='#FF00FF', alpha=.8, lw=.8)\n else:\n if line_color is None:\n ax.plot(x-shift_x, y-shift_y, z-shift_z, '-', color=colors[i], alpha=.6, lw=.6)\n else:\n ax.plot(x-shift_x, y-shift_y, z-shift_z, '-', color=line_color, alpha=.6, lw=.6)\n\n if draw_spot_start == True:\n ax.plot([x[0]-shift_x], [y[0]-shift_y], [z[0]-shift_z], 'ob', alpha=.4, markersize=2, markeredgewidth=0)\n if draw_spot_end == True:\n ax.plot([x[-1]-shift_x], [y[-1]-shift_y], [z[-1]-shift_z], 'or', alpha=.6, markersize=2, markeredgewidth=0)\n\n if plot_xy_polylines:\n plot_3D_polylines_xy(polyline_df_xy, 1, ax,\n line_color='#1A6A82', line_alpha=0.08,\n shift_x=shift_x, shift_y=shift_y, shift_z=shift_z)\n plot_3D_polylines_xy(polyline_df_xy, 192, ax,\n line_color='#A05B22', line_alpha=0.08,\n shift_x=shift_x, shift_y=shift_y, shift_z=shift_z)\n\n if plot_yz_polylines:\n plot_3D_polylines_yz(polyline_df_yz, 1, ax,\n line_color='#1A6A82', line_alpha=0.08,\n shift_x=shift_x, shift_y=shift_y, shift_z=shift_z)\n plot_3D_polylines_yz(polyline_df_yz, 192, ax,\n line_color='#A05B22', line_alpha=0.08,\n shift_x=shift_x, shift_y=shift_y, shift_z=shift_z)\n\n # *** adjust axis limits and turn on/off grids -- has to be after plotting ***\n x0, y0, z0 = xMin-shift_x, yMin-shift_y, zMin-shift_z\n if abs(x0) > abs(y0):\n y0 = x0\n else:\n x0 = y0\n# x0, y0, z0 = -100, -100, -40\n# x0, y0, z0 = -95, -95, -40\n# x0, y0, z0 = -120, -120, -40\n axisLength = abs(x0) * 2\n ax.set_xlim( x0, x0 + axisLength )\n ax.set_ylim( y0, y0 + axisLength )\n ax.set_zlim( z0, z0 + axisLength )\n \n # *** customize grid size ***\n grid_size = 40\n ax.set_xticks(np.arange( x0, x0 + axisLength + 1, grid_size))\n ax.set_yticks(np.arange( y0, y0 + axisLength + 1, grid_size))\n ax.set_zticks(np.arange( z0, z0 + axisLength + 1, grid_size))\n\n # *** add axis labels ***\n# ax.set_xlabel('x')\n# ax.set_ylabel('y')\n# ax.set_zlabel('z')\n ax.set_xlabel('')\n ax.set_ylabel('')\n ax.set_zlabel('')\n \n 
# *** add axis tick labels ***\n# ax.set_xticklabels([0, 40, 80, 120, 160])\n# ax.set_yticklabels([0, 40, 80, 120, 160])\n# ax.set_zticklabels([92, 52, 12, -28, -68])\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n\n\n# ax.grid(False)\n ax.view_init(elevation, azimuth) # elevation and azimuth angles for viewpoint settting\n\n # Flip the y-axis to match the image coordinates\n plt.gca().invert_yaxis()\n \n if axis_off:\n plt.axis('off')\n \n for o in fig.findobj():\n o.set_clip_on(False)\n for o in ax.findobj():\n o.set_clip_on(False)\n \n if output_fig_path is not None:\n plt.savefig(output_fig_path)\n \n return ax", "def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()", "def show(self):\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = plt.figure()\n ax = Axes3D(fig)\n pos = self.cluster.get_positions()\n from itertools import combinations\n for tri in self.mesh.simplices:\n for comb in combinations(tri, 2):\n x1 = pos[comb[0], 0]\n x2 = pos[comb[1], 0]\n y1 = pos[comb[0], 1]\n y2 = pos[comb[1], 1]\n z1 = pos[comb[0], 2]\n z2 = pos[comb[1], 2]\n ax.plot([x1, x2], [y1, y2], zs=[z1, z2], color=\"black\")\n plt.show()", "def get_3d_plot(three_d_matrix, ax, title, length):\r\n x, y, z = np.where(three_d_matrix != 0)\r\n ax.scatter(x, y, z, c='blue')\r\n ax.set_xlabel('x')\r\n ax.set_ylabel('y')\r\n ax.set_xlim(0, length)\r\n ax.set_ylim(0, length)\r\n ax.set_title(title)", "def visualize_3d(grbdir,x, y, z, t, thetax, thetay, name):\n # Set ax.azim and ax.elev to ra, dec\n global runconf\n\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n plt.suptitle(r\"Visualisation of {name} in 3d:$\\theta_x$={tx:0.1f},$\\theta_y$={ty:0.1f}\".format(name=name, tx=thetax, ty=thetay))\n # Z\n ax = plt.subplot(2, 2, 1, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = z.ra.deg\n ax.elev = z.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI pointing (z)\")\n\n # Transient\n ax = plt.subplot(2, 2, 2, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = t.ra.deg\n ax.elev = t.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from nominal \\n transient direction\")\n\n # X\n ax = plt.subplot(2, 2, 3, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = x.ra.deg\n ax.elev = x.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI X axis\")\n\n # Z\n ax = plt.subplot(2, 2, 4, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = y.ra.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI Y axis\")\n\n return", "def drawLine3D(x0,y0,z0,x1,y1,z1):\n dislin.strt3d(x0,y0,z0)\n dislin.conn3d(x1,y1,z1)", "def plot_3d(trj: TrajaDataFrame, **kwargs) -> matplotlib.collections.PathCollection:\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.set_xlabel(\"x\", fontsize=15)\n ax.set_zlabel(\"time\", fontsize=15)\n ax.set_ylabel(\"y\", fontsize=15)\n title = kwargs.pop(\"title\", \"Trajectory\")\n ax.set_title(f\"{title}\", fontsize=20)\n ax.plot(trj.x, trj.y, trj.index)\n cmap = kwargs.pop(\"cmap\", 
\"winter\")\n cm = plt.get_cmap(cmap)\n NPOINTS = len(trj)\n ax.set_prop_cycle(color=[cm(1.0 * i / (NPOINTS - 1)) for i in range(NPOINTS - 1)])\n for i in range(NPOINTS - 1):\n ax.plot(trj.x[i : i + 2], trj.y[i : i + 2], trj.index[i : i + 2])\n\n dist = kwargs.pop(\"dist\", None)\n if dist:\n ax.dist = dist\n labelpad = kwargs.pop(\"labelpad\", None)\n if labelpad:\n from matplotlib import rcParams\n\n rcParams[\"axes.labelpad\"] = labelpad\n\n return ax", "def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()", "def plot_surface_3D(self, length = 30, fps = 30, **kwargs):\n fig = utils.get_figure(scale = 3)\n ax = fig.add_subplot(111, projection = '3d')\n\n # surface_x = self.xi_1_mesh\n # surface_y = self.xi_2_mesh\n # surface_x, surface_y, surface_z = self.surface()\n xyz = self.surface()\n\n # surface_x, surface_y = np.meshgrid(surface_x, surface_y)\n\n # print(np.shape(surface_x))\n # print(np.shape(surface_y))\n # print(np.shape(surface_z))\n\n control_points_x = np.array([control_point[0] for control_point in self.control_net.values()])\n control_points_y = np.array([control_point[1] for control_point in self.control_net.values()])\n control_points_z = np.array([control_point[2] for control_point in self.control_net.values()])\n\n # x_min = min(np.min(surface_x), np.min(control_points_x))\n # x_max = max(np.max(surface_x), np.max(control_points_x))\n # x_range = np.abs(x_max - x_min)\n #\n # y_min = min(np.min(surface_y), np.min(control_points_y))\n # y_max = max(np.max(surface_y), np.max(control_points_y))\n # y_range = np.abs(y_max - y_min)\n #\n # z_min = min(np.min(surface_z), np.min(control_points_z))\n # z_max = max(np.max(surface_z), np.max(control_points_z))\n # z_range = np.abs(z_max - z_min)\n #\n # ax.set_xlim(x_min - 0.05 * x_range, x_max + 0.05 * x_range)\n # ax.set_ylim(y_min - 0.05 * y_range, y_max + 0.05 * y_range)\n # ax.set_zlim(z_min - 0.05 * z_range, z_max + 0.05 * z_range)\n\n ax.scatter(control_points_x, control_points_y, control_points_z, depthshade = False, **CONTROL_POLYGON_KWARGS)\n\n # print(np.max(surface_x), np.max(surface_y), np.max(surface_z))\n # print(np.min(surface_x), np.min(surface_y), np.min(surface_z))\n # print(surface_x)\n # print(surface_y)\n # print(surface_z)\n xyz = np.reshape(xyz, (-1, 3))\n print(xyz.shape)\n x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]\n ax.scatter(x, y, z)\n # ax.plot_trisurf(\n # x, y, z,\n # cmap = plt.get_cmap('viridis'),\n # linewidth = 0,\n # antialiased = True,\n # )\n # ax.plot_surface(surface_x, surface_y, surface_z, rstride = 1, cstride = 1)\n # ax.plot_trisurf(surface_x, 
surface_y, surface_z)\n # ax.plot_trisurf(surface_x, surface_y, surface_z, **CURVE_KWARGS)\n\n ax.axis('off')\n\n ax.view_init(elev = 45, azim = 0) # note that this resets ax.dist to 10, so we can't use it below\n ax.dist = 7.5 # default is 10, so zoom in a little because there's no axis to take up the rest of the space\n\n plt.show()\n utils.save_current_figure(**kwargs)\n\n ### ANIMATION ###\n\n frames = length * fps\n\n writer = anim.writers['ffmpeg'](fps = fps, bitrate = 2000) # don't need a very high bitrate\n\n def animate(frame):\n print(frame, frames, frame / frames)\n ax.azim = 360 * frame / frames # one full rotation\n return [] # must return the list of artists we modified (i.e., nothing, since all we did is rotate the view)\n\n ani = anim.FuncAnimation(fig, animate, frames = frames, blit = True)\n ani.save(f\"{os.path.join(kwargs['target_dir'], kwargs['name'])}.mp4\", writer = writer)\n\n plt.close()", "def plot_3d(results_list): \n x_range = range(len(results_list[0]))\n fig = plt.figure()\n axe = Axes3D(fig)\n\n for idx, result in enumerate(results_list):\n axe.plot(x_range, result, idx)\n plt.show()", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def plot_3d(self, ax_3d: Axes3D, lims_x: array_like = (-1, 1), lims_y: array_like = (-1, 1), **kwargs) -> None:\n X, Y, Z = self.to_mesh(lims_x, lims_y)\n\n ax_3d.plot_surface(X, Y, Z, **kwargs)", "def plot_results_3d(p_x, p_y, p_z, h_exp = 0.5):\n plt.figure(figsize = (10, 10))\n ax3d = plt.axes(projection = '3d') \n\n color=iter(cm.rainbow(np.linspace(0,1,p_x.shape[0]))) # (1)\n labels = ['Particle ' + str(pl+1) for pl in np.arange(0, p_x.shape[0], step = 1)]\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n c = next(color) # (1)\n for t in np.arange(0, p_x.shape[1], step = 1): \n ax3d.plot3D(p_x[p, t], p_y[p, t], p_z[p, t], 'x', c = c, label = labels[p]) \n legend_without_duplicate_labels(ax3d)\n ax3d.set_xlabel('X (pixels)') \n ax3d.set_ylabel('Y (pixels') \n ax3d.set_zlabel('Z (pixels)') \n ax3d.set_xlim([origin-150,origin+150])\n ax3d.set_ylim([origin-150,origin+150])\n ax3d.set_zlim([origin-150,origin+150])\n ax3d.set_title('3D particle trajectories - H = ' + str(h_exp))", "def plot(self):\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=Axes3D.name)\n\n # TODO Use numpy to rotate esp_points matrix for faster variable access.\n ax.scatter(\n xs=[i[0][0] for i in self.esp_points],\n ys=[i[0][1] for i in self.esp_points],\n zs=[i[0][2] for i in self.esp_points],\n c=[i[1] for i in self.esp_points],\n marker='o',\n s=2,\n alpha=0.5\n )\n\n ax.scatter(\n xs=[i[0][0] for i in self.atom_points],\n ys=[i[0][1] for i in self.atom_points],\n zs=[i[0][2] for i in self.atom_points],\n c=[i[1] for i in self.atom_points],\n marker='X',\n s=100\n )\n\n plt.show()", "def plot_3d_object(object_):\n \n # Initialize renderer instance\n r = Renderer()\n\n # Add surfaces and goal regions to the renderer instance\n for surf in object_:\n r.add((object_[surf][0],'b',1))\n if len(object_[surf])>2:\n r.add((object_[surf][2],'r',1))\n r.add((gPoint(-15,-15,-15),'k',1))\n 
r.show()", "def full_3d(self, quantity):\n # The data just tells you what integer grid point you are on. Not what actual x,y coordinate you\n # are at\n x = np.arange(0, self.period, self.dx)\n y = np.arange(0, self.period, self.dy)\n z = np.arange(0, self.height + self.dz, self.dz)\n points = np.array(list(itertools.product(z, x, y)))\n # Get the scalar\n scalar = self.get_scalar_quantity(quantity)\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now plot!\n self.scatter3d(points[:, 1], points[:, 2], points[\n :, 0], scalar.flatten(), labels, 'full_3d')", "def plot_3d_plot(self, features, headers, labels):\n self.master_plot.scatter(features[:, 0], features[:, 1], features[:, 2], c=labels)\n self.master_plot.set_xlabel(headers[0])\n self.master_plot.set_ylabel(headers[1])\n self.master_plot.set_zlabel(headers[2])\n\n plot_hyperplane(self.clf, self.master_plot, colors='orange')", "def plot_phase_plane_trajectory_3d(self , x_axis=0, y_axis=1, z_axis=2):\n \n # Check is trajectory is already computed\n if self.traj == None:\n self.compute_trajectory()\n \n plotter = self.get_plotter()\n \n return plotter.phase_plane_trajectory_3d( \n self.traj, x_axis , y_axis, z_axis)", "def plot_pose3RT_on_axes(axes, gRp, origin, axis_length=0.1, center_plot=False, line_obj_list=None, zoom_to_fit=False):\n # draw the camera axes\n x_axis = origin + gRp[:, 0] * axis_length\n linex = np.append(origin, x_axis, axis=0)\n\n y_axis = origin + gRp[:, 1] * axis_length\n liney = np.append(origin, y_axis, axis=0)\n\n z_axis = origin + gRp[:, 2] * axis_length\n linez = np.append(origin, z_axis, axis=0)\n\n\n if line_obj_list is None:\n xaplt = axes.plot(linex[:, 0], linex[:, 1], linex[:, 2], 'r-')\n yaplt = axes.plot(liney[:, 0], liney[:, 1], liney[:, 2], 'g-')\n zaplt = axes.plot(linez[:, 0], linez[:, 1], linez[:, 2], 'b-')\n\n if center_plot:\n center_3d_plot_around_pt(axes,origin[0],zoom_to_fit=zoom_to_fit)\n return [xaplt, yaplt, zaplt]\n\n else:\n line_obj_list[0][0].set_data(linex[:, 0], linex[:, 1])\n line_obj_list[0][0].set_3d_properties(linex[:,2])\n\n line_obj_list[1][0].set_data(liney[:, 0], liney[:, 1])\n line_obj_list[1][0].set_3d_properties(liney[:,2])\n\n line_obj_list[2][0].set_data(linez[:, 0], linez[:, 1])\n line_obj_list[2][0].set_3d_properties(linez[:,2])\n\n if center_plot:\n center_3d_plot_around_pt(axes,origin[0],zoom_to_fit=zoom_to_fit)\n return line_obj_list", "def plot_3d_heads(ax, vertices, faces):\n # extract vertices coordinates\n x_V = vertices[:, 2]\n y_V = vertices[:, 0]\n z_V = vertices[:, 1]\n\n # plot link between vertices\n for F in range(len(faces)):\n V0 = faces[F, 0]\n V1 = faces[F, 1]\n V2 = faces[F, 2]\n V3 = faces[F, 3]\n ax.plot([x_V[V0], x_V[V1]],\n [y_V[V0], y_V[V1]],\n [z_V[V0], z_V[V1]],\n '-', color= 'grey', linewidth=0.3)\n ax.plot([x_V[V1], x_V[V2]],\n [y_V[V1], y_V[V2]],\n [z_V[V1], z_V[V2]],\n '-', color= 'grey', linewidth=0.3)\n ax.plot([x_V[V2], x_V[V3]],\n [y_V[V2], y_V[V3]],\n [z_V[V2], z_V[V3]],\n '-', color= 'grey', linewidth=0.3)\n ax.plot([x_V[V3], x_V[V1]],\n [y_V[V3], y_V[V1]],\n [z_V[V3], z_V[V1]],\n '-', color= 'grey', linewidth=0.3)", "def plots(x_bef,y_bef,z_bef):\r\n # Makes a 3-D plot of the x, y and z axes representing the ball's total trajectory\r\n plt.figure(3)\r\n plot3 = plt.axes(projection=\"3d\")\r\n plot3.plot3D(x_bef,y_bef,z_bef,'blue')\r\n plot3.set_xlabel('x (ft)')\r\n plot3.set_ylabel('y (ft)')\r\n plot3.set_zlabel('z (ft)')\r\n plot3.set_title('Total Trajectory')\r\n \r\n # Makes a 2-D plot of the x, and z axes representing the 
ball's total 2-D trajectory\r\n plt.figure(4)\r\n plt.plot(x_bef,z_bef)\r\n plt.xlabel('x (ft)')\r\n plt.ylabel('z (ft)')\r\n plt.title('z (ft) vs x (ft)')\r\n plt.show()", "def plot_trajectories_XYZ(t_start,t_stop):\n \n time, ankle_l_trajectory, ankle_r_trajectory,foot_l_contact,foot_r_contact,muscle_lh_activations, muscle_rh_activations,muscle_lh_forces,muscle_rh_forces,joint_lh_positions,joint_rh_positions = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time = time[index_start:index_end+1]\n ankle_l_trajectory = ankle_l_trajectory[index_start:index_end+1,:]\n ankle_r_trajectory = ankle_r_trajectory[index_start:index_end+1,:]\n \n #time=np.linspace(1,len(ankle_l_trajectory[:,0]),len(ankle_l_trajectory[:,0]));\n \n plt.figure('Trajectories')\n plt.subplot(311)\n plt.plot(time,ankle_l_trajectory[:,0])\n plt.plot(time,ankle_r_trajectory[:,0])\n #plt.title('Trajectory of the X component')\n plt.xlabel('Time [s]')\n plt.ylabel('X Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(312)\n plt.plot(time,ankle_l_trajectory[:,1])\n plt.plot(time,ankle_r_trajectory[:,1])\n #plt.title('Trajectory of the Y component')\n plt.xlabel('Time [s]')\n plt.ylabel('Y Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(313)\n plt.plot(time,ankle_l_trajectory[:,2])\n plt.plot(time,ankle_r_trajectory[:,2])\n #plt.title('Trajectory of the Z component')\n plt.xlabel('Time [s]')\n plt.ylabel('Z Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n# plt.suptitle('Decomposition of the trajectories of the hind feet')\n return", "def plot3d(self,datarange=None,nx=100,ny=100,clf=True,cb=True,data='auto',**kwargs):\n from enthought.mayavi import mlab as M\n from operator import isMappingType\n\n if data == 'auto':\n if self.data:\n data = self.data[:2]\n else:\n data = None\n\n if data: #TODO:correct coord conv\n xd,yd = data[0][0],data[0][1]\n if datarange is None:\n datarange = (np.min(xd),np.max(xd),np.min(yd),np.max(yd))\n maxmind = (np.max(data[1]),np.min(data[1]))\n elif datarange is None:\n if self.rangehint is not None:\n datarange = self.rangehint\n else:\n raise ValueError(\"Can't choose limits for plotting without data or a range hint\")\n maxmind = None\n\n grid = np.mgrid[datarange[0]:datarange[1]:1j*nx,datarange[2]:datarange[3]:1j*ny]\n res = self(grid)\n\n# if maxmind:\n# norm = plt.normalize(min(np.min(res),maxmind[1]),max(np.max(res),maxmind[0]))\n# else:\n# norm = plt.normalize(np.min(res),np.max(res))\n\n if clf:\n M.clf()\n\n M.mesh(grid[0],grid[1],res)\n\n if cb:\n if isMappingType(cb):\n M.colorbar(**cb)\n else:\n M.colorbar()\n\n if data:\n if isMappingType(data):\n kwscat = dict(data)\n else:\n kwscat = {}\n zd = data[1]\n zres = zd-self((xd,yd))\n kwscat.setdefault('scale_mode','none')\n kwscat.setdefault('scale_factor','auto')\n g = M.points3d(xd,yd,zd,zres,**kwscat)\n if kwscat['scale_factor'] == 'auto':\n g.glyph.glyph.scale_factor /= 2\n\n #M.xlim(datarange[0],datarange[1])\n #M.ylim(datarange[2],datarange[3])", "def init_render(self):\n plt.ion() # interactive plot mode, panning, zooming enabled\n self.fig = plt.figure(figsize=(9,7)) # create figure object\n self.ax = self.fig.add_subplot(111, projection=\"3d\") # attach z-axis to plot\n # set axe limits and labels\n self.ax.set_xlim([-self.l1max, self.l1max])\n self.ax.set_ylim([-self.l1max, self.l1max])\n self.ax.set_zlim([-self.l1max, self.l1max])\n 
self.ax.set_xlabel(\"X\")\n self.ax.set_ylabel(\"Y\")\n self.ax.set_zlabel(\"Z\")\n # add 3 arrows of coordinate base frame\n ax_base = Arrow3D([0.0, self.arrow_len], [0.0, 0.0], [0.0, 0.0],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"r\")\n ay_base = Arrow3D([0.0, 0.0], [0.0, self.arrow_len], [0.0, 0.0],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"g\")\n az_base = Arrow3D([0.0, 0.0], [0.0, 0.0], [0.0, self.arrow_len],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"b\")\n self.ax.add_artist(ax_base)\n self.ax.add_artist(ay_base)\n self.ax.add_artist(az_base)\n plt.show(block=False) # display figure and bring focus (once) to plotting window\n self.fig.tight_layout() # fits the plot to window size", "def plot3d(x, labels):\n trace0 = go.Scatter3d(\n x = x[labels[:,0]==0][:,0],\n y = x[labels[:,0]==0][:,1],\n z = x[labels[:,0]==0][:,2],\n mode = 'markers',\n marker = dict(\n size=5\n )\n )\n \n trace1 = go.Scatter3d(\n x = x[labels[:,0]==1][:,0],\n y = x[labels[:,0]==1][:,1],\n z = x[labels[:,0]==1][:,2],\n mode = 'markers',\n marker = dict(\n size=5\n )\n )\n \n layout = go.Layout(\n margin=dict(\n l=0,\n r=0,\n b=0,\n t=0\n )\n )\n \n data = [trace0, trace1]\n \n fig = go.Figure(data=data, layout=layout)\n \n iplot(fig)", "def plot3D(x):\n cycol = cycle('bgrcmk')\n fig = plt.figure()\n ax = Axes3D(fig)\n for i in range(5):\n ax.scatter(x[:, i, 0], x[:, i, 1], x[:, i, 2], c=next(cycol),\n marker='.')\n plt.show()", "def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()", "def plot_pose3RT_on_axes(axes, gRp, origin, axis_length=0.1, center_plot=False, line_obj_list=None):\n # draw the camera axes\n x_axis = origin + gRp[:, 0] * axis_length\n linex = np.append(origin, x_axis, axis=0)\n \n y_axis = origin + gRp[:, 1] * axis_length\n liney = np.append(origin, y_axis, axis=0)\n\n z_axis = origin + gRp[:, 2] * axis_length\n linez = np.append(origin, z_axis, axis=0)\n\n\n if line_obj_list is None:\n xaplt = axes.plot(linex[:, 0], linex[:, 1], linex[:, 2], 'r-') \n yaplt = axes.plot(liney[:, 0], liney[:, 1], liney[:, 2], 'g-') \n zaplt = axes.plot(linez[:, 0], linez[:, 1], linez[:, 2], 'b-')\n \n if center_plot:\n center_3d_plot_around_pt(axes,origin[0])\n return [xaplt, yaplt, zaplt]\n \n else:\n line_obj_list[0][0].set_data(linex[:, 0], linex[:, 1])\n line_obj_list[0][0].set_3d_properties(linex[:,2])\n \n line_obj_list[1][0].set_data(liney[:, 0], liney[:, 1])\n line_obj_list[1][0].set_3d_properties(liney[:,2])\n \n line_obj_list[2][0].set_data(linez[:, 0], linez[:, 1])\n line_obj_list[2][0].set_3d_properties(linez[:,2])\n\n if center_plot:\n center_3d_plot_around_pt(axes,origin[0])\n return line_obj_list", "def plot_3D_compare(true_lab, pred_lab):\n ref_shape = [true_lab.shape[1], true_lab.shape[2], true_lab.shape[3]]\n true_loc = np.where(true_lab == 1)\n pred_loc = np.where(pred_lab == 1)\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n ax.scatter3D(true_loc[0], 
true_loc[1], true_loc[2], marker=\".\", alpha=0.9)\n ax.scatter3D(pred_loc[0], pred_loc[1], pred_loc[2], marker=\".\", alpha=0.05)\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n plt.show()", "def plotTerrain3d(self, gdf: gpd.GeoDataFrame, fig_size: tuple=(12, 10), size: float=0.01):\n fig, ax = plt.subplots(1, 1, figsize=fig_size)\n ax = plt.axes(projection='3d')\n ax.scatter(gdf.geometry.x, gdf.geometry.y, gdf.elevation, s=size)\n plt.show()", "def plot_latency_3d(latency, type):\n init()\n _n = len(latency.columns)\n x = y = [i for i in range(_n)]\n T, A = np.meshgrid(x, y)\n _queueing_delay_keywords = {'queueing delay', 'qd'}\n if type in _queueing_delay_keywords:\n zlabel = 'Average Queueing Delay (ns)'\n else:\n zlabel = 'Average Transfer Delay (ns)'\n xlabel = 'Source Node'\n ylabel = 'Destination Node'\n std_plot = False\n if isinstance(latency, tuple):\n mean_df = latency[0]\n std_df = latency[1]\n std_plot = True\n elif isinstance(latency, pd.DataFrame):\n mean_df = latency\n mean = mean_df.values\n if std_plot is True:\n std = std_df.values\n zlabel = ('STD of ' + zlabel)\n std_transpose = std.transpose()\n dz = std_transpose.flatten()\n else:\n mean_transpose = mean.transpose()\n dz = mean_transpose.flatten()\n # Plotting\n fig = plt.figure(figsize=(15, 15), dpi=300)\n ax = fig.gca(projection='3d')\n\n Xi = T.flatten()\n Yi = A.flatten()\n Zi = np.zeros(mean.size)\n dx = dy = 0.5\n ax.bar3d(Xi, Yi, Zi, dx, dy, dz, color=sns.color_palette('RdBu_r')[0], shade=False)\n ax.view_init(20, 37)\n ax.set_xlabel(xlabel, fontsize=15)\n ax.set_ylabel(ylabel, fontsize=15)\n ax.set_zlabel(zlabel, fontsize=15)\n return fig", "def visualize(s):\n n = 3\n fig, ax = plt.subplots(1, n, sharex=True, sharey=True)\n for x, y in zip(range(n), [s.x, s.y, s.z]):\n plot(ax, x, y)\n\n plt.tight_layout()\n plt.show()", "def newplot3(*args, **kwargs):\n\n if 'linewidth' and 'lw' not in kwargs.keys():\n kwargs['linewidth'] = 2\n\n fig = plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)\n ax = fig.add_subplot(111, projection='3d')\n\n x = np.asarray(args[0], dtype=float)\n y = np.asarray(args[1], dtype=float)\n z = np.asarray(args[2], dtype=float)\n\n if z.ndim == 2:\n if x.ndim < 2:\n x = np.tile(x, z.shape[1]).reshape(z.T.shape).T\n if y.ndim < 2:\n y = np.tile(y, z.shape[0]).reshape(z.shape)\n\n # Plot each array independently\n for n in range(len(z)):\n ax.plot(x[n], y[n], z[n], *args[3:], **kwargs)\n else:\n ax.plot(*args, **kwargs)", "def plotMesh(verts,tris):\n x = verts[:,0]\n y = verts[:,1]\n\n plt.figure()\n plt.gca().set_aspect('equal')\n plt.triplot(x, y, tris, 'k-')\n plt.title('Unstructured Mesh')\n plt.xlabel('distance (m)')\n plt.ylabel('distance (m)')", "def test_3d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n db.close()\n\n date = pd.to_datetime('2015-04-01')\n self.full_iv.get_data()\n df_date0 = self.full_iv.df_all.query('date == %r' % date)\n df_date1 = df_iv.query('date == %r' % date)\n df_date = pd.concat([df_date0, df_date1])\n \"\"\":type: pd.DataFrame\"\"\"\n\n x = df_date['dte']\n y = df_date['strike']\n z = df_date['impl_vol']\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n # noinspection PyUnresolvedReferences\n ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)\n # ax.plot_wireframe(x, y, z, rstride=1, cstride=1)\n 
plt.show()", "def force_show(sub_Idx):\n force_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/forces/force_' + f'{sub_Idx:02d}' + '.txt'\n image_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/images/'\n force_num = len(glob.glob(image_path + '*.jpg'))\n force_list = load_force_txt(force_path,force_num)\n print('showing '+f'{force_num:03d}'+ ' raw forces for subject ' + f'{sub_Idx:02d}')\n\n fig = plt.figure(figsize = (10, 7)) \n ax = plt.axes(projection =\"3d\") \n\n for x, y, z in force_list:\n ax.scatter3D(x, y, z, color = \"green\")\n ax.set_xlabel('X-axis', fontweight ='bold') \n ax.set_ylabel('Y-axis', fontweight ='bold') \n ax.set_zlabel('Z-axis', fontweight ='bold')\n plt.title(\"3D force data\") \n plt.show()", "def plot_trajectory(ax, tr):\n earth_circle = Circle((0,0), R, facecolor=(0.9,0.9,0.9))\n ax.set_facecolor('k')\n ax.add_patch(earth_circle)\n ax.plot(*tr.T, c='y')\n # Make sure our planet looks circular!\n ax.axis('equal')\n\n # Set Axes limits to trajectory coordinate range, with some padding.\n xmin, xmax = min(tr.T[0]), max(tr.T[0])\n ymin, ymax = min(tr.T[1]), max(tr.T[1])\n dx, dy = xmax - xmin, ymax - ymin\n PAD = 0.05\n ax.set_xlim(xmin - PAD*dx, xmax + PAD*dx)\n ax.set_ylim(ymin - PAD*dy, ymax + PAD*dy)", "def plot_3D(Y_data, num_area):\n ref_shape = [Y_data.shape[0], Y_data.shape[1], Y_data.shape[2]]\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n for a in np.arange(1, num_area+1):\n loc = np.where(Y_data == a)\n ax.scatter3D(loc[0], loc[1], loc[2], marker=\".\", alpha=0.9)\n\n plt.show()", "def plot_curve_3D(self, length = 30, fps = 30, **kwargs):\n fig = utils.get_figure(scale = 3)\n ax = fig.add_subplot(111, projection = '3d')\n\n curve_x, curve_y, curve_z = self.curve()\n\n control_points_x = np.array([control_point[0] for control_point in self.control_points])\n control_points_y = np.array([control_point[1] for control_point in self.control_points])\n control_points_z = np.array([control_point[2] for control_point in self.control_points])\n\n x_min = min(np.min(curve_x), np.min(control_points_x))\n x_max = max(np.max(curve_x), np.max(control_points_x))\n x_range = np.abs(x_max - x_min)\n\n y_min = min(np.min(curve_y), np.min(control_points_y))\n y_max = max(np.max(curve_y), np.max(control_points_y))\n y_range = np.abs(y_max - y_min)\n\n z_min = min(np.min(curve_z), np.min(control_points_z))\n z_max = max(np.max(curve_z), np.max(control_points_z))\n z_range = np.abs(z_max - z_min)\n\n ax.set_xlim(x_min - 0.05 * x_range, x_max + 0.05 * x_range)\n ax.set_ylim(y_min - 0.05 * y_range, y_max + 0.05 * y_range)\n ax.set_zlim(z_min - 0.05 * z_range, z_max + 0.05 * z_range)\n\n ax.plot(control_points_x, control_points_y, control_points_z, **CONTROL_POLYGON_KWARGS)\n ax.plot(curve_x, curve_y, curve_z, **CURVE_KWARGS)\n\n ax.axis('off')\n\n ax.view_init(elev = 45, azim = 0) # note that this resets ax.dist to 10, so we can't use it below\n ax.dist = 7.5 # default is 10, so zoom in a little because there's no axis to take up the rest of the space\n\n ### ANIMATION ###\n\n frames = length * fps\n\n writer = anim.writers['ffmpeg'](fps = fps, bitrate = 2000) # 
don't need a very high bitrate\n\n def animate(frame):\n print(frame, frames, frame / frames)\n ax.azim = 360 * frame / frames # one full rotation\n return [] # must return the list of artists we modified (i.e., nothing, since all we did is rotate the view)\n\n ani = anim.FuncAnimation(fig, animate, frames = frames, blit = True)\n ani.save(f\"{os.path.join(kwargs['target_dir'], kwargs['name'])}.mp4\", writer = writer)\n\n plt.close()", "def plot_3D(self, position, legend):\n # Initializing the figure\n fig = plt.figure(figsize=(10, 10))\n ax = fig.gca(projection='3d')\n # Looping over all object arrays in the position matrix,\n # adding it to the plot\n for i in range(self.numbodies):\n ax.plot(position[i, 0, :], position[i, 1, :], position[i, 2, :])\n # Decorating the plot\n ax.set_xlabel('x [AU]', fontsize=16)\n ax.set_ylabel('y [AU]', fontsize=16)\n ax.set_zlabel('z [AU]', fontsize=16)\n ax.set_title('The solar system. \\n %d years from Sep. 18 2018' \\\n %(self.t), fontsize=24)\n ax.legend(legend, loc=2, fontsize='small')\n plt.axis('equal')", "def plot_data(x):\n if DATA_2D:\n plt.scatter(x[:, 0], x[:, 1])\n plt.show()\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x[:, 0], x[:, 1], x[:, 2])\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()", "def draw_trajectory(filepath: str, timestamps: bool = False):\n\n t, x, y, z = coordinates.parse_coordinates_file(filepath=filepath)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n plt.xlabel('X', fontsize=10, rotation = 0)\n plt.ylabel('Y', fontsize=10, rotation = 0)\n ax.set_zlabel('Z', fontsize=10, rotation = 0)\n\n # Add timestamps to plot\n if timestamps:\n for i in range(len(t)):\n timea = str(datetime.timedelta(seconds=t[i]))\n ax.annotate(timea, (x[i], y[i], z[i]),)\n\n ax.scatter(x, y, z, label='Траектория движения НКА')\n # ax.legend()\n\n plt.show()", "def plot(self):\n\t\tself.plotOfXray().plot()", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plot(self):\n fx = self.fitness_functions(self.archive)\n n = len(fx[0])\n\n if n == 2:\n plt.xlabel(\"F1\")\n plt.ylabel(\"F2\")\n plt.suptitle(\"Pareto Front\")\n plt.scatter(fx[:,0], fx[:,1], label='Archive')\n plt.show()\n elif n == 3:\n plt.figure()\n ax = plt.axes(projection='3d')\n ax.scatter(fx[:, 0], fx[:, 1], fx[:, 2])\n ax.set_xlabel(\"F1\")\n ax.set_ylabel(\"F2\")\n ax.set_zlabel(\"F3\")\n plt.suptitle(\"Pareto Front of Archive\")\n plt.show()\n else:\n print(\"Cannot Print Multi-Dimensional Front greater than 3D\")", "def plot3D(self, diaphragmpoints=None, lungpoints=None, fig=None, ax=None, diaphragmcolor='r', lungcolor='g', size=2, howplot=0, dots=0):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n\n if diaphragmpoints is not None and lungpoints is not None:\n points = diaphragmpoints + lungpoints\n elif diaphragmpoints is not None:\n points = diaphragmpoints\n elif lungpoints is not None:\n points = lungpoints\n\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(points)):\n xpts.append(points[i][0])\n ypts.append(points[i][1])\n zpts.append(points[i][2])\n\n X = np.asarray(xpts)\n Y = np.asarray(ypts)\n Z = np.asarray(zpts)\n\n if howplot == 'wireframe':\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(pts)):\n xpts.append(pts[i][0])\n ypts.append(pts[i][1])\n zpts.append(pts[i][2])\n\n X = np.asarray([xpts])\n Y = np.asarray([ypts])\n Z = np.asarray([zpts])\n\n if 
dots == 1:\n ax.scatter(X, Y, Z, s=size, c='r', marker='o')\n\n ax.plot_wireframe(X, Y, Z)\n elif howplot == 1:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n else:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n ax.plot_trisurf(X, Y, Z, linewidth=0.2, antialiased=True)\n\n # Create cubic bounding box to simulate equal aspect ratio\n max_range = np.array([X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max()\n Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (X.max() + X.min())\n Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (Y.max() + Y.min())\n Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (Z.max() + Z.min())\n\n # Comment or uncomment following both lines to test the fake bounding box:\n for xb, yb, zb in zip(Xb, Yb, Zb):\n ax.plot([xb], [yb], [zb], 'w')\n\n plt.show()\n # fig.savefig('{}/diaphragm/{}.png'.format(DIR_RESULT))", "def plot_pose3_on_axes(axes, T, axis_length=0.1, center_plot=False, line_obj_list=None, zoom_to_fit=False):\n return plot_pose3RT_on_axes(axes, *decompose_T(T), axis_length, center_plot, line_obj_list, zoom_to_fit=zoom_to_fit)", "def visualise(cut_list): \r\n\tcutlist = json.load(cut_list)\r\n\tmodified_list =[]\r\n\tz_set = 0\r\n\tc_set = 0\r\n\ta_set = 0\r\n\tcut_num = 0\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\ta.pop(0)\r\n\t\t\ta = list(map(float,a)) + [z_set]\r\n\t\t\t\r\n\t\t\tif a_set != 0 or c_set != 0:\r\n\t\t\t\ta = rotate_a(a_set,a)\r\n\t\t\t\ta = rotate_c(c_set,a_set,a)\r\n\r\n\t\t\ta = a +[f\"a_set {a_set} c_set {c_set} z_set {z_set:.1f} cut_num {cut_num}\"]\r\n\t\t\tmodified_list.append(a)\r\n\r\n\t\telif a[0] == \"z_abs\":\r\n\t\t\tz_set = float(a[1])\r\n\t\t\tcut_num += 1\r\n\t\telif a[0] == \"c_abs\":\r\n\t\t\tc_set = float(a[1])\r\n\t\telif a[0] == \"a_abs\":\r\n\t\t\ta_set = float(a[1])\r\n\r\n\t\telif a[0] == \"z_rel\" or a[0] == \"z_step\":\r\n\t\t\tz_set = z_set + float(a[1])\r\n\t\telif a[0] == \"c_rel\" or a[0] == \"c_step\":\r\n\t\t\tc_set = c_set + float(a[1])\r\n\t\telif a[0] == \"a_rel\" or a[0] == \"a_step\":\r\n\t\t\ta_set = a_set + float(a[1])\r\n\t\telse:\r\n\t\t\tpass\r\n\tdf = pd.DataFrame(modified_list, columns = [\"x\",\"y\",\"z\",\"layer\"])\r\n\tfig = px.line_3d(df,\"x\",\"y\",\"z\",color=\"layer\")\r\n\t#fig.update_layout(scene_aspectmode = \"data\")\r\n\tfig.show()", "def plot_3d(vector_array, save_plot_dir):\n principal_df = pd.DataFrame(data=vector_array, columns=['pc1', 'pc2', 'pc3'])\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n xs = principal_df['pc1']\n ys = principal_df['pc2']\n zs = principal_df['pc3']\n ax.scatter(xs, ys, zs, s=50, alpha=0.6, edgecolors='w')\n\n ax.set_xlabel('pc1')\n ax.set_ylabel('pc2')\n ax.set_zlabel('pc3')\n\n plt.savefig(save_plot_dir + '/3D_scatter.png')\n plt.close()", "def plot3surface( pot, **kwargs ): \n \n fig = plt.figure( figsize = (8., 8.) ) \n gs = matplotlib.gridspec.GridSpec( 3,2, wspace=0.2) \n \n # Make a list with three perpendicular directions which \n # will define the three surface cuts \n perp = [(np.pi/2., 0.), (np.pi/2., -np.pi/2.), (0., -1.*np.pi/2.) 
]\n \n # Iterate to plot the three surface cuts\n yMin = 1e16\n yMax = -1e16 \n Ims = []\n for i in range(3):\n ax0 = fig.add_subplot( gs[i,0], projection='3d')\n ax1 = fig.add_subplot( gs[i,1]) \n \n T0, T1, X, Y, Z = surfcut_points( normal = perp[i], \\\n ax0=ax0, **kwargs ) \n \n EVAL = pot.evalpotential(X,Y,Z)\n im = ax1.pcolormesh( T0, T1, EVAL, \\\n cmap=plt.get_cmap('jet') ) \n plt.axes( ax1 ) \n cbar = plt.colorbar(im)\n cbar.set_label( pot.unitlabel, rotation=0) \n \n ymin = EVAL.min()\n ymax = EVAL.max()\n \n Ims.append(im) \n if ymin < yMin : yMin = ymin\n if ymax > yMax : yMax = ymax \n \n for im in Ims:\n im.set_clim( vmin=yMin, vmax=yMax)", "def plotProperty(self, x, y, z = [], idx = None, col = 1, row = 1, N = 1, ax = None,\\\n save = False, dpi = 100, format = \"pdf\", verbose = 1, handle = False,\\\n translation = None, title = None, other = None, ab = [],\\\n m = \"o\", ms = 2, leg = True, ylim = None, xlim = None, xscale = \"linear\",\\\n yscale = \"linear\", **kwargs):\n\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if translation is None: translation = [0]\n if isinstance(translation, (int, np.integer)): translation = [translation]\n \n if type(x) == str: x = [x]\n if type(y) == str: y = [y]\n if type(z) == str: z = [z]\n if len(x) != len(y):\n string = \"Length x (%i) and y (%i) must be the same\" % (len(x), len(y))\n ut.infoPrint(string)\n return\n\n if len(z) > 0 and len(x) != len(z):\n string = \"Length x (%i) and y (%i) and z (%i) must be the same\"\\\n % (len(x), len(y), len(z))\n ut.infoPrint(string)\n return\n\n m = kwargs.pop(\"marker\", m)\n ls = kwargs.pop(\"linestyle\", \"none\")\n ms = kwargs.pop(\"markersize\", ms)\n\n if len(m) == 1: m = m * len(x)\n if isinstance(ab, (int, np.integer)): ab = [ab]\n\n x_data, x_lbl, x_leg = self.getData(idx = idx, var = x, ab = ab, translation = translation,\\\n compact = True, verbose = verbose, other = other)\n y_data, y_lbl, y_leg = self.getData(idx = idx, var = y, ab = ab, translation = translation,\\\n compact = True, verbose = verbose, other = other)\n if len(x_data) != len(y_data): return\n\n if len(z) > 0:\n z_data, z_lbl, z_leg = self.getData(idx = idx, var = z, ab = ab, translation = translation,\\\n compact = True, verbose = verbose, other = other)\n\n if len(x_data) != len(y_data) != len(z_data) or z_data == []: return\n else:\n z_data = None\n\n hP = []\n if not handle:\n hFig = plt.figure()\n hAx = plt.subplot(row, col, N)\n else:\n hAx = ax\n\n if z_data is None:\n\n kwargs.pop(\"vmin\", None)\n kwargs.pop(\"vmax\", None)\n kwargs.pop(\"colormap\", None)\n\n for i in range(len(x_data)):\n\n tP = hAx.plot(x_data[i].T, y_data[i].T, linestyle = ls, marker = m[i],\\\n markersize = ms, **kwargs)\n\n [hP.append(lines) for lines in tP]\n\n if leg:\n ncol = 1\n if len(x_leg) > len(y_leg):\n if len(x_leg) > 5: ncol = 2\n hAx.legend(x_leg, ncol = ncol)\n else:\n if len(y_leg) > 5: ncol = 2\n hAx.legend(y_leg, ncol = ncol)\n\n else:\n zmin = np.min([np.min(i) for i in z_data])\n zmax = np.max([np.max(i) for i in z_data])\n\n cm = kwargs.pop(\"colormap\", \"plasma\")\n cmap = plt.cm.get_cmap(cm)\n vmin = kwargs.pop(\"vmin\", zmin)\n vmax = kwargs.pop(\"vmax\", zmax)\n c = kwargs.pop(\"color\", 'b')\n lw = kwargs.pop(\"linewidth\", 1.2)\n\n\n for i in range(len(x_data)):\n\n if np.ndim(x_data[i]) == 1: x_data[i] = x_data[i][None, :]\n if np.ndim(y_data[i]) == 1: y_data[i] = y_data[i][None, :]\n if np.ndim(z_data[i]) == 1: z_data[i] = z_data[i][None, :]\n\n if (np.shape(z_data[i]) != np.shape(x_data[i])) 
and\\\n (np.shape(z_data[i]) != np.shape(y_data[i])) and\\\n (z_data[i].shape[0] != 1):\n string = \"Ambiguous z data %s with x %s and y %s\"\\\n % (np.shape(z_data[i]), np.shape(x_data[i]), np.shape(y_data[i]))\n ut.infoPrint(string)\n return\n \n j,k,l = (0, 0, 0)\n for ii, t in enumerate(translation):\n\n tP = hAx.scatter(x_data[i][j, :], y_data[i][k, :], c = z_data[i][l, :],\\\n vmin = vmin, vmax = vmax, cmap = cmap, marker = m[i],\\\n label = \"\", s = ms, linewidth = lw, **kwargs)\n\n hP.append(tP)\n\n if np.shape(x_data[i])[0] > 1: j += 1\n if np.shape(y_data[i])[0] > 1: k += 1\n if np.shape(z_data[i])[0] > 1: l += 1\n\n if leg:\n ncol = 1\n if len(x_leg) > len(y_leg):\n if len(x_leg) > 4: ncol = 2\n hAx.legend(x_leg, ncol = ncol)\n else:\n if len(y_leg) > 4: ncol = 2\n hAx.legend(y_leg, ncol = ncol)\n \n if not handle: plt.colorbar(hP[0], label = z_lbl[0])\n\n if ylim is not None:\n hAx.set_ylim(bottom = ylim[0], top = ylim[1])\n if xlim is not None:\n hAx.set_xlim(left = xlim[0], right = xlim[1])\n\n hAx.set_yscale(yscale)\n hAx.set_xscale(xscale)\n hAx.set_xlabel(x_lbl[0])\n hAx.set_ylabel(y_lbl[0])\n if title is None:\n hAx.set_title(self.filename)\n else:\n hAx.set_title(title)\n\n if handle: \n return\n\n \"\"\"Annotating plot marker\"\"\"\n hP[0].set_pickradius(2)\n anP = hAx.plot([], [], marker = 'o', ms = 6, color = 'k', mew = 2, mfc = 'None',\\\n linestyle = 'None')\n\n plt.tight_layout()\n\n \"\"\"Function to allow clickable points to display information\"\"\"\n def click(event):\n if event.inaxes == hAx:\n\n for line in hP:\n cont, ind = line.contains(event)\n if cont:\n break\n\n if cont:\n if z_data is not None:\n x = line.get_offsets()[:, 0]\n y = line.get_offsets()[:, 1]\n else:\n x, y = line.get_data()\n\n xSel = x[ind[\"ind\"]]\n ySel = y[ind[\"ind\"]]\n\n pPos = hAx.transData.transform((xSel, ySel))\n pDist = np.linalg.norm(pPos - [[event.x, event.y]], axis = 1)\n index = ind[\"ind\"][np.argmin(pDist)]\n anP[0].set_data(x[ind[\"ind\"]], y[ind[\"ind\"]])\n for n, i in enumerate(ind[\"ind\"]):\n string = \"Idx: %i (%.4f, %.4f) | Nr Points: %i\"\\\n % (idx[i], x[i], y[i], len(ind[\"ind\"]))\n\n if n == 0: \n print(\"=\" * len(string))\n print(string)\n if n == len(ind[\"ind\"]) - 1: \n print(\"=\" * len(string))\n\n hFig.canvas.draw_idle()\n else:\n anP[0].set_data([], [])\n hFig.canvas.draw_idle()\n\n if save:\n if save is True:\n ut.save_fig(filename = \"PropertyPlot.%s\" % format, format = format,\\\n dpi = dpi, verbose = verbose)\n else:\n ut.save_fig(filename = save, format = format, dpi = dpi,\\\n verbose = verbose)\n plt.close()\n else:\n hFig.canvas.mpl_connect(\"button_release_event\", click)\n plt.show()", "def _plot(self, **kwargs):\n XY = self.positions\n plt.plot(XY[0,:], XY[1,:], 'o')\n plt.gca().set_aspect('equal')\n SS = np.abs(self.S)\n SS /= SS.max()\n\n for i in range(self.N):\n for j in range(self.N):\n if i == j or SS[i,j] < 1e-2:\n continue\n clr = 'r' if self.S[i,j]<0 else 'b'\n x, y = XY[:,i]\n r = XY[:,j] - XY[:,i]\n dx, dy = r\n rhat = r / np.sqrt((r**2).sum())\n ofsx, ofsy = 0.03 * rhat\n perpx, perpy = 0.005 * np.array([-rhat[1], rhat[0]])\n plt.arrow(x + ofsx + perpx, y + ofsy + perpy,\n r[0] - 2*ofsx, r[1] - 2*ofsy, color=clr,\n shape='right', width=0.01*SS[i,j],\n length_includes_head=True, head_width=0.02,\n linewidth=0, **kwargs)", "def plot_traj(ax,traj,style,color,label,mode, timestep = None):\n x = []\n y = []\n i = 0.0\n # traj = traj +zs [traj[0]]\n for co in traj:\n rotX, rotY, rotZ = quaternion_to_euler(co[4], co[5], co[6], 
co[7])\n if mode == 'xy':\n x.append(co[0+1])\n y.append(co[1+1])\n if mode == 'xz':\n x.append(co[0+1])\n y.append(co[2+1])\n if mode == 'yz':\n x.append(co[1+1])\n y.append(co[2+1])\n\n if mode == 'rotx':\n x.append(i)\n y.append(rotX)\n if mode == 'roty':\n x.append(i)\n y.append(rotY)\n if mode == 'rotz':\n x.append(i)\n y.append(rotZ)\n\n if mode == 'ax':\n x.append(i)\n y.append(co[1])\n if mode == 'ay':\n x.append(i)\n y.append(co[2])\n if mode == 'az':\n x.append(i)\n y.append(co[3])\n i += timestep\n\n ax.plot(x,y,style,color=color,label=label)", "def plot_single_trajectory(self):\n\n plt.plot(self.trip_centroids['lon'], self.trip_centroids['lat'], '-o')", "def plotOfSlice(self,index=0):\n\t\tj=index;\n\t\t[n,m]=_np.shape(self._data)\n\t\ty=_np.zeros(n);\n\t\tfor i in range(0,n):\n\t\t\t\ty[i]=self._data[i][j]*1e4\n\t\tp1=_plot.plot(shotno=[self.shotno],\n\t\t\t\t\t title=self.title+', t='+str(self.time[j]*1000)+'ms.')\n\t\tphi=_np.linspace(0,_np.pi*2,100)\n\t\tn1Fit=self._x[0,j]+self._x[1,j]*_np.sin(phi)+self._x[2,j]*_np.cos(phi)\n\t\tn2Fit=self._x[0,j]+self._x[3,j]*_np.sin(2*phi)+self._x[4,j]*_np.cos(2*phi)\n\t\tfitTotal=self._x[0,j]+self._x[1,j]*_np.sin(phi)+self._x[2,j]*_np.cos(phi)+self._x[3,j]*_np.sin(2*phi)+self._x[4,j]*_np.cos(2*phi)\n\n\t\t# plot\n\t\tp1.addTrace(yData=y,xData=self._phi,\n\t\t\t\t\tmarker='x',linestyle='',yLegendLabel='raw') \n\t\tp1.addTrace(yData=n1Fit,xData=phi,\n\t\t\t\t\tyLegendLabel='n=1') \n\t\tp1.addTrace(yData=n2Fit,xData=phi,\n\t\t\t\t\tyLegendLabel='n=2') \n\t\tp1.addTrace(yData=fitTotal,xData=phi,\n\t\t\t\t\tyLegendLabel='Superposition') \n\t\treturn p1", "def vector_arrows(Out, x, y, z, plot_layer):\n\n x = sort_dim(x)\n y = sort_dim(y)\n z = sort_dim(z)\n\n # length of array in each dimension\n Ny = len(y)-1\n Nx = len(x)-1\n Nz = len(z)-1\n\n # coordinates of cell centres\n # (halfway between L and R edges)\n xm = 0.5 * (x[:-1] + x[1:])\n ym = 0.5 * (y[:-1] + y[1:])\n zm = 0.5 * (z[:-1] + z[1:])\n\n # create empty arrays for output\n U = np.zeros((len(Out.Qx[:,0,0,0]),len(Out.Qx[0,:,0,0]),len(Out.Qx[0,0,:,0]),len(Out.Qx[0,0,0,:])+1)) \n V = np.zeros((len(Out.Qy[:,0,0,0]),len(Out.Qy[0,:,0,0]),len(Out.Qy[0,0,:,0])+1,len(Out.Qy[0,0,0,:])))\n W = np.zeros((len(Out.Qz[:,0,0,0]),len(Out.Qz[0,:,0,0])+1,len(Out.Qz[0,0,:,0]),len(Out.Qz[0,0,0,:])))\n\n # create mesh\n X, Y, = np.meshgrid(xm, ym) # coordinates of cell centers\n Z = np.meshgrid(zm)\n\n # iterate through timesteps\n for t in range(len(Out.Qy[:,0,0,0])): # number of timesteps\n\n #grab relevant timestep from Out array\n Qx = Out.Qx[t,:,:,:]\n Qy = Out.Qy[t,:,:,:]\n Qz = Out.Qz[t,:,:,:]\n\n # Calculate flows at cell centers by interpolating between L and R faces\n Ut = np.concatenate((Qx[plot_layer, :, 0].reshape((1, Ny, 1)), \\\n 0.5 * (Qx[plot_layer, :, :-1].reshape((1, Ny, Nx-2)) +\\\n Qx[plot_layer, :, 1: ].reshape((1, Ny, Nx-2))), \\\n Qx[plot_layer, :, -1].reshape((1, Ny, 1))), axis=2).reshape((Ny,Nx))\n\n Vt = np.concatenate((Qy[plot_layer, 0, :].reshape((1, 1, Nx)), \\\n 0.5 * (Qy[plot_layer, :-1, :].reshape((1, Ny-2, Nx)) +\\\n Qy[plot_layer, 1:, :].reshape((1, Ny-2, Nx))), \\\n Qy[plot_layer, -1, :].reshape((1, 1, Nx))), axis=1).reshape((Ny,Nx))\n\n # average flow across vertical cell to get z flow at cell centre\n QzTop = Qz[0:-1,:,:]\n QzBot = Qz[1:,:,:]\n Wt = (QzTop+QzBot)/2\n \n # add results to output arrays\n U[t,:,:,:] = Ut\n V[t,:,:,:] = Vt\n W[t,1:-1,:,:] = Wt\n\n return X,Y,Z,U,V,W", "def plot(self, plotEdges=False, emphaseEdges=[], col=('b', 'k', 'r'), lims=None, 
ort=False):\n ax = a3.Axes3D(plt.figure())\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.dist = 30\n ax.azim = -140\n if lims is None:\n lims = [0, 0, 0]\n lims[0] = [min(v.x for v in self.vertices),\n max(v.x for v in self.vertices)]\n lims[1] = [min(v.y for v in self.vertices),\n max(v.y for v in self.vertices)]\n lims[2] = [min(v.z for v in self.vertices),\n max(v.z for v in self.vertices)]\n if ort:\n ma = max(lims[i][1] for i in range(3))\n mi = min(lims[i][0] for i in range(3))\n lims = [[mi, ma]] * 3\n ax.set_xlim(lims[0])\n ax.set_ylim(lims[1])\n ax.set_zlim(lims[2])\n for f in self.faces:\n face = a3.art3d.Poly3DCollection([[v.coords()\n for v in f.vertices]])\n ax.add_collection3d(face)\n face.set_facecolor(col[0])\n face.set_edgecolor(col[1])\n if plotEdges or len(emphaseEdges)>0:\n for e in self.edges:\n edge = a3.art3d.Poly3DCollection([[e.nvt.coords(),\n e.pvt.coords()]])\n ax.add_collection3d(edge)\n if e in emphaseEdges:\n edge.set_edgecolor(col[2])\n else:\n edge.set_edgecolor(col[1])\n plt.show()", "def show_path_2D(start, end, coordinates, polygons, clear = True):\n global L, N, delta_t\n\n # start interactive mode\n plt.ion()\n\n # crete eempty figure on which data will go and first subplot\n fig = plt.figure()\n\n # get into the correct time step\n for time_step in range(start, end):\n # list of colours used for animation\n colours = cm.rainbow(np.linspace(0, 1, N))\n\n # loop over each particle and colour\n for i in range(N):\n # plot x, y poistion of particle in a given colour and set axis to size of box\n plt.scatter(coordinates[time_step][i][0], coordinates[time_step][i][1], s = 1, color = 'r')\n\n # plot the object\n if i < M:\n polygon = np.array(polygons[time_step][i])\n # get the points of the polygon to plot it\n x, y = polygon.T\n\n # print(x, y)\n\n x = np.append(x, x[0])\n y = np.append(y, y[0])\n\n # print(x, y)\n\n # plot the polygon\n plt.plot(x , y)\n # plt.scatter(polygons_com[time_step][i][0], polygons_com[time_step][i][1], s = 5, color = 'g')\n\n if bound_cond == True:\n plt.axis([0, L, 0, L])\n plt.axis([0, L, 0, L])\n # plt.axis([-L*2, L*2, -L*2, L*2])\n\n # show graph\n plt.show()\n plt.pause(time_pause)\n\n # decide if you want to clear\n if clear == True:\n plt.clf()\n\n return None", "def drawCurve3D(xlist, ylist, zlist):\n dislin.curv3d(xlist,ylist,zlist,len(xlist))", "def plot_scatter_points_lines(self):\n self.plot(2)", "def plot_3d(x_data, y_data, Z, df, xlabel, ylabel, xrange=None,\n yrange=None, figsize=(12, 12)):\n fig = pyplot.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n nsamp, nsen = Z.shape\n\n sen_index = df.columns.names.index('sensor')\n senlist = df.columns.levels[sen_index]\n pyplot.yticks(y_data, senlist)\n ax.plot_surface(\n np.repeat(x_data,\n nsen, axis=1),\n np.repeat(np.matrix(y_data), nsamp, axis=0),\n df.values,\n cmap=cm.coolwarm)\n pyplot.xlabel(xlabel)\n pyplot.ylabel('Sensor name')\n ax.set_zlabel(ylabel)\n ax.view_init(elev=45., azim=-130)\n ax.tick_params(axis='y', which='major', labelsize=4)\n pyplot.show()", "def show_current_pair_by_3d_slice(iS,iT):\n import matplotlib.pyplot as plt\n import easyreg.viewers as viewers\n fig, ax = plt.subplots(2,3)\n plt.setp(plt.gcf(), 'facecolor', 'white')\n plt.style.use('bmh')\n\n ivsx = viewers.ImageViewer3D_Sliced(ax[0][0], iS, 0, 'source X', True)\n ivsy = viewers.ImageViewer3D_Sliced(ax[0][1], iS, 1, 'source Y', True)\n ivsz = viewers.ImageViewer3D_Sliced(ax[0][2], iS, 2, 'source Z', True)\n\n ivtx = 
viewers.ImageViewer3D_Sliced(ax[1][0], iT, 0, 'target X', True)\n ivty = viewers.ImageViewer3D_Sliced(ax[1][1], iT, 1, 'target Y', True)\n ivtz = viewers.ImageViewer3D_Sliced(ax[1][2], iT, 2, 'target Z', True)\n\n\n feh = viewers.FigureEventHandler(fig)\n feh.add_axes_event('button_press_event', ax[0][0], ivsx.on_mouse_press, ivsx.get_synchronize, ivsx.set_synchronize)\n feh.add_axes_event('button_press_event', ax[0][1], ivsy.on_mouse_press, ivsy.get_synchronize, ivsy.set_synchronize)\n feh.add_axes_event('button_press_event', ax[0][2], ivsz.on_mouse_press, ivsz.get_synchronize, ivsz.set_synchronize)\n\n feh.add_axes_event('button_press_event', ax[1][0], ivtx.on_mouse_press, ivtx.get_synchronize, ivtx.set_synchronize)\n feh.add_axes_event('button_press_event', ax[1][1], ivty.on_mouse_press, ivty.get_synchronize, ivty.set_synchronize)\n feh.add_axes_event('button_press_event', ax[1][2], ivtz.on_mouse_press, ivtz.get_synchronize, ivtz.set_synchronize)\n\n feh.synchronize([ax[0][0], ax[1][0]])\n feh.synchronize([ax[0][1], ax[1][1]])\n feh.synchronize([ax[0][2], ax[1][2]])", "def showPlot3():\n raise NotImplementedError", "def plot(x, y, z):\n pylab.plot(x, y)\n pylab.plot(x, z)\n pylab.show()", "def render(static, tour_indices, save_path, test = False):\n # if not test:\n # matplotlib.use('Agg')\n # if not os.path.exists(save_path):\n # os.makedirs(save_path)\n\n plt.close('all')\n if isinstance(static, torch.Tensor):\n static = static.cpu()\n if isinstance(tour_indices, torch.Tensor):\n tour_indices = tour_indices.cpu()\n\n num_plots = 3 if int(np.sqrt(len(tour_indices))) >= 3 else 1\n\n _, axes = plt.subplots(nrows=num_plots, ncols=num_plots,\n sharex='col', sharey='row')\n\n if num_plots == 1:\n axes = [[axes]]\n axes = [a for ax in axes for a in ax]\n\n for i, ax in enumerate(axes):\n\n # Convert the indices back into a tour\n idx = tour_indices[i, :]\n idx = idx[idx>-1]\n static_ = static[i].data\n\n for idx_ in idx:\n nearest_indices = cal_nearest_indices(static_, idx_)\n for nearest_idx in nearest_indices:\n pair_coor = torch.cat((static_[:,idx_].unsqueeze(0),static_[:,nearest_idx].unsqueeze(0))).transpose(1,0).numpy()\n ax.plot(pair_coor[0], pair_coor[1], linestyle='--', color='k',zorder=1, linewidth='0.5')\n\n\n # End tour at the starting index\n idx = torch.cat((idx, idx[0].unsqueeze(0)))\n idx = idx.repeat(2, 1) # (2,6)\n\n data = torch.gather(static[i].data, 1, idx).cpu().numpy()\n\n #plt.subplot(num_plots, num_plots, i + 1)\n ax.plot(data[0], data[1], zorder=1)\n ax.scatter(static_[0], static_[1], s=4, c='r', zorder=2)\n ax.scatter(data[0, 0], data[1, 0], s=20, c='k', marker='*', zorder=3)\n\n # ax.set_xlim(0, 1)\n # ax.set_ylim(0, 1)\n\n plt.tight_layout()\n plt.show()\n print(save_path)\n plt.savefig(save_path, bbox_inches='tight', dpi=400)", "def plot_gt3D(parent_dir, env, title='GT Cost Value over 3D Reachable Set'):\n\traw_waypts, gt_cost = get_coords_gt_cost(env, parent_dir)\n\tfig = px.scatter_3d(x=raw_waypts[:,88], y=raw_waypts[:,89], z=raw_waypts[:,90], color=gt_cost)\n\tfig.update_layout(title=title)\n\tfig.show()", "def trajectory_plt(model, inputs, targets, timesteps, highlight_inputs=False,\n include_arrow=False, save_fig=''):\n alpha = 0.5\n color = ['red' if targets[i, 0] > 0.0 else 'blue' for i in range(len(targets))]\n # Calculate trajectories (timesteps, batch_size, input_dim)\n trajectories = model.odeblock.trajectory(inputs, timesteps).detach()\n # Features are trajectories at the final time\n features = trajectories[-1]\n\n if model.augment_dim > 0:\n aug = 
torch.zeros(inputs.shape[0], model.odeblock.odefunc.augment_dim)\n inputs_aug = torch.cat([inputs, aug], 1)\n else:\n inputs_aug = inputs\n\n if model.augment_dim >=0 :\n input_dim = model.data_dim + model.augment_dim\n else:\n input_dim = model.data_dim\n\n if input_dim == 2:\n # Plot starting and ending points of trajectories\n input_linewidths = 2 if highlight_inputs else 0\n plt.scatter(inputs_aug[:, 0].numpy(), inputs_aug[:, 1].numpy(), c=color,\n alpha=alpha, linewidths=input_linewidths, edgecolor='orange')\n plt.scatter(features[:, 0].numpy(), features[:, 1].numpy(), c=color,\n alpha=alpha, linewidths=0)\n\n # For each point in batch, plot its trajectory\n for i in range(inputs_aug.shape[0]):\n # Plot trajectory\n trajectory = trajectories[:, i, :]\n x_traj = trajectory[:, 0].numpy()\n y_traj = trajectory[:, 1].numpy()\n plt.plot(x_traj, y_traj, c=color[i], alpha=alpha)\n #compute_distance(x_traj, y_traj, 1 )\n # Optionally add arrow to indicate direction of flow\n if include_arrow:\n arrow_start = x_traj[-2], y_traj[-2]\n arrow_end = x_traj[-1], y_traj[-1]\n plt.arrow(arrow_start[0], arrow_start[1],\n arrow_end[0] - arrow_start[0],\n arrow_end[1] - arrow_start[1], shape='full', lw=0,\n length_includes_head=True, head_width=0.15,\n color=color[i], alpha=alpha)\n\n plt.tick_params(axis='both', which='both', bottom=False, top=False,\n labelbottom=False, right=False, left=False,\n labelleft=False)\n\n ax = plt.gca()\n elif input_dim == 3:\n # Create figure\n fig = plt.figure()\n ax = Axes3D(fig)\n\n # Plot starting and ending points of trajectories\n input_linewidths = 1 if highlight_inputs else 0\n ax.scatter(inputs_aug[:, 0].numpy(), inputs_aug[:, 1].numpy(),\n inputs_aug[:, 2].numpy(), c=color, alpha=alpha,\n linewidths=input_linewidths, edgecolor='orange')\n ax.scatter(features[:, 0].numpy(), features[:, 1].numpy(),\n features[:, 2].numpy(), c=color, alpha=alpha, linewidths=0)\n\n # For each point in batch, plot its trajectory\n for i in range(inputs_aug.shape[0]):\n # Plot trajectory\n trajectory = trajectories[:, i, :]\n x_traj = trajectory[:, 0].numpy()\n y_traj = trajectory[:, 1].numpy()\n z_traj = trajectory[:, 2].numpy()\n ax.plot(x_traj, y_traj, z_traj, c=color[i], alpha=alpha)\n #compute_distance(x_traj, y_traj, z_traj)\n # Optionally add arrow\n if include_arrow:\n arrow_start = x_traj[-2], y_traj[-2], z_traj[-2]\n arrow_end = x_traj[-1], y_traj[-1], z_traj[-1]\n\n arrow = Arrow3D([arrow_start[0], arrow_end[0]],\n [arrow_start[1], arrow_end[1]],\n [arrow_start[2], arrow_end[2]],\n mutation_scale=15,\n lw=0, color=color[i], alpha=alpha)\n ax.add_artist(arrow)\n\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n else:\n raise RuntimeError(\"Input dimension must be 2 or 3 but was {}\".format(input_dim))\n\n ax.set_aspect(get_square_aspect_ratio(ax))\n\n if len(save_fig):\n plt.savefig(save_fig, format='png', dpi=400, bbox_inches='tight')\n plt.clf()\n plt.close()", "def trajectory_plt(model, inputs, targets, timesteps, highlight_inputs=True,\n include_arrow=False, save_fig=''):\n alpha = 0.5\n color = ['red' if targets[i, 0] > 0.0 else 'blue' for i in range(len(targets))]\n # Calculate trajectories (timesteps, batch_size, input_dim)\n trajectories = model.odeblock.trajectory(inputs, timesteps).detach()\n # Features are trajectories at the final time\n features = trajectories[-1]\n\n if model.augment_dim > 0:\n aug = torch.zeros(inputs.shape[0], model.odeblock.odefunc.augment_dim)\n inputs_aug = torch.cat([inputs, aug], 1)\n else:\n inputs_aug = inputs\n\n if 
model.augment_dim ==1 :\n input_dim = model.data_dim + model.augment_dim\n else:\n input_dim = model.data_dim\n\n if input_dim == 2:\n # Plot starting and ending points of trajectories\n input_linewidths = 2 if highlight_inputs else 0\n plt.scatter(inputs_aug[:, 0].numpy(), inputs_aug[:, 1].numpy(), c=color,\n alpha=alpha, linewidths=input_linewidths, edgecolor='orange')\n plt.scatter(features[:, 0].numpy(), features[:, 1].numpy(), c=color,\n alpha=alpha, linewidths=0)\n\n # For each point in batch, plot its trajectory\n for i in range(inputs_aug.shape[0]):\n # Plot trajectory\n trajectory = trajectories[:, i, :]\n x_traj = trajectory[:, 0].numpy()\n y_traj = trajectory[:, 1].numpy()\n plt.plot(x_traj, y_traj, c=color[i], alpha=alpha)\n #compute_distance(x_traj, y_traj, 1 )\n # Optionally add arrow to indicate direction of flow\n if include_arrow:\n arrow_start = x_traj[-2], y_traj[-2]\n arrow_end = x_traj[-1], y_traj[-1]\n plt.arrow(arrow_start[0], arrow_start[1],\n arrow_end[0] - arrow_start[0],\n arrow_end[1] - arrow_start[1], shape='full', lw=0,\n length_includes_head=True, head_width=0.15,\n color=color[i], alpha=alpha)\n\n plt.tick_params(axis='both', which='both', bottom=False, top=False,\n labelbottom=False, right=False, left=False,\n labelleft=False)\n\n ax = plt.gca()\n elif input_dim == 3:\n # Create figure\n fig = plt.figure()\n ax = Axes3D(fig)\n\n # Plot starting and ending points of trajectories\n input_linewidths = 1 if highlight_inputs else 0\n ax.scatter(inputs_aug[:, 0].numpy(), inputs_aug[:, 1].numpy(),\n inputs_aug[:, 2].numpy(), c=color, alpha=alpha,\n linewidths=input_linewidths, edgecolor='orange')\n ax.scatter(features[:, 0].numpy(), features[:, 1].numpy(),\n features[:, 2].numpy(), c=color, alpha=alpha, linewidths=0)\n\n # For each point in batch, plot its trajectory\n for i in range(inputs_aug.shape[0]):\n # Plot trajectory\n trajectory = trajectories[:, i, :]\n x_traj = trajectory[:, 0].numpy()\n y_traj = trajectory[:, 1].numpy()\n z_traj = trajectory[:, 2].numpy()\n ax.plot(x_traj, y_traj, z_traj, c=color[i], alpha=alpha)\n #compute_distance(x_traj, y_traj, z_traj)\n # Optionally add arrow\n if include_arrow:\n arrow_start = x_traj[-2], y_traj[-2], z_traj[-2]\n arrow_end = x_traj[-1], y_traj[-1], z_traj[-1]\n\n arrow = Arrow3D([arrow_start[0], arrow_end[0]],\n [arrow_start[1], arrow_end[1]],\n [arrow_start[2], arrow_end[2]],\n mutation_scale=15,\n lw=0, color=color[i], alpha=alpha)\n ax.add_artist(arrow)\n\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n else:\n raise RuntimeError(\"Input dimension must be 2 or 3 but was {}\".format(input_dim))\n\n ax.set_aspect(get_square_aspect_ratio(ax))\n\n if len(save_fig):\n plt.savefig(save_fig, format='png', dpi=400, bbox_inches='tight')\n plt.clf()\n plt.close()", "def plot_pose3_on_axes(axes, T, axis_length=0.1, center_plot=False, line_obj_list=None):\n return plot_pose3RT_on_axes(axes, *decompose_T(T), axis_length, center_plot, line_obj_list)", "def plot_trajectory(self, n, show=False, save=True, savename='ctrw_trajectory.pdf'):\n\n plt.figure()\n # plt.plot(self.trajectory_hops[n, :, 0] / 1000000, self.trajectory_hops[n, :, 1], linewidth=2)\n plt.plot(self.trajectories[n, :, 0] / 1000000, self.trajectories[n, :, 1], linewidth=2)\n plt.gcf().get_axes()[0].tick_params(labelsize=14)\n plt.xlabel('Time (ms)', fontsize=14)\n plt.ylabel('$z$-coordinate (nm)', fontsize=14)\n plt.tight_layout()\n\n if show:\n plt.show(block=True)\n if save:\n plt.savefig(savename)", "def plotVoronoiCell(self, cells):\n for i in 
cells:\n #i indexes volumes\n i = self.nonBI[i] #now i indexes vor.point_region\n\n vI = self.vor.regions[self.vor.point_region[i]]\n v = self.vor.vertices[vI, :]\n r = v\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Voronoi Cell of Particle ' + str(i))\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m]')\n ax.scatter(r[:, 0], r[:, 1], r[:, 2], s=5, alpha=0.5, label='Cell Boundaries')\n ax.scatter(self.data[i, 0], self.data[i, 1], self.data[i, 2], s=25, label='Cell Center')\n ax.set_xlim3d(np.min(self.data[:, 0]), np.max(self.data[:, 0]))\n ax.set_ylim3d(np.min(self.data[:, 1]), np.max(self.data[:, 1]))\n ax.set_zlim3d(np.min(self.data[:, 2]), np.max(self.data[:, 2]))\n # limits = np.vstack((np.array([np.max(self.data[:, 0]), np.max(self.data[:, 1]), np.max(self.data[:, 2])]), np.array([np.min(self.data[:, 0]), np.min(self.data[:, 1]), np.min(self.data[:, 2])])))\n # ax.scatter(limits[:, 0], limits[:, 1], limits[:, 2], s=1)\n ax.legend()", "def svm_add_3d_hyperplane(model, ax, plotted_points):\n SPACE_SAMPLING_POINTS = 70\n X_MIN = np.min(plotted_points[:, 0])\n X_MAX = np.max(plotted_points[:, 0])\n Y_MIN = np.min(plotted_points[:, 1])\n Y_MAX = np.max(plotted_points[:, 1])\n Z_MIN = np.min(plotted_points[:, 2])\n Z_MAX = np.max(plotted_points[:, 2])\n xx, yy, zz = np.meshgrid(np.linspace(X_MIN, X_MAX, SPACE_SAMPLING_POINTS),\n np.linspace(Y_MIN, Y_MAX, SPACE_SAMPLING_POINTS),\n np.linspace(Z_MIN, Z_MAX, SPACE_SAMPLING_POINTS))\n if hasattr(model, 'decision_function'):\n Z = model.decision_function(np.c_[xx.ravel(), yy.ravel(), zz.ravel()])\n elif hasattr(model, 'predict_proba'):\n Z = model.predict_proba(\n np.c_[xx.ravel(), yy.ravel(), zz.ravel()])[:, 1]\n else:\n exit('No decision function or predict_proba for classifer')\n Z = Z.reshape(xx.shape)\n verts, faces, _, _ = measure.marching_cubes(Z, 0)\n verts = verts * \\\n [X_MAX - X_MIN, Y_MAX - Y_MIN, Z_MAX - Z_MIN] / SPACE_SAMPLING_POINTS\n verts = verts + [X_MIN, Y_MIN, Z_MIN]\n mesh = Poly3DCollection(verts[faces],\n facecolor='orange', edgecolor='gray', alpha=0.4)\n ax.add_collection3d(mesh)", "def plot_h_static_3d(n: int = 1):\n # todo: Major DRY\n E = -2 / (n + 1) ** 2\n x, ψ = h_static_3d(E)\n\n fig, ax = plt.subplots()\n ax.plot(x, ψ)\n\n ax.grid(True)\n plt.xlim(0, 20)\n plt.ylim(-0.02, 0.02)\n plt.show()", "def plotOfSlice(self,index=0):\n\t\tj=index;\n\t\t[n,m]=_np.shape(self._data)\n\t\ty=_np.zeros(n);\n\t\tfor i in range(0,n):\n\t\t\ty[i]=self._data[i][j]*1e4\n\t\tp1=_plot.plot(title='t=%.3f ms. 
%s ' % (self.time[j]*1000, self.title),\n\t\t\t\t\t shotno=self.shotno)\n\t\ttheta=_np.linspace(self._theta[0],self._theta[-1],100)\n#\t\tm0Fit=self._x[0,j]\n\t\tm1Fit=self._x[0,j]+self._x[1,j]*_np.sin(theta)+self._x[2,j]*_np.cos(theta)\n\t\tm2Fit=self._x[0,j]+self._x[3,j]*_np.sin(2*theta)+self._x[4,j]*_np.cos(2*theta)\n\t\tm3Fit=self._x[0,j]+self._x[5,j]*_np.sin(3*theta)+self._x[6,j]*_np.cos(3*theta)\n\t\tm4Fit=self._x[0,j]+self._x[7,j]*_np.sin(4*theta)+self._x[8,j]*_np.cos(4*theta)\n\t\tm5Fit=self._x[0,j]+self._x[9,j]*_np.sin(5*theta)+self._x[10,j]*_np.cos(5*theta)\n\t\tfitTotal=(-4.)*self._x[0,j]+m1Fit+m2Fit+m3Fit+m4Fit+m5Fit # the -4 corrects for the 4 extra offsets added from the preview 5 fits\n\t\t\n\t\tp1.addTrace(yData=y,xData=self._theta,\n\t\t\t\t\tlinestyle='',marker='.',yLegendLabel='raw')\n\t\tp1.addTrace(yData=m1Fit,xData=theta,\n\t\t\t\t\tyLegendLabel='m=1')\n\t\tp1.addTrace(yData=m2Fit,xData=theta,\n\t\t\t\t\tyLegendLabel='m=2')\n\t\tp1.addTrace(yData=m3Fit,xData=theta,\n\t\t\t\t\tyLegendLabel='m=3')\n\t\tp1.addTrace(yData=m4Fit,xData=theta,\n\t\t\t\t\tyLegendLabel='m=4')\n\t\tp1.addTrace(yData=m5Fit,xData=theta,\n\t\t\t\t\tyLegendLabel='m=5')\n\t\tp1.addTrace(yData=fitTotal,xData=theta,\n\t\t\t\t\tyLegendLabel='m=1-5')\n\t\treturn p1", "def plotMSSpectra3D(listOfFilesToPlot, listOfNames=None, listOfColors=None, gridLines=False, yMin=0.5, yMax=2.5, yScale = 1.0,\r\n legend=True, normalizeToN15=False, subtractRef=None, legendLoc=4, lw=1.5, xMin=0, xMax=2000, scaleP=False, scaleI=0, scaleVal=1.0,\r\n figsize=(10,10), tLeft=0, tRight=-1, fixedOffset=False, noTicks=False, xlabel='mass', zlabel='intensity', a14=1.0): \r\n if listOfNames==None:\r\n listOfNames = listOfFilesToPlot\r\n if listOfColors==None:\r\n listOfColors = [pylab.cm.jet(float(i)/float(len(listOfFilesToPlot))) for i in range(len(listOfFilesToPlot))]\r\n \r\n fig = pylab.figure(figsize=figsize)\r\n ax = fig.add_subplot(111, projection='3d')\r\n\r\n yTotal = len(listOfFilesToPlot)\r\n top = 0.0\r\n\r\n if not (subtractRef is None):\r\n [bhah, zsRef, blah] = qMS.readMSSpectraFile(listOfFilesToPlot[subtractRef])\r\n #zsRef = list(numpy.array(zsRef)-fixedOffset\r\n zNorm = max(zsRef[len(zsRef)/2:])\r\n zsRef = numpy.array(zsRef)/zNorm\r\n \r\n for i,f in enumerate(listOfFilesToPlot):\r\n [xs, zs, name] = qMS.readMSSpectraFile(f)\r\n if fixedOffset:\r\n off = zs[len(zs)/2]\r\n print off\r\n #zs = list(numpy.array(zs)-zs[len(zs)/2])\r\n zs = list(numpy.array(zs)-off)\r\n ys = [yTotal-i]*len(xs)\r\n ys = numpy.array(ys)*yScale\r\n if normalizeToN15:\r\n zNorm = max(zs[len(zs)/2:])\r\n zs = numpy.array(zs)/zNorm\r\n if not (subtractRef is None):\r\n zNorm = max(zs[len(zs)/2:])\r\n zs = numpy.array(zs)/zNorm\r\n zs[:len(zsRef)/2] = zs[:len(zs)/2]-zsRef[:len(zsRef)/2]\r\n zs = zs*zNorm\r\n #xs = xs[:len(xs)/2]\r\n #ys = ys[:len(ys)/2]\r\n #zs = zs[:len(zs)/2]\r\n zs[:len(zs)/2] = numpy.array(zs[:len(zs)/2])*a14\r\n if (scaleP is True) and (i==scaleI):\r\n zs = numpy.array(zs)*scaleVal\r\n ax.plot(numpy.array(xs[tLeft:tRight]),numpy.array(ys[tLeft:tRight]),numpy.array(zs[tLeft:tRight]), color=listOfColors[i], lw=lw, label=listOfNames[i])\r\n top = max([top, float(max(zs))])\r\n\r\n\r\n ax.w_xaxis.pane.set_visible(False)\r\n ax.w_yaxis.pane.set_visible(False)\r\n ax.w_zaxis.pane.set_visible(False)\r\n\r\n if gridLines: \r\n ax.w_xaxis.gridlines.set_linewidth(1)\r\n ax.w_yaxis.gridlines.set_linewidth(1)\r\n ax.w_zaxis.gridlines.set_linewidth(1)\r\n \r\n else:\r\n ax.w_xaxis.gridlines.set_visible(False)\r\n 
ax.w_yaxis.gridlines.set_visible(False)\r\n ax.w_zaxis.gridlines.set_visible(False)\r\n\r\n [i.set_linewidth(1) for i in ax.w_xaxis.get_ticklines()]\r\n [i.set_linewidth(1) for i in ax.w_yaxis.get_ticklines()]\r\n [i.set_linewidth(1) for i in ax.w_zaxis.get_ticklines()]\r\n\r\n ax.w_xaxis.line.set_linewidth(1)\r\n ax.w_yaxis.line.set_linewidth(1)\r\n ax.w_zaxis.line.set_linewidth(1)\r\n \r\n ax.set_zticks([round(i,1) for i in [0, top/3, 2*top/3, top]])\r\n ax.set_zlim3d([0, top])\r\n ax.set_ylim3d(yMin, yMax)\r\n ax.set_yticks(range(1,yTotal+1))\r\n pylab.yticks(range(1,yTotal+1), ['']*yTotal)\r\n ax.set_xlim3d([xMin, xMax])\r\n \r\n if noTicks:\r\n ax.set_zticks([])\r\n ax.set_xticks([])\r\n ax.set_yticks([])\r\n\r\n ax.set_xlabel(xlabel)\r\n ax.set_zlabel(zlabel)\r\n\r\n ax.view_init(15, -60)\r\n if legend:\r\n pylab.legend(loc=legendLoc)\r\n \r\n pylab.tight_layout()\r\n return ax", "def plot_xyz():\n plt.subplot(3,1,1) # for x axis\n plt.title('x value v.s. time')\n plt.grid(True)\n plt.ylabel('X')\n plt.xlabel('t')\n plt.plot(x, '-r')\n\n plt.subplot(3,1,2) # for y axis\n plt.title('y value v.s. time')\n plt.grid(True)\n plt.ylabel('Y')\n plt.xlabel('t')\n plt.plot(y, '-g')\n\n plt.subplot(3,1,3) # for z axis\n plt.title('z value v.s. time')\n plt.grid(True)\n plt.ylabel('Z')\n plt.xlabel('t')\n plt.plot(z, '-b')", "def sample_and_plot(self):\n fig = plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(self.X, self.Y, self.sample(), cmap = plt.cm.jet, rstride = 2, cstride = 2, linewidth = 1)\n plt.show()", "def axis3D(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep):\n dislin.graf3d(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep)", "def plot(self):\n pass", "def render(static, tour_indices, save_path):\n\n plt.close('all')\n print('static_shape', static.shape)\n print('tour_indices_shape', tour_indices.shape)\n\n num_plots = 3 if int(np.sqrt(len(tour_indices))) >= 3 else 1\n\n _, axes = plt.subplots(nrows=num_plots, ncols=num_plots,\n sharex='col', sharey='row')\n\n if num_plots == 1:\n axes = [[axes]]\n axes = [a for ax in axes for a in ax]\n\n for i, ax in enumerate(axes):\n\n # Convert the indices back into a tour\n idx = tour_indices[i]\n print('idx0', idx)\n if len(idx.size()) == 1:\n idx = idx.unsqueeze(0)\n print('idx1', idx)\n\n idx = idx.expand(static.size(1), -1)\n print('idx2', idx)\n data = torch.gather(static[i].data, 1, idx).cpu().numpy()\n print('data', data)\n\n start = static[i, :, 0].cpu().data.numpy()\n x = np.hstack((start[0], data[0], start[0]))\n y = np.hstack((start[1], data[1], start[1]))\n print('x', x)\n print('y', y)\n\n # Assign each subtour a different colour & label in order traveled\n idx = np.hstack((0, tour_indices[i].cpu().numpy().flatten(), 0))\n print('idx3', idx)\n where = np.where(idx == 0)[0]\n print('where', where)\n\n for j in range(len(where) - 1):\n\n low = where[j]\n high = where[j + 1]\n\n if low + 1 == high:\n continue\n\n ax.plot(x[low: high + 1], y[low: high + 1], zorder=1, label=j)\n\n ax.legend(loc=\"upper right\", fontsize=3, framealpha=0.5)\n ax.scatter(x, y, s=4, c='r', zorder=2)\n ax.scatter(x[0], y[0], s=20, c='k', marker='*', zorder=3)\n\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 1)\n\n plt.tight_layout()\n plt.savefig(save_path, bbox_inches='tight', dpi=400)", "def plot_3d(x, y):\n # Create grid coordinates\n x_axis = np.linspace(-10, 10, 50)\n y_axis = np.linspace(-1, 4, 50)\n xx, yy = np.meshgrid(x_axis, y_axis, indexing='xy')\n z = np.zeros((x_axis.size, 
y_axis.size))\n\n # Calculate z-values based on grid coefficients\n for (i, j), v in np.ndenumerate(z):\n z[i, j] = compute_cost(x, y, theta=[[xx[i, j]], [yy[i, j]]])\n\n # Construct plot\n fig = plt.figure(figsize=(12, 10))\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(xx, yy, z, rstride=1, cstride=1, alpha=0.6, cmap=plt.cm.jet)\n ax.set_zlabel('Cost')\n ax.set_zlim(z.min(), z.max())\n ax.view_init(elev=15, azim=230)\n plt.title('X vs. Y vs. Cost')\n ax.set_xlabel(r'$\\theta_0$', fontsize=17)\n ax.set_ylabel(r'$\\theta_1$', fontsize=17)\n plt.show()\n plt.close()", "def visualize_coordinate_frame(im, K, R, t, vis_size_in_px=15):\n f = 0.5 * (K[0, 0] + K[1, 1])\n depth = 500. # [mm]\n a = depth * vis_size_in_px / f\n pts_3d = np.array([[0., 0., 0.], [a, 0., 0.], [0., a, 0.], [0., 0., a]])\n pts_im = misc_bop.project_pts(pts_3d, K, R, t)\n\n im_pil = Image.fromarray(im)\n draw = ImageDraw.Draw(im_pil)\n for i in range(1, 4):\n color = [0, 0, 0]\n color[i - 1] = 255\n pts = tuple(\n map(int, [pts_im[0, 0], pts_im[0, 1], pts_im[i, 0], pts_im[i, 1]]))\n draw.line(pts, fill=tuple(color), width=2)\n del draw\n return np.asarray(im_pil)", "def plot_traj3D_NEA(r_NEA,X_proj='N',Y_proj='E',view=(45,-45),to_scale='XYZ',mrk_size=80,dest_folder=None):\n \n from mpl_toolkits.mplot3d import Axes3D\n import plotting_utilities as plut\n \n fig = plt.figure(figsize=(12.3,10))\n ax = fig.add_subplot(1,1,1,projection='3d')\n \n # 3D trajectory\n ax.plot( r_NEA[0], r_NEA[1], r_NEA[2], color='#006633', ls='solid', linewidth=2)\n ax.scatter(r_NEA[0,0], r_NEA[1,0], r_NEA[2,0], marker='o', s=mrk_size, c='#006633', label='Start')\n ax.scatter(r_NEA[0,-1], r_NEA[1,-1], r_NEA[2,-1], marker=(5,0), s=mrk_size, c='#006633', label='End')\n \n if to_scale=='XYZ': \n plut.make_axis_equal_3d(r_NEA[0],r_NEA[1],r_NEA[2],ax,to_scale='XYZ')\n ax.set_title(\"Trajectory, to scale\")\n elif to_scale=='XY': \n plut.make_axis_equal_3d(r_NEA[0],r_NEA[1],r_NEA[2],ax,to_scale='XY')\n ax.set_title(r\"Trajectory, altitude not to scale\")\n else:\n ax.set_title(\"Trajectory, not to scale\")\n \n XMIN, YMIN, ZMIN = ax.get_xlim().min(), ax.get_ylim().min(), ax.get_zlim().min()\n XMAX, YMAX, ZMAX = ax.get_xlim().max(), ax.get_ylim().max(), ax.get_zlim().max()\n \n ax.set_xlim3d([XMIN, XMAX])\n ax.set_ylim3d([YMIN, YMAX])\n ax.set_zlim3d([ZMIN, ZMAX])\n \n # Ground track\n ax.plot( r_NEA[0], r_NEA[1], np.ones(len(r_NEA[2]))*ZMIN, color='k', ls='dashed', linewidth=1) \n ax.scatter(r_NEA[0,0], r_NEA[1,0], ZMIN, c='w', marker='o', s=0.8*mrk_size)\n ax.scatter(r_NEA[0,-1], r_NEA[1,-1], ZMIN, c='w', marker=(5,0), s=0.8*mrk_size)\n \n # altitude on plane parallel to YZ, Northern or Southern wall of plot\n if X_proj == 'S': \n ax.plot(np.ones(len(r_NEA[0]))*XMIN, r_NEA[1], r_NEA[2], color='0.65', ls='dashed', linewidth=1)\n ax.scatter(XMIN, r_NEA[1,0] , r_NEA[2,0], c='w', marker='o', s=0.8*mrk_size)\n ax.scatter(XMIN, r_NEA[1,-1] , r_NEA[2,-1], c='w', marker=(5,0), s=0.8*mrk_size)\n elif X_proj == 'N':\n ax.plot(np.ones(len(r_NEA[0]))*XMAX, r_NEA[1], r_NEA[2], color='0.65', ls='dashed', linewidth=1)\n ax.scatter(XMAX, r_NEA[1,0] , r_NEA[2,0], c='w', marker='o', s=0.8*mrk_size) \n ax.scatter(XMAX, r_NEA[1,-1] , r_NEA[2,-1], c='w', marker=(5,0), s=0.8*mrk_size)\n \n # altitude on plane parallel to XZ, Eastern or Western wall of plot\n if Y_proj == 'W': \n ax.plot( r_NEA[0], np.ones(len(r_NEA[1]))*YMIN, r_NEA[2], color='0.65', ls='dashed', linewidth=1)\n ax.scatter(r_NEA[0,0], YMIN, r_NEA[2,0], c='w', marker='o', s=0.8*mrk_size)\n 
ax.scatter(r_NEA[0,-1], YMIN, r_NEA[2,-1], c='w', marker=(5,0), s=0.8*mrk_size)\n elif Y_proj == 'E':\n ax.plot( r_NEA[0], np.ones(len(r_NEA[1]))*YMAX, r_NEA[2], color='0.65', ls='dashed', linewidth=1)\n ax.scatter(r_NEA[0,0], YMAX, r_NEA[2,0], c='w', marker='o', s=0.8*mrk_size)\n ax.scatter(r_NEA[0,-1], YMAX, r_NEA[2,-1], c='w', marker=(5,0), s=0.8*mrk_size)\n \n # Labels and legend\n ax.set_xlabel('\\n'+\"North (m)\", linespacing=3.5)\n ax.set_ylabel('\\n'+\"East (m)\", linespacing=3.5)\n ax.set_zlabel('\\n'+r'$h_\\mathrm{SL}$', linespacing=3.5)\n \n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles, labels, ncol=1, loc='best')\n \n # Axis visualization and scale\n ax.invert_xaxis()\n ax.view_init(view[0],view[1]) # Elevation, Azimuth\n \n plt.tight_layout()\n \n # Export\n if dest_folder != None:\n plt.savefig(dest_folder+'plot_traj3D_NEA.pdf')", "def draw_grid(plt):\n x0, x1, x2, x3 = 0, 3057, 6508, 9860\n y0, y1, y2, y3, y4, y5, y6, y7, y8 = 0, 1535, 2041, 2547, 3053, 3559, 4257, 5303, 6978\n alpha, linewidth = 0.3, 0.5\n\n # Vertical Lines\n plt.plot((x0, x0), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x1, x1), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x2, x2), (y0, y5), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x3, x3), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n\n # Horizontal Lines\n plt.plot((x0, x3), (y0, y0), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y1, y1), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y2, y2), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y3, y3), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y4, y4), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y5, y5), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x1), (y6, y6), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x1, x3), (y7, y7), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y8, y8), 'black', alpha=alpha, linewidth=linewidth)", "def plot_chain(chain):\n\n\tlabels = ['a', 'b']\n\tplt.figure(figsize=(20,6))\n\tfor i_dim in range(2):\n\t\tplt.subplot(2,1,i_dim+1)\n\t\tplt.ylabel(labels[i_dim])\n\n\t\tfor i in range(100):\n\t\t\tplt.plot(chain[i,:,i_dim],color='black', alpha=0.5)\n \n\tplt.show()", "def coordPlt(grid, buffer=10, step=5):\n plt.cla()\n\n plt.plot(grid[1][0::step, 0::step],\n grid[0][0::step, 0::step],\n '.-b' )\n\n plt.plot(grid[1][0::step, 0::step].T,\n grid[0][0::step, 0::step].T,\n '.-b' )\n\n plt.axis( [ grid[1].max() + buffer,\n grid[1].min() - buffer,\n grid[0].max() + buffer,\n grid[0].min() - buffer],\n )\n plt.axis('off')\n plt.grid()", "def plot(self, scene=None, **kwargs):\r\n result = []\r\n for tile in self.tiles():\r\n result.append(tile.raw_plot(scene, self.z.min(), self.z.max(),\r\n scalarbar=(result==[]), **kwargs))\r\n if scene is None: scene = result[0].scene\r\n return result", "def show(data_set, number_points: int):\n print(f'info: Showing {number_points} as maximum.')\n sub_set_points = np.random.choice(range(data_set.shape[0]), size=min(data_set.shape[0], number_points))\n x = data_set[sub_set_points, 0]\n y = data_set[sub_set_points, 1]\n z = data_set[sub_set_points, 2]\n\n fig = plt.figure(figsize=(8, 8))\n ax = mplot3d.Axes3D(fig)\n ax.set_title('NMSLIB index 3D representation', fontsize=20)\n ax.scatter(xs=x, ys=y, zs=z)\n plt.show()", "def scatter3d(self, x, y, z, filename=None, spot_cols=None, label=False, stem=False, \n label_font_size=6, 
rotation=134, elevation=48, interactive=False, squish_scales=False, \n spot_size=40, **kargs):\n assert filename, \"scatter(): Must provide a filename\" \n \n xdata = self.__v[x-1]\n ydata = self.__v[y-1]\n zdata = self.__v[z-1]\n \n fig = self.__draw.getfigure(**kargs)\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elevation, azim=rotation)\n \n cols = self.cols\n if spot_cols:\n cols = spot_cols \n \n ax.scatter(xdata, ydata, zdata, edgecolors=\"none\", c=cols, s=spot_size)\n if label:\n for i, lab in enumerate(self.labels):\n ax.text(xdata[i], ydata[i], zdata[i], lab, size=label_font_size, ha=\"center\", va=\"bottom\")\n \n if stem: # stem must go after scatter for sorting. Actually, not true right? matplotlib uses zorder for that...\n z_min = min(zdata)\n for x_, y_, z_ in zip(xdata, ydata, zdata): \n line = art3d.Line3D(*list(zip((x_, y_, z_min), (x_, y_, z_))), marker=None, c=\"grey\", alpha=0.1)\n ax.add_line(line)\n \n ax.set_xlabel(\"PC%s\" % (x,)) # can be overridden via do_common_args()\n ax.set_ylabel(\"PC%s\" % (y,))\n ax.set_zlabel(\"PC%s\" % (z,))\n \n if \"logx\" in kargs and kargs[\"logx\"]:\n ax.set_xscale(\"log\", basex=kargs[\"logx\"])\n if \"logy\" in kargs and kargs[\"logy\"]:\n ax.set_yscale(\"log\", basey=kargs[\"logy\"])\n \n if squish_scales: \n # Don't worry about kargs, do_common_args will overwrite.\n ax.set_xlim([min(xdata), max(xdata)])\n ax.set_ylim([min(ydata), max(ydata)])\n ax.set_zlim([min(zdata), max(zdata)])\n \n self.__draw.do_common_args(ax, **kargs)\n if \"zlims\" in kargs:\n ax.set_zlim([kargs[\"zlim\"][0], kargs[\"zlim\"][1]])\n \n if interactive:\n fig.show() # hope you are not on a cluster!\n \n real_filename = self.__draw.savefigure(fig, filename)\n \n config.log.info(\"scatter3d(): Saved 'PC%s' vs 'PC%s' vs 'PC%s' scatter to '%s'\" % (x, y, z, real_filename))", "def display4(*args):\n #-------------------- unpack\n twiss_func = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n #-------------------- beta x,y & dispersion x\n s = [twiss_func(i,'s') for i in range(twiss_func.nbpoints)] # Abszisse\n bx = [twiss_func(i,'bx') for i in range(twiss_func.nbpoints)] # beta x\n by = [twiss_func(i,'by') for i in range(twiss_func.nbpoints)] # beta y\n dx = [twiss_func(i,'dx') for i in range(twiss_func.nbpoints)] # dispersion x\n#-------------------- longitudinal trajectories\n z1= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n z2= [sin_like(i,'s') for i in range(sin_like.nbpoints)]\n sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n vzero = [0. 
for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n # fighdr = 'lattice version = {}, input file = {}'.format(PARAMS['lattice_version'],PARAMS['input_file'])\n fig = plt.figure(num=1,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- beta functions\n splot211=plt.subplot(211)\n splot211.set_title('beta x,y')\n # mapping box\n splot211.text(0.01, 1.1, UTIL.FLAGS.get('mapping'),transform=splot211.transAxes,fontsize=8,bbox=dict(boxstyle='round',facecolor='wheat',alpha=0.5),verticalalignment='top')\n # function plots\n plt.plot(s,bx, label=r\"$\\beta$x [m]\", color='black', linestyle='-')\n plt.plot(s,by, label=r\"$\\beta$y [m]\", color='red', linestyle='-')\n plt.plot(s,dx, label=r'$\\eta_x$ [m]' , color='green', linestyle='-') # dispersion x\n vscale=splot211.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n plt.plot(vis_abszisse,viseoz,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='green',linestyle='--')\n # zero line\n splot211.plot(vis_abszisse,vzero,color='green',linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- longitudinal tracks z, dP/P\n # ax_l = left abszisse\n ax_l=plt.subplot(212)\n # ax_l=plt.subplot(10,1,(7,9))\n ax_l.set_title('synchrotron oscillation')\n ax_l.set_ylabel(r\"z [mm]\")\n ax_l.tick_params(axis='y', colors='green')\n ax_l.yaxis.label.set_color('green')\n ax_l.plot(z1,cz,label='C',color='green')\n ax_l.plot(z2,sz,label='S',color='green',linestyle=':')\n plt.legend(loc='lower left',fontsize='x-small')\n # ax_r = right abszisse\n ax_r = ax_l.twinx()\n ax_r.set_ylabel(r'$\\Delta$p/p [%]')\n ax_r.tick_params(axis='y', colors='red')\n ax_r.yaxis.label.set_color('red')\n ax_r.plot(z2,cdp,label='C',color='red')\n ax_r.plot(z2,sdp,label='S',color='red',linestyle=':')\n ax_r.plot(vis_abszisse,vzero,color='red', linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n # lattice elements\n vscale=ax_l.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n ax_l.plot(vis_abszisse,viseoz,label='',color='black')\n ax_l.plot(vis_abszisse,vzero,color='green',linestyle='--')", "def plot(self, axes=None, simple=True, origin=None):\r\n if axes is None: # new figure\r\n fig = figure()\r\n axes = Axes3D(fig)\r\n x, y, z, d = self.x / um, self.y / um, self.z / um, self.diameter / um\r\n if origin is not None:\r\n x0, y0, z0 = origin\r\n x = hstack((x0, x))\r\n y = hstack((y0, y))\r\n z = hstack((z0, z))\r\n if len(x) == 1: # root with a single compartment: probably just the soma\r\n axes.plot(x, y, z, \"r.\", linewidth=d[0])\r\n else:\r\n if simple:\r\n axes.plot(x, y, z, \"k\")\r\n else: # linewidth reflects compartment diameter\r\n for n in range(1, len(x)):\r\n axes.plot([x[n - 1], x[n]], [y[n - 1], y[n]], [z[n - 1], z[n]], 'k', linewidth=d[n - 1])\r\n for c in self.children:\r\n c.plot(origin=(x[-1], y[-1], z[-1]), axes=axes, simple=simple)", "def visualizeTrajectory(y, g):\n visualizeObs()\n x = np.linspace(-1.5, 1.5, 13)[1:-1]\n plt.plot(np.concatenate(([-1.5],x,[1.5])), np.concatenate(([0],y,[0])), color='black', marker='+')\n if g is not None:\n for i in range(y.size):\n plt.arrow(x[i], y[i], 0, -0.5*g[i], color='blue', head_width=0.05)", "def heatmap3d(xL, yL ,zL, valueL, grid=True, color='cool',\n size=100, marker='o',alpha=0.8,save=False, savepath='./'):\n from 
mpl_toolkits.mplot3d import Axes3D\n #Normalize valueL into 0 to 1\n normalizedValueL = list( (valueL - min(valueL)) / (max(valueL) - min(valueL)) )\n\n if color=='hot':\n colors = plt.cm.hot_r(normalizedValueL)\n # For color bar display\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.hot_r)\n elif color=='cool':\n colors = plt.cm.cool_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.cool_r)\n elif color=='hsv':\n colors = plt.cm.hsv_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.hsv_r)\n elif color=='jet':\n colors = plt.cm.jet_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.jet_r)\n elif color=='gray':\n colors = plt.cm.gray_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.gray_r)\n elif color=='spring':\n colors = plt.cm.spring_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.spring_r)\n elif color=='summer':\n colors = plt.cm.summer_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.summer_r)\n elif color=='autumn':\n colors = plt.cm.autumn_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.autumn_r)\n elif color=='winter':\n colors = plt.cm.winter_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.winter_r)\n else:\n print('Since there is no color, it will be the default cool')\n colors = plt.cm.cool_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.cool_r)\n\n colmap.set_array(valueL)\n\n fig = plt.figure()\n ax = Axes3D(fig)\n\n # Set the grid on of off\n if not grid:\n ax.grid(False)\n\n ax.scatter(xL,yL,zL, s =size, c=colors, marker=marker, alpha=alpha)\n # For color bar display\n cb = fig.colorbar(colmap)\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n if save==True:\n date = datetime.datetime.now()\n plt.savefig(savepath+'3Dheatmap_'+str(date.year)+'_'+ str(date.month)+ \\\n '_'+str(date.day)+'_'+str(date.hour)+'_'+ \\\n str(date.minute)+'_'+str(date.second), dpi=150)\n plt.show()", "def plot(self,\n name: str,\n G_list: list = None,\n V_goal_list: list = None,\n opt_path: list = None):\n colorscales = ['Reds', 'Greens', 'Blues', 'Magentas']\n color = ['red', 'green', 'blue', 'magenta']\n pd = []\n\n if self.d == 3:\n X = []\n Y = []\n Z = []\n if opt_path:\n for i, path in enumerate(opt_path):\n X.clear(), Y.clear(), Z.clear()\n for state in path:\n X += [state[0]]\n Y += [state[1]]\n Z += [state[2]]\n pd.append(go.Scatter3d(x=X, y=Y, z=Z, marker=dict(color=color[i], size=5), name='Path_M' + str(i)))\n\n if G_list:\n X.clear(), Y.clear(), Z.clear()\n for G in G_list:\n for e in G.E.values():\n X += [G.V[e.node_a].value[0], G.V[e.node_b].value[0], None]\n Y += [G.V[e.node_a].value[1], G.V[e.node_b].value[1], None]\n Z += [G.V[e.node_a].value[2], G.V[e.node_b].value[2], None]\n pd.append(go.Scatter3d(x=X, y=Y, z=Z, mode='lines', showlegend=True,\n line=dict(color='rgb(125,125,125)', width=0.5),\n hoverinfo='none', name='Tree'))\n pd.append(go.Scatter3d(x=[self.start[0]], y=[self.start[1]], z=[self.start[2]],\n mode='markers', marker=dict(color='red', size=5), name='Start'))\n\n if V_goal_list:\n X.clear(), Y.clear(), Z.clear()\n for i, V in enumerate(V_goal_list):\n for j in V:\n X += [G_list[i].V[j].value[0]]\n Y += [G_list[i].V[j].value[1]]\n Z += [G_list[i].V[j].value[2]]\n pd.append(go.Scatter3d(x=X, y=Y, z=Z, mode='markers',\n marker=dict(color='magenta', size=5),\n name='Intersection nodes'))\n\n if self.name in ['3d_point_wo_obstacles', '3d_point_w_obstacles']:\n for i, m in enumerate(self.manifolds):\n limits = 
[self.lim_lo[0], self.lim_up[0], self.lim_lo[1], self.lim_up[1]]\n X_m, Y_m, Z_m = m.draw(limits=limits)\n\n if m.draw_type == \"Scatter\":\n pd.append(go.Scatter3d(x=X_m, y=Y_m, z=Z_m, showlegend=False, mode='markers',\n marker=dict(color=color[i], size=5)))\n elif m.draw_type == \"Surface\":\n pd.append(go.Surface(x=X_m, y=Y_m, z=Z_m, opacity=0.8, showscale=False,\n colorscale=colorscales[i]))\n\n for obs in self.obstacles:\n plot_box(pd=pd, pos=np.array([0., 0., obs[0]]), quat=np.array([0., 0., 0., 1.]), size=np.array(obs[1:]))\n\n fig = go.Figure(data=pd, layout=go.Layout(yaxis=dict(scaleanchor=\"x\", scaleratio=1)))\n plot(fig, filename='plots/task_' + self.name + '_' + name + '.html', auto_open=True)", "def render(self):\r\n super().render()\r\n layers, titles, latVect, lonVect = self.make_layers()\r\n LON, LAT = np.meshgrid(lonVect, latVect)\r\n lon = LON.flatten()\r\n lat = LAT.flatten()\r\n for i in range(len(layers)):\r\n vals = layers[i].flatten()\r\n hovertext = []\r\n for k in range(len(vals)):\r\n hovertext.append('lon: {:.2f}<br>lat: {:.2f}<br>{}: {:.1e}'.format(lon[k], lat[k], self.variable + self.unit,vals[k]))\r\n if self.levels == 0:\r\n data = [\r\n go.Heatmap(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n zmin=self.vmin,\r\n zmax=self.vmax,\r\n hoverinfo='text',\r\n text=hovertext \r\n )\r\n ]\r\n elif self.levels > 0:\r\n data = [\r\n go.Contour(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n hoverinfo='text',\r\n text=hovertext, \r\n connectgaps=False,\r\n contours=dict(\r\n coloring='heatmap',\r\n showlabels=True,\r\n start=self.vmin,\r\n end=self.vmax,\r\n size=(self.vmax-self.vmin) / float(self.levels)\r\n )\r\n # line=dict(smoothing=0.85) \r\n )\r\n ] \r\n\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel}\r\n ) \r\n\r\n\r\n\r\n if self.surface3D:\r\n data = [\r\n go.Surface(\r\n x=lonVect,\r\n y=latVect,\r\n z=layers[i],\r\n colorscale=self.cmap,\r\n # hoverinfo='text',\r\n # text=hovertext \r\n )\r\n ]\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n scene = dict(\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel},\r\n zaxis={'title': self.variable + self.unit}\r\n )\r\n ) \r\n\r\n\r\n self._save_plotly_(go, data, layout)" ]
[ "0.7024431", "0.6795507", "0.66586065", "0.6364133", "0.6340602", "0.63186383", "0.61583287", "0.61494225", "0.613501", "0.6111204", "0.6093264", "0.60898846", "0.6077247", "0.60679746", "0.6041715", "0.5961347", "0.59601736", "0.5956387", "0.59558564", "0.5903093", "0.58980805", "0.588547", "0.5870054", "0.58670604", "0.58387476", "0.5833588", "0.58301294", "0.5827255", "0.5820235", "0.58175", "0.5808704", "0.5803027", "0.58016974", "0.58005875", "0.5761439", "0.5754024", "0.57474124", "0.57338417", "0.5725175", "0.57193947", "0.5719114", "0.56907743", "0.56745934", "0.5657793", "0.5648571", "0.5643836", "0.5632101", "0.56241715", "0.5621989", "0.56055516", "0.5596854", "0.55734056", "0.5570661", "0.55661756", "0.55574626", "0.5557056", "0.555229", "0.5552106", "0.55492824", "0.5549248", "0.554207", "0.553727", "0.55201834", "0.55157936", "0.55000144", "0.54940426", "0.5491326", "0.5488449", "0.5485109", "0.54797095", "0.5475756", "0.5475433", "0.54750544", "0.5474234", "0.547058", "0.5466678", "0.5462453", "0.5456705", "0.5454241", "0.54537684", "0.5443782", "0.5442128", "0.54418796", "0.5438772", "0.5435398", "0.5418453", "0.5418286", "0.5414185", "0.5409342", "0.54014397", "0.5400879", "0.54006606", "0.5400498", "0.5399726", "0.5398904", "0.5394333", "0.5385519", "0.5384615", "0.53822863", "0.5380725" ]
0.66579044
3
Initializes the class with n and raises an error if id is too big
def __init__(self, n):
    if n >= 52:
        raise Exception("This card does not exist")
    self.n = n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, n):\n self.n = n", "def __init__(self, n):\n self.n = n", "def __init__(self, N):\n pass", "def __init__(self, id=None):\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects", "def __init__(self, id=None):\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects", "def __init__(self, id=None):\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects", "def __init__(self, id=None):\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects", "def __init__(self, id=None):\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = self.__nb_objects", "def __init__(self, id=None):\n if id:\n self.id = id\n\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects", "def __init__(self, id=None):\n\n if id is not None:\n \"\"\"asign instance attribute, public\"\"\"\n\n self.id = id\n else:\n \"\"\"increment attribute of class\"\"\"\n\n Base.__nb_objects += 1\n \"\"\"and assign the new value of __nb...\n to the instance atrribute, public id\"\"\"\n\n self.id = self.__nb_objects", "def __init__(self, id: int = 0, /):", "def __init__(self, id: int, /):", "def __init__(self, id: int, /):", "def __init__(self, id: int, /):", "def __init__(self, id: int, /):", "def __init__(self, n):\n self.n = n\n self.e = [set() for i in range(n)]", "def __init__(self, n):\n self.n = n\n self.parents = [-1] * n", "def __init__(self, id, n):\n\n hijos = np.delete(np.arange(n),id,0)#Se remueve el nodo de la lista una vez recorrido\n self.__raiz = NodoCiudad(id,None,hijos)", "def __init__(\n self, \n id: int, \n /\n ):", "def __init__(self, size=0):\n self.__size = size\n try:\n size += 1\n if(size < 0):\n raise(ValueError)\n except TypeError:\n raise Exception('size must be an integer')\n except ValueError:\n raise Exception('size must be >= 0')", "def __init__(self, number=0):\n pass", "def __init__(self, n=0, e=0):\r\n raise NotImplementedError()", "def __init__(self,n):\n\t\tself._dict={}\n\t\tfor i in range(n):\n\t\t\tself._dict[i]=[]", "def __init__(self, id=None):\n\n if id is not None:\n self.id = id\n \"\"\"if id is not None, assign the public instance attribute id \"\"\"\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects\n \"\"\"increment __nb_objectsandassignthenewvaluepublicinstanceattid\"\"\"", "def qf_init(self, N):\n for x in range(N):\n self.id.append(x)\n self.sz.append(1)", "def set_NID(i, n=8):\n global NID, ppn\n ppn = n\n assert NID is None, \"Attempting to call set_NID() more than once\"\n msg = \"NID (%s) must be an even multiple of # processors per node (%s)\"\n assert i % ppn == 0, msg % (i, ppn)\n NID = i", "def __init__(self, n: int):\n self.invalid_1 = set()\n self.invalid_2 = set()\n self.p1 = collections.defaultdict(int)\n self.p2 = collections.defaultdict(int)\n self.tar = n\n self.walked = set()", "def __init__(self, n):\n self._count = n\n self._parent = list(range(n))\n self._rank = [0]*n\n\n \"\"\"\n Added a 'binary' list to keep track of sites that have been\n unioned, as well as an integer that counts the number of\n isolated sites. 
Also a list to keep track of the roots'\n tree sizes, as well as an integer that holds the maximum\n tree size (maximum component in the graph)\n \"\"\"\n self._nodes = [1]*n\n self._iso = n\n self._size = [1]*n\n self._max = 0", "def __init__(self, id):\n \n self.id = id", "def __init__(self, max_n):\n self._max_n = max_n\n self.__pq = [0] * (max_n + 1)\n self.__qp = [-1] * (max_n + 1)\n self.__keys = [None] * (max_n + 1)\n self.__n = 0", "def n(self, n) :\n\t\ttry :\n\t\t\tself._n = n\n\t\texcept Exception as e:\n\t\t\traise e", "def qf_init(self, N):\n for x in range(N):\n self.id.append(x)\n\n for x in range(N):\n self.sz.append(1)", "def __init__(self, n=10000):\n if n < 3:\n raise ValueError('too small n: {0}'.format(n))\n self._p = list(generate_primes(n))\n self._len = len(self._p)\n self._i = 0\n self._n = n\n self._a = n", "def __init__(__self__, *,\n number: int):\n pulumi.set(__self__, \"number\", number)", "def __init__(self):\n self.id = uuid.uuid4()\n self.MAX_LENGTH = 999999999999", "def __init__(self, n):\n self.row, self.col, self.diag, self.anti_diag, self.n = [0] * n, [0] * n, 0, 0, n", "def __init__(self, n: int):\n\n self.root = [-1] * n", "def __init__(self, value, n):\n\t\tif value < 0:\n\t\t\traise ValueError('Negative values not allowed')\n\t\tif value > n:\n\t\t\traise ValueError('Value can\\'t be greater than n')\n\t\tif n < 1:\n\t\t\traise ValueError('n must be positive')\n\t\tself.value = value\n\t\tself.n = n", "def __init__(self, n=1):\n vertices = [Vertex(i) for i in range(n)]\n for vertex in vertices:\n self.add_vertex(vertex)\n self.populate_graph()", "def __init__(self, n: int) -> None:\n\n assert n > 1, \"for n = 1 use Bernoulli distribution.\"\n\n self.n = n", "def __init__(self, id: str):\n self.id = id", "def __init__(self, n_components):\n self.n_components = n_components", "def __init__(self, *, id: int = 0, timeout: int = 5000):", "def set_n(self, n: int) -> None:\r\n self.n_is_set = True\r\n self.n = n", "def __init__(self, id: int, *, freq: int = 400000) -> None:\n ...", "def __init__(self, n, a=None):\n if a == None:\n self.a = float(\"inf\")\n else:\n self.a = a\n self.n = n % self.a", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(self,\n id: str) -> None:\n self.id = id", "def __init__(self, size=0):\n if type(size) == int:\n if size >= 0:\n self.__size = size\n else:\n raise ValueError('size must be >= 0')\n else:\n raise TypeError('size must be an integer')", "def __init__(self, n, m):\n\t\tassert n > 0 and m > 0\n\t\tself.rows={}\n\t\tself.size = (n,m)", "def __init__(self, size=0):\n self.__size = size\n if isinstance(self.__size, int):\n if size < 0:\n raise ValueError('size must be >= 0')\n else:\n self.__size = size\n else:\n raise TypeError('size must be an integer')", "def __init__(self, n: int):\n self.size = n\n self.board = [[CellValues.EMPTY.value] * n for _ in range(n)]\n self.num_empty_cells = n * n", "def __init__(self, number: int, description: str):\n pass", "def __new__(cls, n, m):\n assert n >= m\n assert m > 0\n assert n > 0\n assert isinstance(n, int) and isinstance(m, int)\n return int.__new__(cls, Ar(n, m) / Ar(m, m))", "def __init__(self, size = 100):\n\t\tself.__parent = [i for i in range(size)]\n\t\tself.__rank = [0 for _ in range(size)]", "def __init__(self, n, sents):\n # call superclass to compute counts\n super(AddOneNGram, self).__init__(n, sents)", "def __init__(self, 
size=0):\n if isinstance(size, int):\n self.__size = size\n else:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")", "def __init__(self, iterable_input, n=0, name='re-cycle', verbose=True):\n super().__init__(iterable_input=iterable_input, name=name, verbose=verbose)\n self.n = n", "def __init__(self, n_components=None):\n self.n_components = n_components", "def __init__(self, n: int, identity_element_func, binary_operation_func):\n self.n = n\n self.identity = identity_element_func\n self.binary = binary_operation_func\n n2 = 1 # n2はnより大きい2の冪数\n while n2 < n:\n n2 <<= 1\n self.n2 = n2\n self.tree = [identity_element_func() for _ in range(n2 << 1)]", "def __init__(self, id: str) -> None:\n ...", "def __init__(self, no, length):\n self.no = no\n self.length = length", "def __init__(self, arr, n):\n self.BITree = [0] * (n+1)\n self.size = n\n\n for i in range(n):\n self.update(i, arr[i])", "def __init__(self, n):\n self._n = n\n self._grid = [[False] * n for _ in range(n)]\n # create sites for n-by-n grid and 2 \"virtual\" sites for top and bottom\n # self._uf = QuickFindUF(n * n + 2)\n self._uf = WeightedQuickUnionUF(n * n + 2) # QuickFindUF(n * n + 2)\n # connect top and bottom virtual sites with respecting sides of grid\n self._top_idx = n * n\n self._bottom_idx = n * n + 1\n for i in range(n):\n self._uf.union(self._top_idx, i)\n self._uf.union(self._bottom_idx, (n - 1) * n + i)", "def __init__(self, nonogram_size):\n # create random id\n self.nonogram_id = uuid.uuid4()\n self.row_numbers = [(2), (2), (2)]\n self.column_numbers = [(1, 1), (3), (1)]\n self.nonogram_size = nonogram_size\n self.grid = Nonogram.create_rand_grid(nonogram_size)\n #TODO\n self.fitness = 999", "def __init__(self, size=0):\n if isinstance(size, int) is not True:\n raise TypeError(\"size must be an integer\")\n elif size < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = size", "def __init__(__self__, *,\n id: str):\n pulumi.set(__self__, \"id\", id)", "def __init__(__self__, *,\n id: str):\n pulumi.set(__self__, \"id\", id)", "def __init__(self, size=0):\n if type(size) != int:\n raise TypeError(\"size must be an integer\")\n elif size < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = size\n return", "def __init__(self, n):\n self.rows = [0] * n\n self.cols = [0] * n\n self.diagonal1 = 0\n self.diagonal2 = 0\n self.n = n", "def __init__(self, Nbin=10):\n \n self.Nbin = Nbin", "def __init__(self, node_id):\n # Assign ID and update class-counter\n self.id = node_id\n\n # Initialize\n self.is_sequence_end = False\n self.children = {}", "def __init__(self, *num):\n\n if len(num) != 1:\n raise InvalidArgsError(self)\n super().__init__(num)", "def __init__(self, *num):\n\n if len(num) != 1:\n raise InvalidArgsError(self)\n super().__init__(num)", "def __init__(self, n):\r\n self.size = n\r\n self.mat = []\r\n for i in range(n):\r\n self.mat.append([0] * n)", "def __init__(self, players: list, max_size=0):\n\t\tself.id = uuid.uuid4()\n\t\tself.max_size = max_size\n\t\tif not len(players) > max_size:\n\t\t\tlogging.error('Size mismatch')\n\t\tself.players = players", "def __init__(self, count) -> None:\r\n super().__init__(\"Unsafe repeater was specified (out-of-bounds)\")\r\n\r\n self.count = count", "def __init__(self, size=0):\n if type(size) is not int:\n raise TypeError('size must be an integer')\n if size < 0:\n raise ValueError('size must be >= 0')\n self.__size = size", "def __init__(self, n):\n self.n = n\n self.rows 
= [0 for _ in range(n)]\n self.colums = [0 for _ in range(n)]\n self.diag = [0,0]", "def __init__(self, iterable_input, n=1, name='re-repeat', verbose=True):\n super().__init__(iterable_input=iterable_input, name=name, verbose=verbose)\n self.n = n", "def __init__(self, samples_per_class=10, n_classes=10, n_features=1):\n self.samples_per_class = samples_per_class\n self.n_classes = n_classes\n self.n_features = n_features\n\n # Create a dataframe to be consistent with other Datasets\n self.df = pd.DataFrame({\n 'class_id': [i % self.n_classes for i in range(len(self))]\n })\n self.df = self.df.assign(id=self.df.index.values)", "def __init__(self, n: int):\n self.rows = [[0] * n for i in range(2)]\n self.cols = [[0] * n for i in range(2)]\n self.d = [0, 0]\n self.subd = [0, 0]\n self.n = n\n self.winner = 0", "def __init__(self, size):\n self.__size = size\n self.integer_validator(\"size\", size)\n super().__init__(size, size)\n self._size = size", "def __init__(self, d, n=None):\n self.data = d\n self.next_node = n\n # adding a hash value for the data\n self.hash = self.generate_hash()\n return None", "def __init__(self, size, seed=None):\n self.size = size\n self.seed = seed", "def gen_random_id(self, n: int = 12) -> object:\n random_source = string.ascii_letters + string.digits\n id_ = random.choice(string.ascii_lowercase)\n id_ += random.choice(string.ascii_uppercase)\n id_ += random.choice(string.digits)\n\n for i in range(n):\n id_ += random.choice(random_source)\n\n _list = list(id_)\n random.SystemRandom().shuffle(_list)\n clid = ''.join(_list)\n return clid", "def from_id (self, id):\n self.seq = int(id)\n self.newobj = False\n return self", "def __init__(self, maxNumbers):\n self.numbers = set(range(maxNumbers))", "def __init__(self):\n self._id = None\n self._id_count = 1\n self._name = None\n self._sex = None\n self._birt = None\n self._deat = None\n self._spouse_of = []\n self._child_of = None", "def __init__(self, k):\n self.k = k\n self.N = 2**self.k", "def __init__(self, name, cnp):\n self._id = Client.counter\n Client.counter += 1\n self.name = name\n self.cnp = cnp", "def __init__(self, n: int):\n self.n = n\n self.rows_1 = [0 for _ in range(n + 1)]\n self.rows_2 = [0 for _ in range(n + 1)]\n self.cols_1 = [0 for _ in range(n + 1)]\n self.cols_2 = [0 for _ in range(n + 1)]\n self.diag1 = [0 for _ in range(n + 1)]\n self.diag2 = [0 for _ in range(n + 1)]", "def __init__(self, n: int):\n self.n = n\n self.board = [[0 for _ in range(n)] for _ in range(n)]", "def __init__(self, m, n, data):\n self.m = m\n self.n = n\n self.data = data", "def __init__(self, n=5, name=None):\n super().__init__(name=name)\n self.n = n\n self._images = []\n self._sigmas = []", "def __init__(self):\n self.id = id(self)", "def __init__(self,size=100):\n self.__parent = [ i for i in range(size) ]\n self.__size = [ 1 for i in range(size) ]\n self.__rank = [ 0 for i in range(size) ]", "def __init__(self):\n self.number: int" ]
[ "0.74003464", "0.74003464", "0.7096325", "0.6966253", "0.6966253", "0.6966253", "0.6966253", "0.6960996", "0.6900746", "0.6753491", "0.6679413", "0.66501856", "0.66501856", "0.66501856", "0.66501856", "0.6589031", "0.6516931", "0.6489422", "0.64609724", "0.6439665", "0.64046675", "0.64027256", "0.6365352", "0.63618076", "0.6346405", "0.63398767", "0.6325269", "0.630154", "0.6293359", "0.6260912", "0.6240175", "0.6229035", "0.622393", "0.62181884", "0.61945736", "0.61859626", "0.616481", "0.61536366", "0.6150891", "0.61340505", "0.6122732", "0.61092025", "0.60903656", "0.6083796", "0.60780483", "0.6073762", "0.60487044", "0.60487044", "0.60487044", "0.60487044", "0.6028988", "0.600183", "0.59982634", "0.5997378", "0.59947294", "0.5990632", "0.5980553", "0.59740436", "0.5972591", "0.593699", "0.5917265", "0.590583", "0.5905609", "0.58997095", "0.5866735", "0.58567476", "0.583578", "0.5828861", "0.58264244", "0.58264244", "0.582256", "0.58185905", "0.581736", "0.5807656", "0.5801784", "0.5801784", "0.5799726", "0.57994133", "0.57993203", "0.5795431", "0.5795038", "0.5794225", "0.5772901", "0.5770859", "0.57561326", "0.57430595", "0.57371664", "0.5725989", "0.5722327", "0.5722216", "0.5716665", "0.570615", "0.5700622", "0.5693473", "0.5688455", "0.5684665", "0.56646025", "0.56607485", "0.5657552", "0.56529945" ]
0.67713875
9
Returns a card according to its rank and symbol by comparing the suit to sym and rank to rank_sym
def __repr__(self):
    return (str(self.rank_sym[self.rank()]) + self.sym[self.suit()])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCard(self, rank, suit):\r\n for card in self.cards:\r\n if card.rank == rank and card.suit == suit:\r\n return card\r\n return None", "def card_factory(rank,suit):\n pass", "def get_card(self, suit, face):\n for card in self.deck:\n if card.suit == suit and card.value == face:\n return card", "def read_card():\n suit_is_valid = False\n while not suit_is_valid:\n suit_input = input('Suit: ').upper()\n for suit in Suit:\n if suit_input == suit.name:\n card_suit = suit\n suit_is_valid = True\n\n rank_is_valid = False\n while not rank_is_valid:\n rank_input = input('Rank: ').upper()\n for rank in Rank:\n if rank_input == rank.name:\n card_rank = rank\n rank_is_valid = True\n return Card(card_suit, card_rank)", "def _compute_rank(self):\n# print(Card((self.ranks[0]),self.suits[0]))\n# print(Card((self.ranks[1]),self.suits[1]))\n# print(Card((self.ranks[2]),self.suits[2]))\n# print(Card.ranks[self.ranks[0]])\n# #print(Card.ranks[self.ranks[0]+1])\n# print(self.ranks[1])\n# print(Card.suits[self.suits[1]])\n a = ['Ace','2','3']\n newlist =[self.ranks[0],self.ranks[1],self.ranks[2]]\n newlist = sorted(newlist)\n if(Card.suits[self.suits[0]] == Card.suits[self.suits[1]] == Card.suits[self.suits[2]]):\n #a = ['Ace','2','3']\n if(Card.ranks[self.ranks[0]] in a) and (Card.ranks[self.ranks[1]] in a) and (Card.ranks[self.ranks[2]] in a):\n self.rank=5\n else:\n if(newlist[1] - newlist[0]) == 1 and (newlist[2]-newlist[1])==1:\n #StraightFlush\n self.rank=5\n else:\n #Flush\n self.rank=2\n \n #Threeofakind\n elif (Card.ranks[self.ranks[0]] == Card.ranks[self.ranks[1]] == Card.ranks[self.ranks[2]]):\n self.rank=4\n #Pair\n elif(Card.ranks[self.ranks[0]]==Card.ranks[self.ranks[1]] or Card.ranks[self.ranks[0]]==Card.ranks[self.ranks[2]] or Card.ranks[self.ranks[1]]==Card.ranks[self.ranks[2]] or Card.ranks[self.ranks[2]]==Card.ranks[self.ranks[1]]):\n self.rank=1 \n #Straight\n elif(((newlist[1] - newlist[0]) == 1) and (newlist[2]-newlist[1])==1):\n self.rank=3\n \n elif((Card.ranks[self.ranks[0]] in a) and (Card.ranks[self.ranks[1]] in a) and (Card.ranks[self.ranks[2]] in a)):\n if(Card.ranks[self.ranks[0]] != Card.ranks[self.ranks[1]] != Card.ranks[self.ranks[2]]):\n #if((Card.ranks[self.ranks[0]] != Card.ranks[self.ranks[1]]) and (Card.ranks[self.ranks[0]]!= Card.ranks[self.ranks[2]])and (Card.ranks[self.ranks[1]]!= Card.ranks[self.ranks[2]])):\n self.rank=3\n\n else:\n self.rank=0\n #pass", "def rank_matches(cards, rank):\n\n return [card for card in cards if card.rank == rank]", "def test():\n sf = \"6C 7C 8C 9C TC\".split() # Straight Flush\n sf1 = \"6C 7C 8C 9C TC\".split() # Straight Flush\n sf2 = \"6D 7D 8D 9D TD\".split() # Straight Flush\n fk = \"9D 9H 9S 9C 7D\".split() # Four of a Kind\n fk3 = \"TC TS TH 2C TD\".split() # Four of a Kind\n fh = \"TD TC TH 7C 7D\".split() # Full House\n fl = \"AH KH JH 6H TH\".split() # Flush\n st = \"AH KC QD JD TS\".split() # Straight\n tk = \"2H 2C 2D AC TD\".split() # Three of kind\n tp = \"TD 9H TH 7C 9S\".split() # Two Pair\n op = \"TD TC AD KD QD\".split() # One Pair\n hq = \"2D 3D 4C 5H 7H\".split() # High card\n al = \"AC 2D 4H 3D 5S\".split() # Ace-Low Straight\n tp1 = \"7H 7D 9C 3C 9S\".split() #Two Pair\n fkranks = card_ranks(fk)\n tpranks = card_ranks(tp)\n op1 = \"KH 7C 5S KS 2S\".split() # One pair\n tp2 = \"TH 3S 2H 3D TC\".split() # Two pair\n tk1 = \"TH JD JH 8C JC\".split() # Three of kind\n hq1 = \"TH 9D 5C 3H 2C\".split() # High card\n f3 = \"2C 4C 6C 7C TC\".split() # Flush\n s3 = \"3C 4D 5H 6D 7H\".split() # Straight\n assert 
poker([fk3, f3, s3]) == fk3 #gilje start\n assert poker([sf, 20*fk]) == sf\n assert poker([fk3, 5*f3]) == fk3\n assert card_ranks(fk3) == [10, 10, 10, 10, 2]\n assert card_ranks(f3) == [10, 7, 6, 4, 2]\n assert hand_rank(fk3) == (7, 10, 2)\n assert hand_rank(f3) == (5, [10, 7, 6, 4, 2])\n assert flush(f3) == True\n assert straight(card_ranks(s3)) == True\n assert straight(card_ranks(f3)) == False #gilje slutt\n assert poker([fh, tk, hq]) == fh #oistein start\n assert poker([fl, sf1, tk]) == sf1\n assert poker([op, al, fh]) == fh\n assert poker([st, fk, tp]) == fk\n assert poker([tk, tp, op]) == tk\n assert poker([hq, op, hq]) == op\n assert card_ranks(op1) == [13, 13, 7, 5, 2]\n assert card_ranks(tp2) == [10, 10, 3, 3, 2]\n assert card_ranks(tk1) == [11, 11, 11, 10, 8]\n assert card_ranks(hq1) == [10, 9, 5, 3, 2] #oistein slutt\n assert poker([hq, tp, op]) == tp#steffen start\n assert poker([al, st]) == st\n assert poker([al, st, fl]) == fl\n assert card_ranks(hq) == [7, 5, 4, 3, 2]\n assert card_ranks(fk) == [9, 9, 9, 9, 7]\n assert card_ranks(fh) == [10, 10, 10, 7, 7]#steffen slutt\n assert poker([sf2, tk, al]) == sf2#arild start\n assert poker([hq, st]) == st\n assert poker([al, st, fk]) == fk\n assert flush(fl) == True\n assert straight(card_ranks(tp)) == False\n assert card_ranks(fk) == [9, 9, 9, 9, 7]\n assert card_ranks(hq) == [7, 5, 4, 3, 2]\n assert hand_rank(tk) == (3, 2, [14, 10, 2, 2, 2])\n assert hand_rank(st) == (4, 14)\n assert kind(5, tpranks) == None#arild slutt\n assert poker([tp, op]) == tp #Even start\n assert poker([hq, tk]) == tk\n assert poker([sf1] + 50*[fl]) == sf1\n assert card_ranks(sf1) == [10, 9, 8, 7, 6]\n assert card_ranks(tk) == [14, 10, 2, 2, 2]\n assert card_ranks(st) == [14, 13, 12, 11, 10]\n assert kind(4, fkranks) == 9\n assert kind(3, fkranks) == None\n assert kind(2, tpranks) == 10\n assert kind(1, fkranks) == 7 #Even slutt\n assert poker([sf1, fk, fh]) == sf1\n assert poker([fk, fh]) == fk\n assert poker([fh, fh]) == [fh, fh]\n assert poker([sf1]) == sf1\n assert poker([sf1] + 99*[fh]) == sf1\n assert hand_rank(sf1) == (8, 10)\n assert hand_rank(fk) == (7, 9, 7)\n assert hand_rank(fh) == (6, 10, 7)\n assert straight(card_ranks(al)) == True\n assert poker([sf1, sf2, fk, fh]) == [sf1, sf2]\n assert kind(4, fkranks) == 9\n assert kind(3, fkranks) == None\n assert kind(2, fkranks) == None\n assert kind(1, fkranks) == 7\n return 'You did good, and you should feel good about yourself :)'", "def check_hand_rank(hand):\n card_rank = ['--23456789TJQKA'.index(n) for n,h in hand]\n card_rank.sort()\n card_rank.reverse()\n #for royal straight flush\n card_rank_rsf = ['HDSC'.index(h) for n,h in hand]\n card_rank_rsf.sort()\n card_rank_rsf.reverse()\n if card_rank == [14,5,4,3,2]:\n card_rank = [5,4,3,2,1]\n if royal_straight_flush(hand):\n return 9,card_rank_rsf[0]\n elif straight_flush(hand):\n return 8,max(card_rank)\n elif four_of_a_kind(hand):\n return 7,max(card_rank)\n elif full_house(hand):\n tong = 0\n kuu = 0\n s = [n for n,h in hand]\n for i in xrange(len(s)):\n if(s.count(s[i])==3):\n tong = s[i]\n else:\n kuu = s[i]\n return 6,int(tong),int(kuu)\n elif flush(hand):\n return 5,max(card_rank)\n elif straight(hand):\n return 4,max(card_rank)\n elif three_of_a_kind(hand):\n ld = 0\n a = 0\n for i in xrange(0,3):\n if card_rank.count(card_rank[i]) > 1 :\n ld = (card_rank[i])\n else:\n a = card_rank[i]\n return 3,ld,a\n elif two_pair(hand):\n ld = []\n a = 0\n for i in xrange(0,3):\n if card_rank.count(card_rank[i]) >=2:\n ld.append(card_rank[i])\n 
card_rank.pop(i)\n else:\n a = card_rank[i]\n ld.sort(reverse=True)\n return 2,ld[0],ld[1],a\n elif one_pair(hand):\n ld = 0\n a = []\n for i in xrange(len(card_rank)):\n if card_rank.count(card_rank[i]) > 1 :\n ld = (card_rank[i])\n else:\n a.append(card_rank[i])\n a.sort(reverse = True)\n return 1,ld,a[0],a[1],a[2]\n else:\n return 0,max(card_rank)", "def compare_cards(p1_name, p2_name, card1, card2, ranks=['2','3','4','5','6','7','8','9','10','J','Q','K','A']):\n\n rank1, rank2 = card1[:-1], card2[:-1]\n\n if rank1 not in ranks: raise ValueError(\"Card 1 does not have a valid card value!\")\n if rank2 not in ranks: raise ValueError(\"Card 2 does not have a valid card value!\")\n\n print(p1_name+\"\\'s\", card1, \"vs.\", p2_name+\"\\'s\", card2)\n\n winner = -1\n\n if (rank1 == rank2): winner = 0\n elif (rank1 == '2' and rank2 == 'A'): winner = 1\n elif (rank1 == 'A' and rank2 == '2'): winner = 2\n else: winner = 1 if (ranks.index(rank1) > ranks.index(rank2)) else 2\n\n if (winner == 0): print(\"There Was a Tie Between\", card1, \"and\", card2)\n elif (winner == 1): print(p1_name, \"Wins This Round With a\", card1, \"Against a\", card2)\n elif (winner == 2): print(p2_name, \"Wins This Round With a\", card2, \"Against a\", card1)\n\n return winner", "def testCard(self):\n # test1\n cardObj1 = Card('A','d')\n self.assertEquals(1,cardObj1.get_rank())\n self.assertEquals('d',cardObj1.get_suit())\n # test2\n cardObj2 = Card('J','d')\n self.assertEquals(10,cardObj2.get_rank())\n # test3\n cardObj3 = Card(5,'d')\n self.assertEquals(5,cardObj3.get_rank())", "def rank(players, community):\n\t# Structure that holds the player ranking results\n\tclass RankResults():\n\t\tdef __init__(self):\n\t\t\tself.winner = [] # (WIN, player_name) or (TIE, [player1, player2, ...]) \n\t\t\tself.bestHands = [] # [(pl_name, bestHand, handRank), ... 
]\n\t\t\tself.kicker = [] # If player hands' ranks tie but lose\n\t\t\t\t\t# by kicker, this will have one card\n\t\t\t\n\t\n\t\tdef __repr__(self):\n\t\t\tif self.winner[0] == \"Win\":\n\t\t\t\twinPlayerIndex = [player[0] for player in \\\n\t\t\t\t\t\t\tself.bestHands].index(self.winner[1])\n\t\t\telse:\n\t\t\t\twinPlayerIndex = [player[0] for player in \\\n\t\t\t\t\t\t\tself.bestHands].index(self.winner[1][0])\n\t\t\twinningRank = self.bestHands[winPlayerIndex][2]\n\t\t\t\n\t\t\t# Returns Win/Tie, player name, and winning rank\n\t \treturn str(self.winner) + \" rank = \" + str(winningRank) + \" kicker = \" \\\n\t\t\t\t+ str(self.kicker)\n\n\t### Rank function definition starts here\n\n\t# Dictionary for converting card strings to numbers\n\tcardNums = {\"A\":14, \"K\":13, \"Q\":12, \"J\":11, \"10\":10, \"9\":9, \"8\":8, \\\n\t\t\t\"7\":7, \"6\":6, \"5\":5, \"4\":4, \"3\":3, \"2\":2}\n\n\t# scan each player's hand and return their best hand\n\twinHands = []\n\tresult = RankResults()\n\tfor player in players:\n\t\tcards = player.hand + community\n\t\t(playerHand, handStrength) = best_hand(cards)\n\t\tif len(winHands) != 0:\n\t\t\t# compare current player's hand to other\n\t\t\t# players in the best hands list\n\t\t\tif handStrength > winHands[0][2]:\n\t\t\t\twinHands = [(player.name, playerHand, handStrength)]\n\t\t\telif handStrength == winHands[0][2]:\n\t\t\t\twinHands.append( (player.name, playerHand, handStrength) )\n\t\t# if first player in list, \n\t\t# create a new list with this player's hand\t\t\t\n\t\telse: \n\t\t\twinHands = [(player.name, playerHand, handStrength)]\n\t\t\t\t\t\n\n\t\t# insert each player's hand into results\n\t\tresult.bestHands.append( (player.name, playerHand, handStrength) )\n\n\t# compare results. \n\t# winHands = ((name, handStrength, hand), ...)\n\tif len(winHands) == 1:\n\t\tresult.winner = (\"Win\", winHands[0][0])\n\telse:\n\t\t# tuple the i cards of every player to facilitate\n\t\t# comparison\n\t\tzippedHands = zip(*[winner[1] for winner in winHands])\n\t\t\n\t\t# Compare top 5 cards of tied winners\n\t\tfor i in range(5):\n\t\t\ttopCards = zippedHands[i]\n\t\t\tlargestCard = max(topCards) # find largest card \n\t\t\tisPlayerRemoved = False # loser detection flag\n\t\t\tnewWinHands = []\n\t\t\tfor j in range(len(topCards)):\n\t\t\t\tif topCards[j] == largestCard:\n\t\t\t\t\tnewWinHands.append(winHands[j]) \n\t\t\t\telse:\n\t\t\t\t\t# Remove players with < max\n\t\t\t\t\tisPlayerRemoved = True\n\t\t\t\t\t#winHands.remove(winHands.index(j))\n\t\t\t\t\t\n\t\t\twinHands = newWinHands\n\t\t\t# If only one winner remaining, stop checking\n\t\t\tif len(winHands) == 1:\n\t\t\t\tresult.kicker = largestCard\n\t\t\t\tresult.winner = (\"Win\", winHands[0][0])\t\t\n\t\t\t\tprint \"best hands = \" + str(result.bestHands)\n\t\t\t\treturn result\t\n\t\t\t# If player was removed, remake zippedHands\n\t\t\tif isPlayerRemoved:\n\t\t\t\tzippedHands = zip(*[winner[1] for winner in winHands])\n\t\t\t\t\t\n\t\t\n\t\tresult.winner = (\"Tie\", [winner[0] for winner in winHands])\n\t\n\tprint \"best hands = \" + str(result.bestHands)\n\n\treturn result", "def add_card(self,rank,suit):\r\n cursor = self.first()\r\n \r\n while cursor != None:\r\n \r\n \r\n #if the suit is equal to the header node suit and rank is valid.\r\n if (cursor.element()._suit[0] == suit) and (str(rank) in self.listOfCards):\r\n \r\n #check if the rank of the card exists in the hand\r\n if self.find(self._headD,rank) == False and (suit == 'D'):\r\n \r\n #create a card node and append it to the front of 
the list\r\n newCard = self.Card(rank,suit, self._headD)\r\n \r\n #update the head of the list\r\n self._headD = newCard\r\n \r\n if suit == 'D':\r\n \r\n self._sizeD += 1\r\n \r\n #update the size of D list in Suit object\r\n cursor.element().data = self._sizeD\r\n \r\n break\r\n \r\n #check if the rank of the card exists in the hand\r\n elif self.find(self._headC,rank) == False and (suit == 'C'):\r\n \r\n #create a card node and append it to the back of the list\r\n newCard = self.Card(rank,suit, self._headC)\r\n \r\n self._headC = newCard\r\n \r\n if suit == 'C':\r\n self._sizeC += 1\r\n \r\n cursor.element().data = self._sizeC\r\n \r\n break\r\n \r\n \r\n #check if the rank of the card exists in the hand\r\n elif self.find(self._headH, rank) == False and (suit == 'H'):\r\n \r\n #create a card node and append it to the back of the list\r\n newCard = self.Card(rank,suit, self._headH)\r\n self._headH = newCard\r\n \r\n if suit == 'H':\r\n self._sizeH += 1\r\n \r\n cursor.element().data = self._sizeH\r\n \r\n break\r\n \r\n #check if the rank of the card exists in the hand\r\n elif self.find(self._headS,rank) == False and (suit == 'S'):\r\n \r\n #create a card node and append it to the back of the list\r\n newCard = self.Card(rank,suit, self._headS)\r\n \r\n self._headS = newCard\r\n \r\n if suit == 'S':\r\n \r\n self._sizeS += 1\r\n \r\n cursor.element().data = self._sizeS\r\n \r\n break\r\n \r\n else:\r\n print(\"Item already exists in the hand.\")\r\n \r\n #update the cursor or to the next SuitNode\r\n cursor = self.after(cursor)", "def compare_cards(board, eng_card, scot_card, eng_type, scot_type, eng_parameter, scot_parameter):\n\n\n \n year_ends_early = False\n\n \n if get_card_val(eng_card) > get_card_val(scot_card):\n who_goes_first = 'ENGLAND'\n \n elif get_card_val(eng_card) < get_card_val(scot_card):\n who_goes_first = 'SCOTLAND'\n \n elif get_card_val(eng_card) == get_card_val(scot_card):\n \n who_goes_first = 'ENGLAND'\n \n if get_card_val(eng_card) == 4 and get_card_val(scot_card) == 4:\n year_ends_early = True\n \n board.who_goes_first = who_goes_first\n\n eng_played_truce = False\n if eng_card == 'TRU':\n eng_played_truce = True\n\n scot_played_truce = False\n if scot_card == 'TRU':\n scot_played_truce = True\n\n if who_goes_first == 'ENGLAND':\n\n resolve_card(board, eng_type, scot_type, eng_card, 'ENGLAND', eng_parameter, scot_played_truce)\n resolve_card(board, eng_type, scot_type, scot_card, 'SCOTLAND', scot_parameter, eng_played_truce)\n \n elif who_goes_first == 'SCOTLAND':\n \n resolve_card(board, eng_type, scot_type, scot_card, 'SCOTLAND', scot_parameter, eng_played_truce)\n resolve_card(board, eng_type, scot_type, eng_card, 'ENGLAND', eng_parameter, scot_played_truce)\n \n return who_goes_first, year_ends_early", "def __init__(self, suit, rank):\n \n if (suit in SUITS) and (rank in RANKS):\n self.suit = suit\n self.rank = rank\n else:\n self.suit = None\n self.rank = None\n \n global outcome\n outcome = INVALID_CARD, suit, rank", "def rank(self):\n \n if self.__rank:\n return self.__rank\n flush = True\n straight = False\n last = None\n merged = {}\n for c in self.__cards:\n if last:\n if flush and c.suit != last.suit:\n flush = False\n last = c\n if c.value in merged:\n merged[c.value] = merged[c.value] + 1\n else:\n merged[c.value] = 1\n if (len(merged)) == 5:\n # All unique cards, check for a straight\n if self.__cards[0].value - self.__cards[4].value == 4:\n straight = True\n if self.__cards[4].value == 2 and self.__cards[1].value == 5 and self.__cards[0].value 
== 14:\n straight = True\n # Set the value of the ace to 1 and resort so hand comparisons work correctly\n self.__cards[0].value = 1\n self.__cards = sorted(self.__cards, reverse=True)\n if straight and flush:\n if self.__cards[0].value == 14:\n self.__rank = Hand.ROYAL_FLUSH\n else:\n self.__rank = Hand.STRAIGHT_FLUSH\n elif flush:\n self.__rank = Hand.FLUSH\n elif straight:\n self.__rank = Hand.STRAIGHT\n else:\n self.__rank = Hand.HIGH_CARD\n self.__values = [c.value for c in self.__cards]\n else:\n multiples = [m for m in sorted(merged.items(), key = operator.itemgetter(1), reverse = True) if m[1] > 1]\n if len(multiples) > 1:\n if multiples[0][1] == multiples[1][1]:\n self.__rank = Hand.TWO_PAIRS\n else:\n self.__rank = Hand.FULL_HOUSE \n elif multiples:\n if multiples[0][1] > 3:\n self.__rank = Hand.FOUR_OF_A_KIND\n elif multiples[0][1] == 3:\n self.__rank = Hand.THREE_OF_A_KIND\n else:\n self.__rank = Hand.ONE_PAIR\n mvalues = sorted([m[0] for m in multiples], reverse=True)\n self.__values = mvalues + [c.value for c in self.__cards if c.value not in mvalues]\n if not self.__rank:\n self.__rank = Hand.HIGH_CARD\n\n return self.__rank", "def card_ranks(cards):\n ranks = [\"--23456789TJQKA\".index(r) for r,s in cards] # Each card contains a rank and a suit, hand/cards == [(11, 'Q'), (9, 'D')] \n # Using a \"Rank Strings Array\" (i.e using an array to represent the rank strings) to index it for the ranks\n ranks.sort(reverse=True)\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 3, 2, 1]) else ranks", "def card_ranks(hand):\n ranks = ['--23456789TJQKA'.index(r) for r, s in hand]\n ranks.sort(reverse = True)\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 4, 3, 2]) else ranks", "def __eq__(self, card2):\n return self.suit == card2.suit and self.rank == card2.rank", "def _translate_card(self):\n if isinstance(self.suit, int):\n\n if self.suit == 0:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of spades\".format(name)\n\n elif self.suit == 1:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of hearts\".format(name)\n\n elif self.suit == 2:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of diamonds\".format(name)\n\n elif self.suit == 3:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of clubs\".format(name)\n\n else:\n raise ValueError(\"The integer passed to the method must be 0, 1, 2, 3\")\n\n else:\n raise TypeError(\"The argument for the method must be an integer\")\n\n return self.name, self.values", "def main(players=2):\n Pcard = []\n i2 = 0\n while len(Pcard) < players:\n P2 = (input(\"Player \"+str(len(Pcard)+1)+\" -- input your card: \"))\n Pcard.append(P2.split())\n i2 += 1\n hand_rank = []\n print(\"==============Result==============\")\n for i in xrange(players):\n hand_rank.append(check_hand_rank(Pcard[i]))\n if hand_rank[i][0] == 0:\n print(\"Player \"+str(i+1)+\" have: High card\")\n elif hand_rank[i][0] == 1:\n print(\"Player \"+str(i+1)+\" have: One pair\")\n elif hand_rank[i][0] == 2:\n print(\"Player \"+str(i+1)+\" have: Two pair\")\n elif hand_rank[i][0] == 3:\n print(\"Player \"+str(i+1)+\" have: Three of a kind\")\n elif hand_rank[i][0] == 4:\n print(\"Player \"+str(i+1)+\" have: Straight\")\n elif hand_rank[i][0] == 5:\n print(\"Player \"+str(i+1)+\" have: Flush\")\n elif hand_rank[i][0] == 6:\n print(\"Player \"+str(i+1)+\" have: Full house\")\n elif hand_rank[i][0] == 7:\n print(\"Player \"+str(i+1)+\" have: Four of a kind\")\n elif hand_rank[i][0] == 8:\n print(\"Player 
\"+str(i+1)+\" have: Straight flush\")\n elif hand_rank[i][0] == 9:\n print(\"Player \"+str(i+1)+\" have: Royal straight flush\")\n if len(str(winner(hand_rank)))/2 >= 2:\n return \"-- >\" + 'Winner are players: ' +str(winner(hand_rank)) + \" < --\"\n return \"-- > The Winner is player: \" + str(winner(hand_rank))+ \" < --\"", "def __eq__(self, other_card):\n if self.rank == other_card.rank or self.suit == other_card.suit:\n return True\n else:\n return False", "def hand_rank(hand):\n ranks = card_ranks(hand) # ranks is a list of all the ranks. A sorted list of ranks is returned\n if straight(hand) and flush(hand): # Straight flush\n return (8, max(ranks)) # 2 3 4 5 6 (8, 6) 6 7 8 9 T (8, 10)\n elif kind(4, ranks): # Here kind(4, ranks) is used to return a bolean value\n # kind(4, ranks) returns the int when true, returns false if not true (used as boolean)\n return (7, kind(4, ranks), kind(1, ranks)) # 9 9 9 9 3 (7, 9, 3) 9 9 9 9 5 (7, 9, 5)\n elif kind(3, ranks) and kind(2, ranks): # full house\n return (6, kind(3, ranks), kind(2, ranks))\n elif flush(hand): # flush\n return (5, ranks)\n elif straight(ranks): # straight\n return (4, max(ranks))\n elif kind(3, ranks): # 3 of a kind\n return (3, kind(3, ranks), ranks)\n elif two_pair(ranks): # 2 pair\n return (2, two_pair(ranks), ranks)\n elif kind(2, ranks): # kind\n return (1, kind(2, ranks), ranks)\n else: # high card\n return (0, ranks)", "def __init__(self, suit: str, rank: str) -> None:\n self.suit = suit\n self.rank = rank\n self.value = Card.values[rank]\n self.hidden = False", "def get_card(self):\n if self.card_suit in self.RED_SUITS:\n color = 'red'\n else:\n color = 'blue'\n\n return colored(self.card_name, 'yellow') + colored(self.card_suit,\n color)", "def resolve_card(board, eng_type, scot_type, card, role, parameter, truce = False):\n\n if role == 'ENGLAND':\n which_side = eng_type\n elif role == 'SCOTLAND':\n which_side = scot_type\n\n\n if card == '1':\n movement_execution(board, which_side, role, int(card), truce)\n elif card == '2':\n movement_execution(board, which_side, role, int(card), truce)\n elif card == '3':\n movement_execution(board, which_side, role, int(card), truce)\n\n else:\n\n if role == 'ENGLAND' or not scottish_king.run_king(board, eng_type, scot_type):\n \n \n \n if card == 'SEA':\n \n if play_pass(which_side) == 'play':\n sea_execution(board, which_side, role)\n \n \n elif card == 'HER':\n \n if play_pass(which_side) == 'play':\n her_execution(board, which_side, role, eng_type, scot_type)\n \n \n elif card == 'VIC':\n if play_pass(which_side) == 'play':\n vic_execution(board, which_side, role, parameter)\n \n \n elif card == 'PIL':\n \n if play_pass(which_side) == 'play':\n pil_execution(board, which_side, role, parameter)\n \n \n elif card == 'TRU':\n \n if play_pass(which_side) == 'play':\n return True", "def get_poker_hand(cards):\n cards = sorted(cards, key=aces_high, reverse=True)\n cards_low_ace = sorted(cards, key=lambda card: card.value, reverse=True)\n\n # Any jokers will have sorted to the front\n if cards and cards[0].joker:\n raise ValueError(\"Cannot calculate poker hand including jokers\")\n\n if len(cards) > 5:\n return max(map(get_poker_hand, itertools.combinations(cards, 5)))\n\n cvalues = collections.Counter(c.value for c in cards)\n suits = set(c.suit for c in cards)\n of_a_kind_card, of_a_kind = cvalues.most_common(1)[0]\n if len(cvalues) >= 2:\n second_pair_card, second_pair = cvalues.most_common(2)[-1]\n else:\n second_pair_card, second_pair = None, 0\n high_card = 
cards[0].value\n values = [c.value.value for c in cards]\n is_straight = len(cards) == 5 and all(\n i[0].value == i[1] for i in zip(cards, range(cards[0].value, -5, -1))\n )\n is_ace_low_straight = len(cards) == 5 and all(\n i[0].value == i[1]\n for i in zip(cards_low_ace, range(cards_low_ace[0].value, -5, -1))\n )\n\n if len(suits) == 1 and is_straight:\n return PokerHand.StraightFlush, aces_high(high_card)\n if len(suits) == 1 and is_ace_low_straight:\n return PokerHand.StraightFlush, cards_low_ace[0].value\n if of_a_kind == 4:\n return PokerHand.FourOfAKind, aces_high(of_a_kind_card)\n if of_a_kind == 3 and second_pair == 2:\n return PokerHand.FullHouse, aces_high(of_a_kind_card)\n if len(suits) == 1 and len(cards) == 5:\n return PokerHand.Flush, aces_high(high_card)\n if is_straight:\n return PokerHand.Straight, aces_high(high_card)\n if is_ace_low_straight:\n return PokerHand.Straight, cards_low_ace[0].value\n if of_a_kind == 3:\n return (PokerHand.ThreeOfAKind, aces_high(of_a_kind_card)) + (\n (aces_high(second_pair_card),) if second_pair_card else ()\n )\n if of_a_kind == 2 and second_pair == 2:\n return (PokerHand.TwoPair,) + tuple(\n map(\n aces_high,\n sorted(\n filter(None, (of_a_kind_card, second_pair_card)),\n reverse=True,\n key=aces_high,\n ),\n )\n )\n if of_a_kind == 2:\n return (PokerHand.Pair, aces_high(of_a_kind_card)) + (\n (aces_high(second_pair_card),) if second_pair_card else ()\n )\n\n return (PokerHand.HighCard,) + tuple(\n sorted((aces_high(c) for c in cvalues), reverse=True)\n )", "def scan_cards(player, river):\r\n best_rank = 0\r\n cards = player.hand + river\r\n hands = combinations(cards, 5) # find all 5 card hands\r\n best_hands = []\r\n for h in hands:\r\n flat = list(sum(h, ()))\r\n prep = np.zeros(shape=(10,))\r\n j = 0\r\n for i in flat:\r\n prep[j] = i\r\n j = j+1\r\n input = np.zeros(shape=(1,10))\r\n input[0] = prep\r\n rank = np.argmax(player.ai.predict(input)[0])\r\n\r\n if rank == best_rank:\r\n best_hands.append(h)\r\n if rank > best_rank:\r\n best_rank = rank\r\n best_hands = []\r\n best_hands.append(h)\r\n final_hand = best_hand(best_hands)\r\n return (best_rank, final_hand)", "def rank_five_cards(cards):\n\n # List of all card values\n values = sorted([card.number for card in cards])\n\n # Checks if hand is a straight\n is_straight = all([values[i] == values[0] + i for i in range(5)])\n\n # Additional straight check\n if not is_straight:\n\n # Weakest straight\n is_straight = all(values[i] == values[0] + i for i in range(4)) and values[4] == 12\n\n # Rotate values as the ace is weakest in this case\n values = values[1:] + values[:1]\n\n # Checks if hand is a flush\n is_flush = all([card.suit == cards[0].suit for card in cards])\n\n # Get card value counts\n value_count = {value: values.count(value) for value in values}\n\n # Sort value counts by most occuring\n sorted_value_count = sorted([(count, value) for value, count in value_count.items()],\n reverse=True)\n\n # Get all kinds (e.g. 
four of a kind, three of a kind, pair)\n kinds = [value_count[0] for value_count in sorted_value_count]\n\n # Get values for kinds\n kind_values = [value_count[1] for value_count in sorted_value_count]\n\n # Royal flush\n if is_straight and is_flush and values[0] == 8:\n return [ROYAL_FLUSH] + [str(value) for value in values]\n # Straight flush\n if is_straight and is_flush:\n return [STRAIGHT_FLUSH] + kind_values\n # Four of a kind\n if kinds[0] == 4:\n return [FOUR_OF_A_KIND] + kind_values\n # Full house\n if kinds[0] == 3 and kinds[1] == 2:\n return [FULL_HOUSE] + kind_values\n # Flush\n if is_flush:\n return [FLUSH] + kind_values\n # Straight\n if is_straight:\n return [STRAIGHT] + kind_values\n # Three of a kind\n if kinds[0] == 3:\n return [THREE_OF_A_KIND] + kind_values\n # Two pair\n if kinds[0] == 2 and kinds[1] == 2:\n return [TWO_PAIR] + kind_values\n # Pair\n if kinds[0] == 2:\n return [PAIR] + kind_values\n # No pair\n return [HIGH_CARD] + kind_values", "def get_card (self, card):\n\t\treturn self._card", "def is_match(self, card):\n\t\treturn self.suit == card.suit or self.value == card.value", "def best_hand(cards):\n\n\tvalues = [card[0:-1] for card in cards]\n\tsuits = [card[-1] for card in cards]\n\n\t# Dictionary for converting card strings to numbers\n\tcardNums = {\"A\":14, \"K\":13, \"Q\":12, \"J\":11, \"10\":10, \"9\":9, \"8\":8, \\\n\t\t\t\"7\":7, \"6\":6, \"5\":5, \"4\":4, \"3\":3, \"2\":2}\n\n\t# Convert card values to real numbers\n\tunsortedValues = [cardNums[value] for value in values]\n\t# unsorted values is necessary for retrieving card + suit\n\t# later\n\tvalues = unsortedValues [:] # make a copy of list\n\tvalues.sort() \t\t# sort values \n\tvalues.reverse()\t# largest # first \n\n\t### Check for possible hands\n\n\n\t# prepare variables for holding potential hands\n\tfourkind = []\n\tflush = [] \t# stores the suit of the flush\n\tstraight = [] \t# stores the highest number of straight \n\tthreekind = [] # stores the best possible 3-of-a-kind \n\tpairs = [] \t# stores one number for each pair\n\n\t# prepare counters for tracking possible hands\n\tstraightCounter = 1 # always have a straight of 1\n\t\n\t# Check for flush\n\tfor suit in suits:\n\t\tif suits.count(suit) >= 5:\n\t\t\tflush = suit\t\n\t\t\tbreak\n\n\t# check for straight, 4-kind, 3-kind, pairs\n\tfor i in range(6): # Don't process the last card\n\n\t\t# Check for straight if still possible\n\t\tif len(straight) == 0:\n\t\t\tprint \"values = \" + str(values)\n\t\t\tstraightSeq = [values.count(values[i]-j) >= 1 for j in range(1,5)]\t\n\t\t\tprint \"straightSeq = \" + str(straightSeq)\n\t\t\tif straightSeq.count(True) == 4:\n\t\t\t\tstraight.append(values[i])\t\n\n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif values[i] == 5:\n\t\t\t\t# check for 4-2-3 first\n\t\t\t\tstraightSeq = [values.count(values[i]-j) >= 1 for j in range(1,4)]\t\n\t\t\t\t# check for Ace\n\t\t\t\tif straightSeq.count(True) == 3 and \\\n\t\t\t\t\tvalues.count(cardNums[\"A\"]) >= 1:\n\t\t\t\t\tstraight.append(values[i])\t\n\n\t\t# Check for 4-kind\n\t\tif len(fourkind) == 0 and values.count(values[i]) == 4:\n\t\t\tfourkind = [values[i]]\n\t\t# Check for 3-kind but don't add same one twice \n\t\telif values.count(values[i]) == 3 and \\\n\t\t\tthreekind.count(values[i]) == 0:\t\n\t\t\tif len(threekind) == 0:\n\t\t\t\tthreekind.append(values[i])\n\t\t\telse: # add to pairs\n\t\t\t\tpairs.append(values[i])\n\t\t# Check for pairs, don't add same pair twice\n\t\telif values.count(values[i]) == 2 and \\\n\t\t\tpairs.count(values[i]) 
== 0: \n\t\t\tpairs.append(values[i])\n\n\t\n\n\t### Determine hand strength based on found hands\n\t# Since values are separated from suits, have to iterate\n\t# through unsorted values to find correct index of each card\n\n\tbesthand = []\n\n\t# Straight flush\n\tif len(straight) != 0 and len(flush) != 0:\n\t\tfor i in range(5): \n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif i == 4 and straight[0] == cardNums[\"5\"]:\n\t\t\t\tcardIndex = unsortedValues.index(cardNums[\"A\"])\n\t\t\telse:\n\t\t\t\tcardIndex = unsortedValues.index(straight[0] - i)\n\n\t\t\tcard = cards[cardIndex] \n\t\t\tif card[-1] == flush:\n\t\t\t\tbesthand.append(card)\n\t\t\telse:\n\t\t\t\tbreak\n\t\tif len(besthand) == 5:\n\t\t\treturn (besthand, Ranks.StraightFlush)\n\t\telse: # not a straight flush, so re-init besthand\n\t\t\tbesthand = []\n\n\t# Four of a kind\n\tif len(fourkind) != 0:\n\t\tcardValue = convNumToCard(fourkind[0])\n\t\t# insert the 4 out of 5 cards b/c suit is known\n\t\tbesthand = [cardValue + \"S\", cardValue + \"H\", cardValue + \"C\", cardValue + \"D\"]\n\t\t# add the highest value card that isn't 4-of-a-kind\n\t\tfor i in range(7):\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != fourkind[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tbreak\n\t\treturn (besthand, Ranks.FourKind)\n\t# Full House\t\n\telif len(threekind) != 0 and len(pairs) != 0:\n\t\tfor i in range(7): # add 3-kind to besthand\n\t\t\tif unsortedValues[i] == threekind[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 3:\n\t\t\t\t\tbreak\n\t\t\n\t\tfor i in range(7): # add pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.FullHouse)\n\t# Flush\n\telif len(flush) != 0:\n\t\t# iterate through sorted cards, add that card if its\n\t\t# suit matches the flush suit\n\t\tfor i in range(7):\n\t\t\t# find card in original unsorted list\n\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\tcard = cards[cardIndex] \n\t\t\tif card[-1] == flush[0]:\n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.Flush)\n\t# Straight\n\telif len(straight) != 0:\n\n\t\tfor i in range(5): \n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif i == 4 and straight[0] == cardNums[\"5\"]:\n\t\t\t\tcardIndex = unsortedValues.index(cardNums[\"A\"])\n\t\t\telse:\n\t\t\t\tcardIndex = unsortedValues.index(straight[0] - i)\n\t\t\tcard = cards[cardIndex] \n\t\t\tbesthand.append(card)\n\t\treturn (besthand, Ranks.Straight)\n\t# Three of a kind\n\telif len(threekind) != 0:\n\t\tfor i in range(7): # add 3-kind to besthand\n\t\t\tif unsortedValues[i] == threekind[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 3:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add two high cards to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != threekind[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.ThreeKind)\n\t# Two pair\n\telif len(pairs) == 2:\n\t\tfor i in range(7): # add 1st pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 
2:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add 2nd pair to besthand\n\t\t\tif unsortedValues[i] == pairs[1]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 4:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add high card to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != pairs[0] and values[i] != pairs[1]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.TwoPair)\n\t# Pair\n\telif len(pairs) == 1:\n\t\tfor i in range(7): # add pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 2:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add high card to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != pairs[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.Pair)\n\t# High card\n\telse:\n\t\tfor i in range(7):\n\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\tcard = cards[cardIndex] \n\t\t\tbesthand.append(card)\n\t\t\tif len(besthand) == 5:\n\t\t\t\treturn (besthand, Ranks.HighCard)", "def evalcards(cardA, cardB, cardC, cardD):\n array = []\n ranks = []\n spadessort = []\n cardsinsuit = 1\n # BASESUIT definitions\n if cardA[-3:] == \"SPA\":\n basesuit = suitspades\n if cardA[-3:] == \"HEA\":\n basesuit = suithearts\n if cardA[-3:] == \"DIA\":\n basesuit = suitdiamonds\n if cardA[-3:] == \"CLB\":\n basesuit = suitclubs\n if cardB in basesuit:\n cardsinsuit += 1\n if cardC in basesuit:\n cardsinsuit += 1\n if cardD in basesuit:\n cardsinsuit += 1\n #BEGIN SORTING CARDS\n cardBBB = cardB\n cardCCC = cardC\n cardDDD = cardD\n if cardB not in basesuit:\n cardBBB = basesuit[12]\n if cardC not in basesuit:\n cardCCC = basesuit[12]\n if cardD not in basesuit:\n cardDDD = basesuit[12]\n array += [str(basesuit.index(cardA))]\n if len(str(basesuit.index(cardA))) == 1:\n del array[0]\n array += [\"0\"+str(basesuit.index(cardA))]\n array += [str(basesuit.index(cardBBB))]\n if len(str(basesuit.index(cardBBB))) == 1:\n del array[1]\n array += [\"0\"+str(basesuit.index(cardBBB))]\n array += [str(basesuit.index(cardCCC))]\n if len(str(basesuit.index(cardCCC))) == 1:\n del array[2]\n array += [\"0\"+str(basesuit.index(cardCCC))]\n array += [str(basesuit.index(cardDDD))]\n if len(str(basesuit.index(cardDDD))) == 1:\n del array[3]\n array += [\"0\"+str(basesuit.index(cardDDD))]\n array.sort()\n for x in range(0,cardsinsuit):\n ranks += [basesuit[int(array[x])]]\n #CHECKING FOR NOT IN SUIT AND FOR SPADES\n if cardB not in basesuit:\n if cardB in spades:\n spadessort += [cardB]\n else:\n ranks += [cardB]\n if cardC not in basesuit:\n if cardC in spades:\n if (cardB in spades) and (spades.index(cardC) < spades.index(cardB)):\n spadessort = listinsert(spadessort, 0, cardC)\n elif (cardB in spades) and (spades.index(cardC) > spades.index(cardB)):\n spadessort += [cardC]\n else:\n spadessort += [cardC]\n else:\n ranks += [cardC]\n if cardD not in basesuit:\n if cardD in spades:\n if (cardB in spades) and (cardC in spades):\n if (spades.index(cardD) < spades.index(cardC)) and (spades.index(cardD) < spades.index(cardB)):\n spadessort = listinsert(spadessort, 0, cardD)\n elif ((spades.index(cardD) < 
spades.index(cardC)) and (spades.index(cardD) > spades.index(cardB))) or ((spades.index(cardD) > spades.index(cardC)) and (spades.index(cardD) < spades.index(cardB))):\n spadessort = listinsert(spadessort, 1, cardD)\n elif (spades.index(cardD) > spades.index(cardC)) and (spades.index(cardD) > spades.index(cardB)):\n spadessort += [cardD]\n elif (cardB in spades) and (cardC not in spades):\n if spades.index(cardD) < spades.index(cardB):\n spadessort = listinsert(spadessort, 0, cardD)\n if spades.index(cardD) > spades.index(cardB):\n spadessort += [cardD]\n elif (cardB not in spades) and (cardC in spades):\n if spades.index(cardD) < spades.index(cardC):\n spadessort = listinsert(spadessort, 0, cardD)\n if spades.index(cardD) > spades.index(cardC):\n spadessort += [cardD]\n else:\n spadessort += [cardD]\n else:\n ranks += [cardD]\n ranks = spadessort + ranks\n return(ranks)", "def blackjackValue(self):\n NUMBERRANKS = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\n FACECARDS = [\"jack\", \"queen\", \"king\"]\n ACE = [\"ace\"]\n if self.rank in NUMBERRANKS:\n return int(self.rank)\n elif self.rank in FACECARDS:\n return 10\n elif self.rank in ACE:\n return 11", "def factory(cls, game, player, play): # RIP n decks ;_;\n if len(play) == 1:\n return SingleCardRound(game, player, play)\n\n if len(play) > 1:\n play.sort()\n\n \"\"\"if number of cards > 1, checks to see if all cards are the same\"\"\"\n for i in range(len(play) - 1):\n if play[i].__eq__(play[i + 1]):\n allSame = True\n else:\n allSame = False\n if (allSame):\n return TupleCardsRound(game, player, play)\n\n \"\"\"if number of cards > 1 && cards not the same, check if play is all consecutive tuples\"\"\"\n tupleTracker = defaultdict(int)\n for j in range(len(play)):\n tupleTracker[play[j]] += 1\n cardValueList = tupleTracker.keys()\n numTuples = cardValueList[0]\n constainsTuples = True\n for cardValue in cardValueList:\n numCopies = cardValueList[tuple]\n if numTuples != numCopies:\n containsTuples = False\n if (containsTuples): # if hand is all tuples, see if they're consecutive\n isConsecutiveTupleCardsRound = True\n numConsecutiveTuples = 0\n for (index, key) in enumerate(cardValueList.sort()):\n if key.suit != keys[index + 1].suit or key.number != keys[index + 1].number - 1:\n isConsecutiveTupleCardsRound = False\n else:\n numConsecutiveTuples += 1\n if (isConsecutiveTupleCardsRound):\n return ConsecutiveTupleCardsRound(game, player, play)\n\n \"\"\"\n else check that the cards are the highest cards in that suit:\n look at the card \"groups\" that are in play and then check to see that there's no group in someone's hand that's higher than the current group\n is there a tuple? how many? if there is, mark this as a tuple with n tuples is there a consectuple? if there is, mark it as a consectuple\n \"\"\"\n \"\"\"making hash table for remainder of deck\"\"\"\n remainder = []\n for player in games.players:\n hand = player.hand\n for card in hand:\n remainder.append(card)\n\n remainderHash = defaultdict(int)\n for k in range(len(remainder)):\n remainderHash[remainder[k]] += 1\n\n \"\"\"below, check cardgroup against cardgroups in the remainderdeck to see if there's any higher cardgroups there\"\"\"\n isTopCardsRound = True\n numTuples = defaultdict(int)\n for card in cardValueList:\n\n \"\"\"this shit is not immediately useful. 
it just counts the number of tuples\"\"\"\n if tupleTracker[card] > 1:\n numTuples[tupleTracker[card]] += 1 #numTuples is actually hash table for init play\n\n for remainderCard in remainder:\n if card.suit != remainderCard.suit:\n continue\n if card.number < remainderCard.number:\n if tupleTracker[card] <= remainderHash[remainderCard]:\n isTopCardsRound = False # if any cardgroup is higher, then topGame is false\n\n \"\"\"by now this is definitely a topcardround, now to see if there's any tuples and how many tuples up in here\"\"\"\n if isTopCardsRound:\n if numTuples:\n isTopConsecutiveTuplesRound = True\n for (index, card) in enumerate(cardValueList.sort()):\n if card.suit != cardValueList[index + 1].suit or card.number != cardValueList[index + 1].number - 1: # problem\n isTopConsecutiveTupleCardsRound = False\n if (isTopConsecutiveTupleCardsRound):\n return TopConsecutiveTupleCardsRound(game, player, play)\n else:\n return TopCardsRound(game, player, play)\n\n \"\"\"if gets to here without returning something, this is a failed play\"\"\"\n raise ValueError('Cannot play illegal play to round')", "def card(n):\r\n assert type(n) == int and n > 0 and n <= 13, \"Bad card n\"\r\n specials = {1: 'A', 11: 'J', 12: 'Q', 13: 'K'}\r\n return specials.get(n, str(n))", "def hand_rank(hand):\n ranks = card_ranks(hand)\n if straight(ranks) and flush(hand):\n return (8, max(ranks))\n elif kind(4, ranks):\n return (7, kind(4, ranks), kind(1, ranks))\n elif kind(3, ranks) and kind(2, ranks):\n return (6, kind(3, ranks), kind(2, ranks))\n elif flush(hand):\n return (5, ranks)\n elif straight(ranks):\n return (4, max(ranks))\n elif kind(3, ranks):\n return (3, kind(3, ranks), ranks)\n elif two_pair(ranks):\n return (2, two_pair(ranks), ranks)\n elif kind(2, ranks):\n return (1, kind(2, ranks), ranks)\n else:\n return (0, ranks)", "def score_on_hands(cards_on_hand):\r\n score = 0\r\n straightCount = 0\r\n max_card = 0\r\n suite_dict = {}\r\n face_dict = {}\r\n transfer_dict = {'A':1,'J':11,'Q':12,'K':13}\r\n card_face = []\r\n '''Circulate the player's hand, build a list of points and a suit dict'''\r\n for index in range(len(cards_on_hand)):\r\n if str(cards_on_hand[index])[1] in transfer_dict:\r\n card_face.append(transfer_dict.get(str(cards_on_hand[index])[1]))\r\n elif str(cards_on_hand[index])[1] == '1':\r\n card_face.append(10)\r\n else:\r\n card_face.append(int(str(cards_on_hand[index])[1]))\r\n suite_dict[str(cards_on_hand[index])[0]] = 1\r\n '''Because 1 can be treated as 1 or 14, so if 1 exists, add 14 to the end of the list to calculate flush'''\r\n if 1 in card_face:\r\n card_face.append(14)\r\n\r\n '''Check straight, if it is straight, straight should be 4'''\r\n for face in range(len(card_face)-1):\r\n if card_face[face] +1 == card_face[face+1] :\r\n straightCount +=1\r\n\r\n '''Detect the number of cards of the same number'''\r\n for face in card_face:\r\n\r\n if face not in face_dict:\r\n face_dict[face] = 1\r\n else:\r\n face_dict[face] += 1\r\n\r\n '''Store the maximum number of points'''\r\n max_card = card_face[len(card_face)-1]\r\n\r\n '''Calculate player score'''\r\n if straightCount == 4:\r\n score+= 8\r\n\r\n if len(suite_dict) == 1:\r\n score+= 9\r\n\r\n for values in face_dict.values():\r\n if values == 2:\r\n score += 3\r\n elif values == 3:\r\n score += 7\r\n elif values == 4:\r\n score += 11\r\n\r\n return (score, max_card)", "def __str__(self):\n #Create dictionary for face cards\n translate = {11:'Jack', 12:'Queen', 13:'King', 14: 'Ace'}\n r = self._rank\n #check for face 
card\n if r in [11, 12, 13, 14]:\n myrank = translate[r]\n else:\n myrank = str(r)\n return myrank + \" of \" + self._suit", "def best_card(cards, trump=None, lead=None):\n\tval_map = {}\n\tfor c in cards:\n\t\tval = VALUE_MAP[c[0]]\n\t\tif lead == c[1]:\n\t\t\tval *= 10\n\t\tif trump == c[1]:\n\t\t\tval *= 100\n\t\t\tif c[0] == 'J':\n\t\t\t\tval = val*10 + 5\n\t\tif trump == same_color(c[1]) and c[0] == 'J':\n\t\t\tval = val*1000 + 3\n\n\t\tval_map[c] = val\n\n\treturn sorted(val_map.items(), key=lambda x: x[1], reverse=True)[0][0]", "def play_card(self, rnd: PlayerRound) -> int:\n # we can check if we are playing the correct game\n assert rnd.jass_type == JASS_HEARTS\n\n # get the valid cards to play\n valid_cards = rnd.get_valid_cards()\n\n # lets divide our cards into heart and other cards\n my_heart_cards = valid_cards * color_masks[HEARTS, :]\n my_other_cards = valid_cards - my_heart_cards\n\n if rnd.nr_cards_in_trick == 0:\n # we are the first player, so we can select what to play\n # lets select some random non-heart card if we have any (not that this is necessarily\n # a good strategy :-)\n if my_other_cards.sum() > 0:\n card = np.random.choice(np.flatnonzero(my_other_cards))\n else:\n # just play a random valid card\n card = np.random.choice(np.flatnonzero(valid_cards))\n else:\n # if we have to give a card, lets try to give a heart card\n if my_heart_cards.sum() > 0:\n card = np.random.choice(np.flatnonzero(my_heart_cards))\n else:\n # just play a random valid card\n card = np.random.choice(np.flatnonzero(valid_cards))\n\n self._logger.debug('Played card: {}'.format(card_strings[card]))\n return card", "def get_card(self, name):\n for card in self.cards:\n if card.name == name:\n return card\n\n return None", "def get_card(self, card):\n\n\t\tself.add_card_to_grps(card)\n\n\t\tself.grps = sorted(self.grps, key = lambda x: -len(x))\n\n\n\t\t# check if # of cards forming sets is more than 5; if yes, then break the set to allow computer to form runs\n\t\tnum_set_cards = 0\n\t\tpos = -1\n\t\tfor i in range(len(self.grps)):\n\t\t\tif len(self.grps[i]) > 1 and self.grps[i][0] == self.grps[i][1]:\n\t\t\t\tnum_set_cards += len(self.grps[i])\n\t\t\t\tpos = i\n\n\t\tif num_set_cards > 5:\n\t\t\tcard = self.grps[pos][-1]\n\t\t\tself.grps[pos].remove(card)\n\t\t\tlogger.info(f\"In computer.py/get_card: computer returned {card} to break too many set, computer = {self}\")\n\t\t\treturn card\n\n\n\t\t# if # of sets is fine, then remove a card from the group with least size\n\t\tcard = self.grps[-1][-1]\n\n\t\t\n\t\tif len(self.grps[-1]) == 1:\n\t\t\tself.grps.remove(self.grps[-1])\n\t\telse:\n\t\t\tself.grps[-1].remove(self.grps[-1][-1])\n\n\t\tlogger.info(f\"In computer.py/get_card: computer returned {card}, computer = {self}\")\n\n\t\treturn card", "def simulate(deck): \n \n # Initialize Banker and Player\n # player_third_card is initialized to -10 to signify that it doesn't exist.\n banker = 0\n player = 0\n player_third_card = -10\n \n# Deal out two hands of two cards\n player = (player + deck.pop()) % 10\n player = (player + deck.pop()) % 10\n \n banker = (banker + deck.pop()) % 10\n banker = (banker + deck.pop()) % 10\n \n# Check for natural\n if player >= 8 and banker >= 8:\n return 'tie'\n elif banker >= 8:\n return 'banker'\n elif player >= 8:\n return 'player'\n \n\n# Run through Player hand\n if player <= 5:\n player_third_card = deck.pop()\n player = (player + player_third_card) % 10\n \n\n# Run through Banker hand\n if player_third_card == -10 and banker < 6:\n banker = (banker + 
deck.pop()) % 10\n elif banker <= 2:\n banker = (banker + deck.pop()) % 10\n elif banker == 3 and player_third_card != 8:\n banker = (banker + deck.pop()) % 10\n elif banker == 4 and player_third_card >= 2 and player_third_card <=7:\n banker = (banker + deck.pop()) % 10\n elif banker == 5 and player_third_card >= 4 and player_third_card <=7:\n banker = (banker + deck.pop()) % 10\n elif banker == 6 and (player_third_card == 6 or player_third_card == 7):\n banker = (banker + deck.pop()) % 10\n \n \n# Compare hands and return results\n if player > banker:\n return 'player'\n elif banker > player:\n return 'banker'\n else:\n return 'tie'", "def compare_cards(self, guess):\n \n \"\"\"\n Compares cards to determine higher_lower, \n compares result with guess\n Args: \n self: : An instance of Dealer.\n self.card_1: int\n self.card_2: int\n guess: bool\n \"\"\"\n card_str_1 = self.get_card_str(self.card_1)\n card_str_2 = self.get_card_str(self.card_2)\n if guess: \n if self.card_1 == self.card_2:\n print(f\"{card_str_2} is equal to {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 > self.card_2:\n print(f\"{card_str_2} is lower than {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 < self.card_2:\n print(f\"{card_str_2} is higher than {card_str_1}\")\n self.player.score += 100\n if not guess:\n if self.card_1 == self.card_2:\n print(f\"{card_str_2} is equal to {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 > self.card_2:\n print(f\"{card_str_2} is lower than {card_str_1}\")\n self.player.score += 100\n elif self.card_1 < self.card_2:\n print(f\"{card_str_2} is higher than {card_str_1}\")\n self.player.score -= 75", "def make_card_wish(self, symbol, player):\n if player == self.current_player:\n if symbol in \"s c h d\":\n self.wait_for_card_wish = False\n self.card_wished = symbol\n self.choose_next_player()\n return True\n return False", "def get_card_str(self, card):\n card_str = str(card)\n if card == 11:\n card_str = \"Jack\"\n if card == 12:\n card_str = \"Queen\"\n if card == 13:\n card_str = \"King\"\n if card == 1:\n card_str = \"Ace\"\n \n return card_str", "def __init__(self, rank=\"\", suit=\"\"):\n self.suit = suit\n self.rank = rank\n self.face_up = False", "def card_value (card):\r\n value = card[0]\r\n if value in ['Jack','Queen','King']:\r\n return 10\r\n if value in [2,3,4,5,6,7,8,9,10]:\r\n return value\r\n else:\r\n raise 'CardValueError'", "def guess(card1: dict, card2: dict) -> bool:\n print(f\"The current card is {card1['rank']} of {card1['suit']}\")\n selection = str(input('Will the next card be higher h or lower l?: '))\n if selection == 'h':\n return compare(card1, card2) < 0\n elif selection == 'l':\n return compare(card1, card2) > 0\n else:\n print(\"Type h or l\")\n return False", "def _compare(self, other): \n if(self.rank==other.rank):\n if (self.rank == 5 and other.rank==5) or (self.rank ==3 and other.rank==3):\n maxself = max(self.ranks) \n maxother = max(other.ranks)\n if(maxself>maxother):\n if(Card.ranks[maxself]=='Ace' or Card.ranks[maxother] == 'Ace'):\n maxself1 = sorted(self.ranks,reverse=True)\n maxself1 = maxself1[1]\n maxother1 = sorted(other.ranks,reverse=True)\n maxother1 = maxother1[1]\n if(maxself1>maxother1):\n return 1\n else:\n return 0\n else:\n if(Card.ranks[maxself]=='Ace' or Card.ranks[maxother] == 'Ace'):\n maxself1 = sorted(self.ranks,reverse=True)\n maxself1 = maxself1[1]\n maxother1 = sorted(other.ranks,reverse=True)\n maxother1 = maxother1[1]\n if(maxself1<maxother1):\n return -1\n else:\n return 0\n \n 
if (self.rank == 4 and other.rank==4):\n maxself = max(self.ranks) \n maxother = max(other.ranks)\n if(maxself>maxother):\n return 1\n else:\n return -1\n if (self.rank ==2 and other.rank==2) or (self.rank ==0 and other.rank==0):\n newself = sorted(self.ranks,reverse=True)\n newother = sorted(other.ranks,reverse=True)\n maxsel = max(newself)\n maxoth = max(newother)\n if(maxsel>maxoth):\n return 1\n elif(maxsel<maxoth):\n return -1\n else:\n maxsel1= newself[1]\n maxoth1 = newother[1]\n if(maxsel1>maxoth1):\n return 1\n elif(maxsel1<maxoth1):\n return -1\n else:\n maxsel2= newself[2]\n maxoth2 = newother[2]\n if(maxsel2>maxoth2):\n return 1\n elif(maxsel2<maxoth2):\n return -1\n else:\n return 0\n if self.rank ==1 and other.rank==1:\n pairwali1 = {}\n pairwali2={}\n for i in range(0,3):\n if other.ranks[i] not in pairwali1:\n pairwali1[other.ranks[i]] = 1\n else:\n pairwali1[other.ranks[i]]= pairwali1[other.ranks[i]]+1\n if self.ranks[i] not in pairwali2:\n pairwali2[self.ranks[i]] = 1\n else:\n pairwali2[self.ranks[i]] = pairwali2[self.ranks[i]]+1\n t = list(pairwali1.keys())[list(pairwali1.values()).index(2)]\n r = list(pairwali2.keys())[list(pairwali2.values()).index(2)]\n if t!=r:\n if t>r:\n return -1\n elif t<r:\n return 1\n elif t==r:\n t= list(pairwali1.keys())[list(pairwali1.values()).index(1)]\n r = list(pairwali2.keys())[list(pairwali2.values()).index(1)]\n if t>r:\n return -1\n elif t<r:\n return 1\n else:\n return 0\n\n else:\n if(self.rank>other.rank):\n return 1\n else:\n return -1", "def is_one_rank_apart(card1, card2):\n def card_value(card):\n return 'A23456789TJQK'.index(solvers.deck.card_rank(card))\n\n pos1, pos2 = card_value(card1), card_value(card2)\n diff = abs(pos1 - pos2)\n return diff in (1, 12)", "def score(cards):\n \n values = sorted(map(lambda x: x[0], cards))\n\n if same_suit(cards) and values[0] == 10 and values[4] == 14: # royal flush\n return (10, 14, 0) \n\n if same_suit(cards) and values[4] - values[0] == 4 and len(set(values)) == 5: # straigh flush\n return (9, values[4], 0)\n\n if len(set(values)) == 2 and values[1] == values[3]: # four of a kind\n if values[0] != values[1]:\n high_card = values[0]\n else: high_card = values[4]\n return (8, values[2], high_card)\n\n if len(set(values)) == 2 and values[1] != values[3]: # full house\n return (7, values[2], 0)\n\n if same_suit(cards): # flush\n return (6, values[4], 0)\n\n if values[4] - values[0] == 4 and len(set(values)) == 5: # straight\n return (5, values[4], 0)\n\n if len(set(values)) == 3: # three of a kind or two pair\n # three of a kind\n if values[0] == values[2]:\n return (4, values[0], max(values[3:5]))\n if values[1] == values[3]:\n return (4, values[1], max(values[0], values[4]))\n if values[2] == values[4]: \n return (4, values[2], max(values[0:2]))\n else: # two pair\n return (3, max(values[1], values[3]), dict((values.count(i), i) for i in values)[1])\n\n if len(set(values)) == 4: # one pair\n high_value_card = dict((values.count(i), i) for i in values)[2]\n s = set(values)\n s.remove(high_value_card)\n return (2, high_value_card, max(s))\n\n return (1, values[4], 0)", "def next_play(self):\n\t\tfor card in self.hand:\n\t\t\tif is_valid(card):\n\t\t\t\tself.play_card(card)\n\t\t\t\treturn card\n\t\tglobal forced_rank\n\t\tif forced_rank == \"2\":\n\t\t\tglobal two_multiplier\n\t\t\tself.draw(two_multiplier)\n\t\t\tprint(f\"{self.name} draws {str(two_multiplier)} cards.\")\n\t\t\ttwo_multiplier = 0\n\t\t\tforced_rank = False\n\t\t\treturn None\n\t\tcard = 
self.draw(1)[0]\n\t\tprint(self.name + \" draws a card.\")\n\t\tif is_valid(card):\n\t\t\tself.play_card(card)\n\t\t\treturn card\n\t\tprint(self.name + \" passes the turn.\")", "def card(self):\n return self.cdb.name_to_card[self.card_name]", "def from_int(cls, rank: int):\n r = Card.ranks[rank % len(Card.ranks)]\n s = Card.suits[rank // len(Card.ranks)]\n return cls(r, s)", "def CARD_VALUES() -> dict:\n return {\"2\": 2, \"3\": 3, \"4\": 4, \"5\": 5, \"6\": 6, \"7\": 7, \"8\": 8, \"9\": 9,\n \"10\": 10, \"Jack\": 10, \"Queen\": 10, \"King\": 10, \"Ace\": 11}", "def choose_card(playable_cards):\r\n\r\n playing = playable_cards[0]\r\n print('\\n choosing \\n', playing)\r\n\r\n return playing # for now\r", "def take_comp_turn(self, deck, pile):\n matches = [card for card in self.hand if card.is_match(pile.top_card() != 0)]\n if len(matches) > 0: # can play\n choice = random.randrange(len(matches))\n self.play_card(matches[choice-1], pile)\n if matches[choice - 1].kind == 'wild' or matches[choice - 1].kind == 'wild4':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n matches[choice - 1].color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n print(str(self.name) + \" played \" + str(matches[choice-1]))\n\n else: # comp can't play\n # check if deck is empty -- if so, reset it\n if deck.is_empty():\n deck.reset_deck(pile)\n # draw a new card from the deck\n newcard = self.draw_card(deck)\n print(\"The computer drew: \" + str(newcard))\n if newcard.is_match(pile.top_card()): # can be played\n self.play_card(newcard, pile)\n if newcard.kind == 'wild':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n newcard.color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n else: # still can't play\n print(\"Sorry, you still can't play.\")\n print(str(self.name) + \" played \" + str(newcard))\n return", "def comp10001go_score_group(cards):\n \n # Put int a dictionary for each card which is scored based on its value\n # For example, J is 11, Q is 12 and K is 13, Ace is 20\n \n values = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, \n '0': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 20}\n \n # Spades and Clubs are black, Hearts and Diamonds are red\n suits = {'S': True, 'C': True, 'H': False, 'D': False}\n \n # First, find if the group is a valid N-of-a-kind (i.e there are 2 or more\n # cards of the same non_Ace value), the score is that value multiplied \n # by N facorial\n \n # Check if the group is valid for N-of-a-kind\n if validate_n_of_kind(cards, values) is True:\n # Calculate the score for the group with valid N-of-a-kind\n n = len(cards)\n card_value = values[cards[0][0]]\n score = card_value * factorial(n)\n return score\n \n # Check if the group is a valid run\n valid_run = validate_run(cards, values, suits)\n if valid_run[0] is True:\n sort_card = valid_run[1]\n score = 0\n for card in sort_card:\n score += card[0]\n return score\n \n # If the group is a singleton card or doesn't form a valid N-of-a-kind or \n # run, it should be scored as the negative sum of the scores of the \n # individual cards (scoring Aces as 20)\n else:\n if len(cards) == 1:\n return 1\n else: \n sort_card = []\n for card_num in range(len(cards)):\n value_card = values[cards[card_num][0]]\n suit_card = suits[cards[card_num][1]]\n sort_card.append((value_card, suit_card))\n \n score = 0\n for card in sort_card:\n score += (-card[0])\n return score", "def differentiate_cards(card):\n\t\tdef High_Card(numbers,colors):\n\t\t\treturn 
len(set(numbers)) == 5\n\t\tdef One_Pair(numbers,colors):\n\t\t\treturn len(set(numbers)) == 4\n\t\tdef Two_Pairs(numbers,colors):\n\t\t\tif len(set(numbers)) != 3:\n\t\t\t\treturn False\n\t\t\treturn [numbers.count(i) for i in numbers].count(2) == 4\n\t\tdef Three_of_a_Kind(numbers,colors):\n\t\t\tif len(set(numbers)) != 3:\n\t\t\t\treturn False\n\t\t\tfor i in numbers:\n\t\t\t\tif numbers.count(i) == 3:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\tdef Straight(numbers,colors):\n\t\t\tfor i in xrange(1,len(numbers)):\n\t\t\t\tif numbers[i] - numbers[i-1] != 1:\n\t\t\t\t\treturn False\n\t\t\treturn True\n\t\tdef Flush(numbers,colors):\n\t\t\treturn len(set(colors)) == 1\n\t\tdef Full_House(numbers,colors):\n\t\t\tnumbers_set = set(numbers)\n\t\t\tif len(numbers_set) != 2:\n\t\t\t\treturn False\n\t\t\ta = numbers[0]\n\t\t\tb= [x for x in numbers if x != a][0]\n\t\t\treturn (numbers.count(a) == 2 and numbers.count(b) == 3) or\\\n\t\t\t\t(numbers.count(a) == 3 and numbers.count(b) == 2)\n\t\tdef Four_of_a_Kind(numbers,colors):\n\t\t\tfor i in set(numbers):\n\t\t\t\tif numbers.count(i) == 4:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\tdef Straight_Flush(numbers,colors):\n\t\t\treturn Straight(numbers,colors) and Flush(numbers,colors)\n\t\tdef Royal_Flush(numbers,colors):\n\t\t\tRoyal = [10,11,12,13,14]\n\t\t\treturn numbers == Royal and Flush(numbers,colors)\n\n\t\tcards = {'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,\n\t\t 'T':10,'t':10,'J':11,'j':11,'Q':12,'q':12,'K':13,'k':13,'A':14,'a':14}\n\t\tnumbers = [cards[i[0]] for i in card]\n\t\tnumbers.sort()\n\t\tcolors = [i[1] for i in card]\n\t\t\n\t\tif Royal_Flush(numbers,colors):return 9\n\t\telif Straight_Flush(numbers,colors):return 8\n\t\telif Four_of_a_Kind(numbers,colors):return 7\n\t\telif Full_House(numbers,colors):return 6\n\t\telif Flush(numbers,colors):return 5\n\t\telif Straight(numbers,colors):return 4\n\t\telif Three_of_a_Kind(numbers,colors):return 3\n\t\telif Two_Pairs(numbers,colors):return 2\n\t\telif One_Pair(numbers,colors):return 1\n\t\telif High_Card(numbers,colors):return 0", "def find_card(self, query):\n\t\tresults = self._find_matches(query, 0.5)\n\n\t\tif len(results) > 0:\n\t\t\tresults.sort(key=lambda result: result[1], reverse=True)\n\t\t\treturn results[0][0]\n\t\telse:\n\t\t\treturn None", "def blackjack_result(cards):\n sum = 0\n a_cards = 0\n dictionary = {\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n 'T': 10,\n 'J': 10,\n 'Q': 10,\n 'K': 10,\n }\n for card in cards.split():\n if card in dictionary:\n sum = sum + dictionary[card]\n elif card == 'A':\n a_cards = a_cards + 1\n\n if a_cards > 0:\n for i in range(a_cards):\n if a_cards > 1:\n sum = sum + 1\n a_cards = a_cards - 1\n else:\n if sum + 11 < 22:\n sum = sum + 11\n else:\n sum = sum + 1\n\n return sum", "def get_card(self, name):\n for list in self.my_lists:\n for card in list.list_cards(card_filter='all'):\n if name in card.name:\n return card\n return 'None'", "def score_tuple( hand ):\n m = matches(hand)\n #print( m )\n #royal_flush -- a special case of straight flush.\n if flush(hand) and straight(hand) and hand[4].rank == 14:\n return (8, hand[4].rank, 0)\n #straight_flush\n elif flush(hand) and straight(hand):\n return (8, hand[4].rank, 0)\n #four_of_a_kind\n elif len(m) == 2 and m[0].count == 4:\n return (7, m[0].card.rank, 0)\n #full_house\n elif len(m) == 2 and m[0].count == 3 and m[1].count == 2:\n return (6, m[0].card.rank, m[1].card.rank)\n #flush\n elif flush(hand):\n return (5, 
hand[4].rank, 0)\n #straight\n elif straight(hand):\n return (4, hand[4].rank, 0)\n #three_of_a_kind\n elif len(m) == 3 and m[0].count == 3:\n return (3, m[0].card.rank, 0)\n #two_pair\n elif len(m) == 3 and m[0].count == 2 and m[1].count == 2:\n return (2, m[0].card.rank, m[1].card.rank)\n #one_pair\n elif len(m) == 4 and m[0].count == 2 and m[1].count == 1:\n return (1, m[0].card.rank, m[1].card.rank)\n # Simple high card. Is this adequate? We'll know if we get ties.\n else:\n return (0, hand[4].rank, 0) # or (0, m[0].card.rank, 0)", "def get_hand1_wins(p1_hand, p1_rank, p1_rank_value, p2_hand, p2_rank, p2_rank_value):\n if HAND_RANKS.index(p1_rank) > HAND_RANKS.index(p2_rank):\n return 1\n elif HAND_RANKS.index(p1_rank) < HAND_RANKS.index(p2_rank):\n return 0\n\n # Ranks are equal\n if p1_rank_value > p2_rank_value:\n return 1\n elif p1_rank_value < p2_rank_value:\n return 0\n\n # Ranks and rank values are equal, go by highest card until one hand wins\n for i in range(0, 5):\n val1 = VALUES[p1_hand[i][0]]\n val2 = VALUES[p2_hand[i][0]]\n if val1 > val2:\n return 1\n elif val1 < val2:\n return 0\n\n print \"WTF\"\n return 0", "def random_card(computer_hand):\n \n if len(computer_hand) != 1:\n random_index = random.randint(0,len(computer_hand)-1)\n else:\n random_index = 0\n card_to_play = computer_hand[random_index]\n print('computer hand: ', computer_hand)\n print('computer plays ', card_to_play)\n return computer_hand[random_index]", "def check_color_card(player, color):\n for card in player.cards:\n if card.suit == color:\n return True", "def dealer_matching(self):\n if len([card for card in self.dealer_hand if card[1] == '8']) > 0:\n self.discard_pile = [card for card in self.dealer_hand if card[1] == '8'][0]\n self.dealer_hand.remove(self.discard_pile)\n dealer_suits = [card[0] for card in self.dealer_hand]\n self.new_suit = max(set(dealer_suits), key=dealer_suits.count)\n print(\"\\nNew suit is :\", self.new_suit)\n return 1\n if self.new_suit != '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.new_suit:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n self.new_suit = ''\n return 1\n else:\n return 0\n if self.new_suit == '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.discard_pile[0] or card[1] == self.discard_pile[1]:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n return 1\n else:\n return 0", "def _get_card(self, name: str) -> Dict:", "def __int__(self):\n return Card.ranks.index(self.rank) + Card.suits.index(self.suit) * len(Card.ranks)", "def find(self, suitList, rank):\r\n flag = False\r\n \r\n #first index of the element of suitNode\r\n cursor = suitList\r\n \r\n #loop the through the list found in suitNode, ex) loop through list of H.\r\n while cursor != None:\r\n \r\n #if rank is found in the list, break out of loop\r\n if cursor.data == rank:\r\n flag = True\r\n break\r\n\r\n cursor = cursor._next\r\n\r\n return flag", "def card_key(self):\n card_rank = self.rank\n if card_rank > 9:\n card_rank = Card.rank_short[card_rank]\n card_image_name = str(card_rank) + Card.suits_short[self.suit]\n return str(card_image_name)", "def ascii_version_of_card(*cards):\n\n # we will use 
this to prints the appropriate icons for each card\n name_to_symbol = {\n 'Spades': '♠',\n 'Diamonds': '♦',\n 'Hearts': '♥',\n 'Clubs': '♣',\n }\n\n def card_to_string(card):\n # 10 is the only card with a 2-char rank abbreviation\n rank = card.rank if card.rank == '10' else card.rank[0]\n\n # add the individual card on a line by line basis\n return CARD.format(rank=rank, suit=name_to_symbol[card.suit])\n\n\n return join_lines(map(card_to_string, cards))", "def hand_points(hand):\n points = [[]]\n branch = 1\n for card in hand:\n if not card[\"is_hidden\"]:\n if card[\"value\"].isnumeric():\n for possibility in range(branch):\n points[possibility].append(int(card[\"value\"]))\n elif card[\"value\"] == \"A\":\n for possibility in range(branch):\n # Ace is 1 or 11. Creating the two possibility\n points.append(points[possibility] + [11]) \n points[possibility].append(1)\n branch += 1\n else:\n # Left are the face value of 10\n for possibility in range(branch):\n points[possibility].append(10)\n\n score = list(zip([sum(branch) for branch in points], points))\n score.sort(key=lambda x: x[0], reverse=True)\n\n for total, points in score:\n if total == 21 and len(hand) == 2:\n return total, \"BlackJack!\"\n if total <= 21:\n if 1 in points and 11 in points:\n return total, None\n if 1 in points: \n return total, \"Soft\"\n if 11 in points:\n return total, \"Hard\"\n else:\n return total, None\n\n # If you get there, you have lost or you had empty hand \n # or all card in hand was hiddien\n if score:\n return score[-1][0], None\n else:\n return 0, None", "def get_card_value(self, card):\n if card >= 10:\n return 10\n if card == 1:\n return 11\n return card", "def __init__(self, rank, suit):\n self.rank = rank\n self.suit = suit", "def get_card(self):\n return self.card", "def score_int( hand ):\n m = matches(hand)\n #print( m )\n #royal_flush -- a special case of straight flush.\n if flush(hand) and straight(hand) and hand[4].rank == 14:\n return 80000 + 100*order(hand[4])\n #straight_flush\n elif flush(hand) and straight(hand):\n return 80000 + 100*order(hand[4])\n #four_of_a_kind\n elif len(m) == 2 and m[0].count == 4:\n return 70000 + 100*order(m[0].card)\n #full_house\n elif len(m) == 2 and m[0].count == 3 and m[1].count == 2:\n return 60000 + 100*order(m[0].card) + order(m[1].card)\n #flush\n elif flush(hand):\n return 50000 + 100*order(hand[4])\n #straight\n elif straight(hand):\n return 40000 + 100*order(hand[4])\n #three_of_a_kind\n elif len(m) == 3 and m[0].count == 3:\n return 30000 + 100*order(m[0].card)\n #two_pair\n elif len(m) == 3 and m[0].count == 2 and m[1].count == 2:\n return 20000 + 100*order(m[0].card) + order(m[1].card)\n #one_pair\n elif len(m) == 4 and m[0].count == 2 and m[1].count == 1:\n return 10000 + 100*order(m[0].card) + order(m[1].card)\n # Simple high card. Is this adequate? 
We'll know if we get ties.\n else:\n return 100*order(hand[4]) # or 100*order(m[0].card)", "def card(bot, update):\n query = update.callback_query\n user = query.from_user\n chat_id = query.message.chat_id\n selected_card = query.data\n\n if (chats[chat_id].player1.card_played == []) and (chats[chat_id].player2.card_played == []):\n bot.send_message(text=Strings.CARD_SELECTED.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN,\n isgroup=True)\n if chats[chat_id].player1.user == user:\n chats[chat_id].player1.card_played = chats[chat_id].player1.hand[int(selected_card)]\n chats[chat_id].player1.hand.remove(chats[chat_id].player1.hand[int(selected_card)])\n\n elif chats[chat_id].player2.user == user:\n chats[chat_id].player2.card_played = chats[chat_id].player2.hand[int(selected_card)]\n chats[chat_id].player2.hand.remove(chats[chat_id].player2.hand[int(selected_card)])\n return CARD\n\n else:\n if chats[chat_id].player1.user == user and chats[chat_id].player1.card_played != []:\n bot.send_message(text=Strings.CARD_SELECTED2.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return CARD\n elif chats[chat_id].player2.user == user and chats[chat_id].player2.card_played != []:\n bot.send_message(text=Strings.CARD_SELECTED2.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return CARD\n else:\n if chats[chat_id].player1.user == user:\n chats[chat_id].player1.card_played = chats[chat_id].player1.hand[int(selected_card)]\n chats[chat_id].player1.hand.remove(chats[chat_id].player1.hand[int(selected_card)])\n\n elif chats[chat_id].player2.user == user:\n chats[chat_id].player2.card_played = chats[chat_id].player2.hand[int(selected_card)]\n chats[chat_id].player2.hand.remove(chats[chat_id].player2.hand[int(selected_card)])\n\n bot.edit_message_text(text=Strings.CARD_SELECTED.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN)\n bot.send_message(chat_id,\n Strings.SELECTION_COMPLETED,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n\n reply_markup = ReplyKeyboardMarkup(c_b_keyboard, selective=False)\n bot.send_message(chat_id,\n Strings.QUESTION,\n reply_markup=reply_markup,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return BET_CHECK", "def __init__(self, rank, suit):\n self._rank = rank\n self._suit = suit", "def get_card(self, slot):\n return self._starting_card[slot]", "def buy_card(self):\n\n print(f\"Hand has buying power {self.hand_buying_power}...\")\n bought_card = None\n\n # by Platinium, if possible\n # otherwise (game stage agnostic) can buy a province or colony, always buy it\n if ((self.highest_buyable_money == cards.PLATINUM) and\n (self.game_stage == GameStage.early_game)):\n bought_card = cards.PLATINUM\n elif ((self.highest_buyable_victory_points == cards.PROVINCE) or\n (self.highest_buyable_victory_points == cards.COLONY)):\n bought_card = self.highest_buyable_victory_points\n else:\n # buy the highest buyable money by default\n if (self.highest_buyable_money != cards.COPPER):\n bought_card = self.highest_buyable_money\n\n # except if in the late game stage, in which case buy the highest\n # buyable victory points instead\n if ((self.game_stage == GameStage.late_game) and\n (self.highest_buyable_victory_points) and\n 
(self.highest_buyable_victory_points.victory_points > 0)):\n bought_card = self.highest_buyable_victory_points\n print(f\"Late Stage Game, so buying victory points over money\")\n\n # explain the play\n self.speak_hand()\n s = f\"for total buying power of {self.hand_buying_power}\"\n self.game.speak_str(s)\n\n # gain the card bought, if any, to the discard pile:\n if bought_card:\n s = f\"I buy {bought_card.name}\"\n self.game.speak_str(s)\n\n # gain the card to the discard pile\n self.deck.discard.append(bought_card)\n self.game.buy_card(bought_card)\n else:\n s = f\"I do not buy anything\"\n self.game.speak_str(s)\n\n # the whole hand is used up buying the card, discard the hand\n self.deck.discard_hand()", "def get_ratecard(self, account_id, batch=False):\n path = 'act_%s/ratecard' % account_id\n return self.make_request(path, 'GET', batch=batch)", "def __init__(self, suit_rank):\n self.suit = suit_rank[0]\n self.rank = suit_rank[1]\n self.name, self.values = self._translate_card()\n self.image_path = \"\"\n self.image_obj = None", "def make_card(cls, suit, pip):\n return Card(suit, pip)", "def has_rank(self, rank):\n for card in self.cards:\n if card.rank == rank:\n return True\n return False", "def winner(self):\n\n assert self.is_terminal()\n\n # The player who fold will lose\n if self.status[0] == FOLDED:\n return 1\n if self.status[1] == FOLDED:\n return 0\n\n # Campare the hands\n card_type_0 = CardType([*self.hand[0], *self.pub])\n card_type_1 = CardType([*self.hand[1], *self.pub])\n\n def campare(a, b, *keys):\n if keys[0](a) > keys[0](b):\n return 0\n elif keys[0](a) < keys[0](b):\n return 1\n else:\n if len(keys) == 1:\n return -1\n else:\n return campare(a, b, *keys[1:])\n\n return campare(card_type_0, card_type_1, lambda x: x.type,\n *[lambda x: x.key[i] for i in range(len(card_type_0.key))])", "def compare_cards(card1, card2, deck, assigned_card_value):\n if card1 not in deck: \n raise ValueError(\"The card doesn't exist\")\n if card2 not in deck: \n raise ValueError(\"The card doesn't exist\")\n card1 = assigned_card_value.get(card1)\n card2 = assigned_card_value.get(card2)\n if card1 < card2:\n return 1\n elif card1 > card2:\n return 0\n elif card1 == card2:\n return -1", "def score_hands():\n #\"http://projecteuler.net/project/poker.txt\"\n poker_txt= urllib.request.urlopen( \"file:poker.txt\" ).read().decode(\"ASCII\")\n outcome= collections.Counter()\n for line in poker_txt.splitlines():\n if not line: continue\n cards= line.split()\n assert len(cards) == 10\n h_1 = to_hand(cards[:5])\n h_2 = to_hand(cards[5:])\n s_1= score_tuple(h_1)\n s_2= score_tuple(h_2)\n assert s_1 != s_2, \"Problem scoring {0!r} {1!r}\".format(h_1,h_2)\n winner= 1 if s_1 > s_2 else 2\n # The most obscure case:\n # if s_1[:2] == s_2[:2]:\n # print( \"Close\", cards[:5], s_1, cards[5:], s_2, winner )\n outcome[winner] += 1\n # Paranoid double check on two scoring systems.\n if score_int(h_1) > score_int(h_2) if winner == 1 else score_int(h_1) < score_int(h_2):\n pass\n else:\n print( \"{!r} {!r} Player {:d}\".format(cards[:5],cards[5:],winner) )\n print( s_1, \":\", s_2 )\n print( score_int(h_1), score_int(h_2) )\n raise AssertionError( \"Logic Problem\" )\n return outcome", "def get_suit(self):\r\n return self.suit", "def deal_cards(deck, card): \n player = deck[card]\n return player", "def deal_two_cards_to_each(deck):\n hands = [deck[0:2] + [sorted([deck[0][0], deck[1][0]], reverse=True)] + [deck[0][1] == deck[1][1]],\n deck[2:4] + [sorted([deck[2][0], deck[3][0]], reverse=True)] + [deck[2][1] 
== deck[3][1]],\n deck[4:6] + [sorted([deck[4][0], deck[5][0]], reverse=True)] + [deck[4][1] == deck[5][1]],\n deck[6:8] + [sorted([deck[6][0], deck[7][0]], reverse=True)] + [deck[6][1] == deck[7][1]],\n deck[8:10] + [sorted([deck[8][0], deck[9][0]], reverse=True)] + [deck[8][1] == deck[9][1]],\n deck[10:12] + [sorted([deck[10][0], deck[11][0]], reverse=True)] + [deck[10][1] == deck[11][1]]]\n deck = deck[12:]\n return hands, deck", "def test_seven_cards_poker(self):\n self.assertEqual(best_hand(\"6C 7C 8C 9C TC 5C JS\".split()),\n ('6C', '7C', '8C', '9C', 'TC'))\n self.assertEqual(best_hand(\"TD TC TH 7C 7D 8C 8S\".split()),\n ('TD', 'TC', 'TH', '8C', '8S'))\n self.assertEqual(best_hand(\"JD TC TH 7C 7D 7S 7H\".split()),\n ('JD', '7C', '7D', '7S', '7H'))", "def card_to_string(card):\n rankStrings = [\"ace\",\"two\",\"three\",\"four\",\"five\",\"six\",\"seven\",\n \"eight\",\"nine\",\"ten\",\"jack\",\"queen\",\"king\"]\n suitStrings = [\"spades\", \"hearts\", \"diamonds\",\"clubs\"]\n return rankStrings[card[0]] + \" of \" + suitStrings[card[1]]", "def play_card(self, rnd: PlayerRound) -> int:\n # play random\n\n # get the valid cards to play\n valid_cards = rnd.get_valid_cards()\n\n # select a random card\n return np.random.choice(np.flatnonzero(valid_cards))", "def test_suit(self):\n card = self._card\n self.assertEqual(card.suit, self._suit)", "def get_card_val(card):\n\n if card == '1':\n return 1\n if card == '2':\n return 2\n if card == '3':\n return 3\n else:\n return 4", "def get_card(self):\n\n card = random.randint(1,13)\n return card", "def get_card(name, reda):\n connection = pymongo.MongoClient(MONGO_URL)\n db = connection[DB]\n\n dbcard = db.cards.find_one({'name': name, 'redaction': reda})\n return tocard(dbcard) if dbcard is not None else None", "def play_card(self, player_index, card_index):\n card = self.hands[player_index][card_index]\n color_index = COLOR.index(card[0])\n if self.is_card_playable(card):\n # the color and the number match, add the card\n self.firework[color_index].append(card)\n # if we complete the firework for a color, we get an extra\n # blue stone\n if len(self.firework[color_index]) == 5:\n self.nb_blue_stone = min(self.nb_blue_stone + 1,\n MAX_BLUE_STONE)\n else:\n # error, the card cannot be played, remove a red_stone\n if self.nb_red_stone == 0:\n raise GameOverError(\"The card \" + card + \" cannot be\\\n played and there is no red stone anymore\")\n self.nb_red_stone = self.nb_red_stone - 1\n self.hands[player_index][card_index] = self.draw_card()\n return self.hands[player_index][card_index]" ]
[ "0.7077452", "0.66447884", "0.6558589", "0.65260136", "0.6487388", "0.64388734", "0.6393751", "0.6229948", "0.61175305", "0.60968053", "0.6085358", "0.60536987", "0.6034691", "0.59970397", "0.5968616", "0.5967686", "0.5961493", "0.596145", "0.5949978", "0.5949622", "0.58889884", "0.58835673", "0.58597386", "0.58329165", "0.5827071", "0.58164823", "0.57830244", "0.57384515", "0.5736749", "0.57106435", "0.5696334", "0.5686539", "0.5684731", "0.5657722", "0.5647824", "0.5626887", "0.5610563", "0.5608444", "0.5602802", "0.5569554", "0.55653787", "0.5524852", "0.5515857", "0.55147594", "0.55128425", "0.5510485", "0.5506762", "0.549714", "0.5495186", "0.5484604", "0.54840297", "0.5475295", "0.5466569", "0.54644406", "0.5449355", "0.54482037", "0.5440094", "0.54320675", "0.5421693", "0.54161274", "0.5415569", "0.5415111", "0.5411266", "0.54023373", "0.540203", "0.54010427", "0.5374173", "0.5372878", "0.536888", "0.53676385", "0.5366152", "0.5366059", "0.53598595", "0.53532845", "0.5348163", "0.53466356", "0.53394085", "0.53247267", "0.5320047", "0.531889", "0.5317929", "0.5312676", "0.5306694", "0.5294198", "0.5293966", "0.528607", "0.52854294", "0.5281469", "0.5279077", "0.5270009", "0.526914", "0.525899", "0.5258854", "0.525315", "0.52496284", "0.5248784", "0.5244968", "0.5243385", "0.5240925", "0.5226148" ]
0.55325085
41
Returns the comparison of which is greater based on id
def __lt__(self, other):
    return self.n < other.n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare(self) -> int:", "def compare_to(self, other) -> int:\n if self.id == other.id:\n return 0\n if self.status != other.status:\n return -1 if self.status < other.status else 1\n if self.last_played != other.last_played:\n return -1 if self.last_played < other.last_played else 1\n return -1 if self.id < other.id else 1", "def _cmp(a, b): # pylint: disable=invalid-name\n return (a > b) - (a < b)", "def greater_than(self) -> global___Expression:", "def cmp(x, y):\n return (x > y) - (x < y)", "def _cmp(x, y):\n if x[1].count > y[1].count:\n return CmpRelation.GREATER\n if x[1].count < y[1].count:\n return CmpRelation.LESS\n if x[1].ptn_length < y[1].ptn_length:\n return CmpRelation.GREATER\n if x[1].ptn_length > y[1].ptn_length:\n return CmpRelation.LESS\n return CmpRelation.EQUAL", "def __cmp__(self, other):\n if self.get_id() < other.get_id():\n return -1\n elif self.get_id() == self.get_id():\n return 0\n else:\n return 1", "def __gt__(self, other):\n\t\ttry:\n\t\t\treturn self.val > other.val\n\t\texcept:\n\t\t\treturn self.val > other", "def __gt__(self, other):\n return self.greaterThan(other)", "def cmp(x, y):\n return (x > y) - (x < y)", "def sortById(cls,x,y):\n return(x.getRunningId()-y.getRunningId())", "def __gt__(self, other):\n return greater(self, other)", "def cmp(a, b):\n return (a > b) - (a < b)", "def __gt__(self, other):\n return self._key > other._key", "def compare(a, b):\n if a > b:\n return a\n return b", "def __gt__(self, other):\n return self._ordinals > other.ordinal()", "def __cmp__(self, other):\n return cmp(self.id, other.id)", "def __gt__(self, other):\n return self.element() > other.element()", "def greater(value, other):\n return value < other", "def __gt__(self, other):\n return other < self._cmpkey()", "def __gt__(self, other: Any) -> ColumnOperators:\n return self.operate(gt, other)", "def compare(a,b):\r\n if a>b:\r\n return 1\r\n elif a==b:\r\n return 0\r\n else:\r\n return -1", "def _greater_than_op(spec):", "def cmp(x, y):\n if x + y > y + x: return 1\n elif x + y == y + x: return 0\n else: return -1", "def __gt__(self, other):\n if other.groupnumber > self.groupnumber:\n return True\n else:\n return False", "def cmpValue(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n return val1 > val2", "def cmpValue(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n return val1 > val2", "def __gt__(self, other):\n return other < self", "def compare(self, comp_els):\n return max(comp_els, key=lambda x: x[1])[0]", "def __gt__(self, other):\n return int(self.rank) > int(other.rank)", "def __gt__(self, other):\n return self.eval_score < other.eval_score", "def __gt__(self, other):\r\n assert isinstance(other, Order)\r\n return self - other > 0", "def __gt__(self, *args):\n return _ida_hexrays.cdo_t___gt__(self, *args)", "def __gt__(self, other):\n return self.__f > other.get_f()", "def __gt__(self, obj):\r\n return assert_(self.obj > obj, '%r <= %r' % (self.obj, obj))", "def cmpId(self,other):\n selfIsInterior = self.flags & 1\n otherIsInterior = other.flags & 1\n #--Compare exterior/interior. 
(Exterior cells sort to top.)\n if selfIsInterior != otherIsInterior:\n #--Return -1 if self is exterior\n return (-1 + 2*(selfIsInterior))\n #--Interior cells?\n elif selfIsInterior:\n return cmp(self.cellName,other.cellName)\n #--Exterior cells?\n elif self.gridX != other.gridX:\n return cmp(self.gridX,other.gridX)\n else:\n return cmp(self.gridY,other.gridY)", "def gt(self, x, y):\n return self.lt(y,x)", "def compare(a, b, larger_is_better):\n\n if larger_is_better:\n return a > b\n else:\n return a < b", "def __gt__(self, other):\n self.conds.append((self.name, '>', other))\n return self", "def __cmp__(self, other) :\n if self.strength > other.strength:\n return 1;\n elif self.strength == other.strength :\n if self.rank > other.rank :\n return 1;\n elif self.rank == other.rank :\n return 1 if self.kickers > other.kickers else -1 if self.kickers < other.kickers else 0;\n return -1;", "def __lt__(self, other):\n return self.id < other.id", "def __lt__(self, other):\n return self.id < other.id", "def item_comparer(self):\n return self.item_comparer_value", "def less(value, other):\n return value > other", "def getChangesGreaterThan(self, last_changeid, t=None):\n assert last_changeid >= 0\n \n change_obj = rpc.RpcProxy('software_dev.commit')\n cids = change_obj.search([('id', '>', last_changeid)])\n # FIXME: defer\n changes = self.runInteractionNow(self._get_change_num, cids)\n changes.sort(key=lambda c: c.number)\n return changes", "def comparison(self):\n return self._comparison", "def __cmp__(self, x):\n if self.score < x.score: return -1\n elif self.score == x.score: return 0\n else: return 1", "def greater(lhs, rhs):\n return _make.greater(lhs, rhs)", "def compareFunction( self, first, second ):\n for ascending,column in self.sortOrder:\n aValue,bValue = column.get(first),column.get(second)\n diff = cmp(aValue,bValue)\n if diff:\n if not ascending:\n return - diff \n else:\n return diff \n return 0", "def __gt__(self,other):\r\n\t\tsorted_self = sorted(self.vector, reverse=True) #sort both lists in descending order\r\n\t\tsorted_other = sorted(other, reverse=True) \r\n\t\tcmpflag = False\r\n\t\tfor li1, li2 in zip(sorted_self, sorted_other):\r\n\t\t\tif(li1 > li2):\r\n\t\t\t\tcmpflag = True\r\n\t\treturn cmpflag", "def greater_than_or_equal(self) -> global___Expression:", "def __gt__(self, other):\n if isinstance(other, type(self)):\n return self.number > other.number\n return NotImplemented", "def __gt__ (self, other) :\n return other.__lt__(self)", "def issueIdCompare (x, y):\n return int(x) - int(y)", "def __gt__(self, *args):\n return _ida_hexrays.operand_locator_t___gt__(self, *args)", "def less_than(self) -> global___Expression:", "def test_greater_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::gt\"},\n )", "def compare_entities(e1, e2):\n sp1 = e1.sorting_priority\n sp2 = e2.sorting_priority\n if sp1 > sp2:\n return 1\n elif sp1 == sp2:\n return 0\n else:\n return -1", "def compare(self, a: long, b: long) -> int:\n ...", "def __gt__(self, other):\n return self._metric_value > other.metric_value()", "def cmp ( self, object1, object2 ):\n return cmp( self.get_raw_value( object1 ),\n self.get_raw_value( object2 ) )", "def __gt__(self, other):\n student1 = self.calculate_total()\n student2 = other.calculate_total()\n\n if student1 > student2:\n return True\n else:\n return False", "def __gt__(self, *args):\n return _ida_hexrays.cnumber_t___gt__(self, *args)", "def 
fragment_id_gt(frag_id1, frag_id2):\n return fragment_id_split(frag_id1) > fragment_id_split(frag_id2)", "def __gt__(self, other):\n\n if self.count == other.count:\n return self.word < other.count\n return self.count > other.count", "def statusCompare (x, y):\n xs = db.status.get(x, 'order')\n ys = db.status.get(y, 'order')\n c = float(xs) - float(ys)\n if c >= 0.0: \n return int(c)\n else:\n return -int(abs(c))", "def comparison_id(self):\n return self._benchmark_comparison_id", "def __gt__(self, other):\n return self.__cmp__(other) > 0", "def __gt__(self, other):\n if self.i1 > other.i1:\n return True\n elif self.i1 == other.i1:\n if self.i2 > other.i2:\n return True\n elif self.i2 == other.i2 and self.axial > other.axial:\n return True\n return False", "def __gt__(self, other):\n assert isinstance(other, Segment)\n return self.chain_id > other.chain_id", "def __gt__(self, other):\n return True if self._compare(other) > 0 else False", "def cmp(x, y):\n if x == y:\n return 0\n elif x is None:\n if y is None:\n return 0\n else:\n return -1\n elif y is None:\n return 1\n else:\n # TODO: consider casting the values to string or int or floats?\n # note that this is the minimal replacement function\n return (x > y) - (x < y)", "def __gt__(self, transposon):\n return self.score > transposon.score", "def __ge__(self, other):\n return greater_equal(self, other)", "def __lt__(self, rs):\n Number.comparisons += 1\n result = self.data < rs.data\n return result", "def compare(self, value: int, /) -> None:", "def lamport_compare(ts1, ts2):\n time1 = parse_op_id(ts1)\n time2 = parse_op_id(ts2)\n if time1.counter != time2.counter:\n return time1.counter - time2.counter\n if time1.actorId != time2.actorId:\n return 1 if time1.actorId > time2.actorId else -1\n return 0", "def __gt__(self, other):\n self_list = self.date.split(\"/\")\n other_list = other.date.split(\"/\")\n if self_list[2] > other_list[2]:\n return True\n else:\n if self_list[2] == other_list[2]:\n if self_list[1] > other_list[1]:\n return True\n elif self_list[1] == other_list[1]:\n if self_list[0] > other_list[0]:\n return True\n return False", "def compare(a, b):\n return a - b", "def __gt__(self, other):\n return (self.__class__.__name__, self._values()) > (other.__class__.__name__, other._values())", "def __gt__(self, *args):\n return _ida_hexrays.cexpr_t___gt__(self, *args)", "def CmpProperties(self, that):\n if not self.__entity:\n return cmp(self.__entity, that.__entity)\n\n for (identifier, order) in self.__orderings:\n value1 = self.__GetValueForId(self, identifier, order)\n value2 = self.__GetValueForId(that, identifier, order)\n\n result = cmp(value1, value2)\n if order == Query.DESCENDING:\n result = -result\n if result:\n return result\n return 0", "def __gt__(self, other: 'MaxNode') -> bool:\n if self.priority == other.priority:\n return self.value < other.value\n return self.priority < other.priority", "def greater_equal(lhs, rhs):\n return _make.greater_equal(lhs, rhs)", "def __gt__(self, *args):\n return _ida_hexrays.ccase_t___gt__(self, *args)", "def __gt__(self, *args):\n return _ida_hexrays.var_ref_t___gt__(self, *args)", "def __gt__(self, other):\n return self.weight() > other.weight()", "def _greater_than_or_equal_to_op(spec):", "def __gt__(self, *args):\n return _ida_hexrays.citem_locator_t___gt__(self, *args)", "def __gt__(self, *args):\n return _ida_hexrays.cif_t___gt__(self, *args)", "def __gt__(self, other):\n return self.weight > other.weight", "def test_greater_than_bcast(self):\n 
utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::gt\"},\n )", "def __cmp__(self, other):\n if options.rank_by.lower() != \"money\":\n \"\"\"flags ▲, money ▲, hints ▼, time ▼\"\"\"\n this, that = len(self.flags), len(other.flags)\n if this == that:\n this, that = self.money, other.money\n if this == that:\n this, that = len(other.hints), len(self.hints)\n if this == that:\n this, that = other.last_scored(), self.last_scored()\n else:\n \"\"\"money ▲, hints ▼, time ▼, flags ▲\"\"\"\n this, that = self.money, other.money\n if this == that:\n this, that = len(other.hints), len(self.hints)\n if this == that:\n this, that = other.last_scored(), self.last_scored()\n if this == that:\n this, that = len(self.flags), len(other.flags)\n if this < that:\n return 1\n elif this == that:\n return 0\n else:\n return -1", "def cmpGreaterThan(self, conn1, sql1, conn2, sql2):\n for row in self.get_query_results(conn1, sql1):\n res1 = row[0]\n for row in self.get_query_results(conn2, sql2):\n res2 = row[0]\n self.log.info(\n \"cmpGreaterThan:: task: {}, value1: {}, value2: {}\".format(\n self.task_id, str(res1), str(res2)\n )\n )\n\n if res1 <= res2:\n raise AirflowException(\n \"EtlValidation cmpGreaterThanError: query {}\".format(sql1 + \"<=\" + sql2)\n )", "def __cmp__(self, other):\n if self.weight > other.weight:\n return 1\n elif self.weight < other.weight:\n return -1\n else:\n return 0", "def __gt__(self, *args):\n return _ida_hexrays.carg_t___gt__(self, *args)", "def compare(self,node, new_node):\n if new_node.get_value() == node.get_value():\n return 0\n elif new_node.get_value() < node.get_value():\n return -1\n else:\n return 1", "def _cmp(pack, other):\n return pack.name < other.name", "def __gt__(self, other: 'MinNode') -> bool:\n if self.priority == other.priority:\n return self.value > other.value\n return self.priority > other.priority", "def test_greaterThan(self):\n self.assertTrue(Comparable(2) > Comparable(1))\n self.assertFalse(Comparable(0) > Comparable(3))", "def __gt__(self, *args):\n return _ida_frame.stkpnt_t___gt__(self, *args)" ]
[ "0.6579224", "0.64989537", "0.62860954", "0.6273539", "0.6266201", "0.6231986", "0.62169814", "0.6210893", "0.6201532", "0.6189547", "0.61581546", "0.61234397", "0.61148363", "0.608585", "0.607456", "0.6059237", "0.6042042", "0.6005502", "0.59676754", "0.596216", "0.5961682", "0.58801556", "0.5855983", "0.584502", "0.5840321", "0.58155066", "0.58155066", "0.5812717", "0.58104575", "0.5805525", "0.57722664", "0.57692164", "0.57533467", "0.5752068", "0.5740226", "0.57379293", "0.5737202", "0.5735563", "0.57280546", "0.57266897", "0.5703711", "0.5703711", "0.5696389", "0.5692238", "0.56920004", "0.5680636", "0.567149", "0.56542367", "0.5644877", "0.5642066", "0.5631667", "0.56225425", "0.56217617", "0.56209844", "0.5608039", "0.5603151", "0.5597656", "0.5579277", "0.5574999", "0.55730397", "0.5555184", "0.5554583", "0.554408", "0.55434376", "0.5538815", "0.55370116", "0.55297726", "0.5525419", "0.55191594", "0.5515566", "0.55146015", "0.5486654", "0.54859143", "0.548355", "0.54750025", "0.5465347", "0.5459101", "0.54584324", "0.5455109", "0.54514253", "0.5451324", "0.54505026", "0.5446397", "0.54462403", "0.5445416", "0.5444801", "0.5433743", "0.5431678", "0.5429633", "0.54287887", "0.5419834", "0.54129577", "0.54044014", "0.53933114", "0.5392161", "0.5389428", "0.5384884", "0.5379587", "0.5377279", "0.5372044", "0.5371585" ]
0.0
-1
Finds the rank of the card and returns rank
def rank(self):
    rank = self.n % 13
    return rank
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rank():\n return 0", "def get_rank() -> int:\n return collective.get_rank()", "def get_rank(self) -> int:\r\n return self.rank", "def get_rank(self):\r\n return self.rank", "def __int__(self):\n return Card.ranks.index(self.rank) + Card.suits.index(self.suit) * len(Card.ranks)", "def blackjackValue(self):\n NUMBERRANKS = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\n FACECARDS = [\"jack\", \"queen\", \"king\"]\n ACE = [\"ace\"]\n if self.rank in NUMBERRANKS:\n return int(self.rank)\n elif self.rank in FACECARDS:\n return 10\n elif self.rank in ACE:\n return 11", "def rank(self):\n return self.lib.calculate_rank()", "def get_rank(self):\n \n if self.rank == None:\n self.rank = self.main_ranker(self.string)\n \n return self.rank", "def rank(players, community):\n\t# Structure that holds the player ranking results\n\tclass RankResults():\n\t\tdef __init__(self):\n\t\t\tself.winner = [] # (WIN, player_name) or (TIE, [player1, player2, ...]) \n\t\t\tself.bestHands = [] # [(pl_name, bestHand, handRank), ... ]\n\t\t\tself.kicker = [] # If player hands' ranks tie but lose\n\t\t\t\t\t# by kicker, this will have one card\n\t\t\t\n\t\n\t\tdef __repr__(self):\n\t\t\tif self.winner[0] == \"Win\":\n\t\t\t\twinPlayerIndex = [player[0] for player in \\\n\t\t\t\t\t\t\tself.bestHands].index(self.winner[1])\n\t\t\telse:\n\t\t\t\twinPlayerIndex = [player[0] for player in \\\n\t\t\t\t\t\t\tself.bestHands].index(self.winner[1][0])\n\t\t\twinningRank = self.bestHands[winPlayerIndex][2]\n\t\t\t\n\t\t\t# Returns Win/Tie, player name, and winning rank\n\t \treturn str(self.winner) + \" rank = \" + str(winningRank) + \" kicker = \" \\\n\t\t\t\t+ str(self.kicker)\n\n\t### Rank function definition starts here\n\n\t# Dictionary for converting card strings to numbers\n\tcardNums = {\"A\":14, \"K\":13, \"Q\":12, \"J\":11, \"10\":10, \"9\":9, \"8\":8, \\\n\t\t\t\"7\":7, \"6\":6, \"5\":5, \"4\":4, \"3\":3, \"2\":2}\n\n\t# scan each player's hand and return their best hand\n\twinHands = []\n\tresult = RankResults()\n\tfor player in players:\n\t\tcards = player.hand + community\n\t\t(playerHand, handStrength) = best_hand(cards)\n\t\tif len(winHands) != 0:\n\t\t\t# compare current player's hand to other\n\t\t\t# players in the best hands list\n\t\t\tif handStrength > winHands[0][2]:\n\t\t\t\twinHands = [(player.name, playerHand, handStrength)]\n\t\t\telif handStrength == winHands[0][2]:\n\t\t\t\twinHands.append( (player.name, playerHand, handStrength) )\n\t\t# if first player in list, \n\t\t# create a new list with this player's hand\t\t\t\n\t\telse: \n\t\t\twinHands = [(player.name, playerHand, handStrength)]\n\t\t\t\t\t\n\n\t\t# insert each player's hand into results\n\t\tresult.bestHands.append( (player.name, playerHand, handStrength) )\n\n\t# compare results. 
\n\t# winHands = ((name, handStrength, hand), ...)\n\tif len(winHands) == 1:\n\t\tresult.winner = (\"Win\", winHands[0][0])\n\telse:\n\t\t# tuple the i cards of every player to facilitate\n\t\t# comparison\n\t\tzippedHands = zip(*[winner[1] for winner in winHands])\n\t\t\n\t\t# Compare top 5 cards of tied winners\n\t\tfor i in range(5):\n\t\t\ttopCards = zippedHands[i]\n\t\t\tlargestCard = max(topCards) # find largest card \n\t\t\tisPlayerRemoved = False # loser detection flag\n\t\t\tnewWinHands = []\n\t\t\tfor j in range(len(topCards)):\n\t\t\t\tif topCards[j] == largestCard:\n\t\t\t\t\tnewWinHands.append(winHands[j]) \n\t\t\t\telse:\n\t\t\t\t\t# Remove players with < max\n\t\t\t\t\tisPlayerRemoved = True\n\t\t\t\t\t#winHands.remove(winHands.index(j))\n\t\t\t\t\t\n\t\t\twinHands = newWinHands\n\t\t\t# If only one winner remaining, stop checking\n\t\t\tif len(winHands) == 1:\n\t\t\t\tresult.kicker = largestCard\n\t\t\t\tresult.winner = (\"Win\", winHands[0][0])\t\t\n\t\t\t\tprint \"best hands = \" + str(result.bestHands)\n\t\t\t\treturn result\t\n\t\t\t# If player was removed, remake zippedHands\n\t\t\tif isPlayerRemoved:\n\t\t\t\tzippedHands = zip(*[winner[1] for winner in winHands])\n\t\t\t\t\t\n\t\t\n\t\tresult.winner = (\"Tie\", [winner[0] for winner in winHands])\n\t\n\tprint \"best hands = \" + str(result.bestHands)\n\n\treturn result", "def get_rank(self):\n return self.rank", "def hand_rank(hand):\n ranks = card_ranks(hand) # ranks is a list of all the ranks. A sorted list of ranks is returned\n if straight(hand) and flush(hand): # Straight flush\n return (8, max(ranks)) # 2 3 4 5 6 (8, 6) 6 7 8 9 T (8, 10)\n elif kind(4, ranks): # Here kind(4, ranks) is used to return a bolean value\n # kind(4, ranks) returns the int when true, returns false if not true (used as boolean)\n return (7, kind(4, ranks), kind(1, ranks)) # 9 9 9 9 3 (7, 9, 3) 9 9 9 9 5 (7, 9, 5)\n elif kind(3, ranks) and kind(2, ranks): # full house\n return (6, kind(3, ranks), kind(2, ranks))\n elif flush(hand): # flush\n return (5, ranks)\n elif straight(ranks): # straight\n return (4, max(ranks))\n elif kind(3, ranks): # 3 of a kind\n return (3, kind(3, ranks), ranks)\n elif two_pair(ranks): # 2 pair\n return (2, two_pair(ranks), ranks)\n elif kind(2, ranks): # kind\n return (1, kind(2, ranks), ranks)\n else: # high card\n return (0, ranks)", "def rank_matches(cards, rank):\n\n return [card for card in cards if card.rank == rank]", "def get_rank(self):\n details = self._tab.find(\"table\", class_=\"details\")\n rank, = self.rank_re.match(details.find(\"td\", class_=\"value\").get_text()).groups()\n return rank", "def getRank(self):\r\n return self.rank", "def get_rank(self) -> int:\n return dist.get_rank()", "def rank() -> int:\n return dist.get_rank() if dist.is_initialized() else 0", "def get_rank(self):\n return int(self._rank)", "def hand_rank(hand):\n ranks = card_ranks(hand)\n if straight(ranks) and flush(hand):\n return (8, max(ranks))\n elif kind(4, ranks):\n return (7, kind(4, ranks), kind(1, ranks))\n elif kind(3, ranks) and kind(2, ranks):\n return (6, kind(3, ranks), kind(2, ranks))\n elif flush(hand):\n return (5, ranks)\n elif straight(ranks):\n return (4, max(ranks))\n elif kind(3, ranks):\n return (3, kind(3, ranks), ranks)\n elif two_pair(ranks):\n return (2, two_pair(ranks), ranks)\n elif kind(2, ranks):\n return (1, kind(2, ranks), ranks)\n else:\n return (0, ranks)", "def getRank(self):\n return self.rank", "def _compute_rank(self):\n# print(Card((self.ranks[0]),self.suits[0]))\n# 
print(Card((self.ranks[1]),self.suits[1]))\n# print(Card((self.ranks[2]),self.suits[2]))\n# print(Card.ranks[self.ranks[0]])\n# #print(Card.ranks[self.ranks[0]+1])\n# print(self.ranks[1])\n# print(Card.suits[self.suits[1]])\n a = ['Ace','2','3']\n newlist =[self.ranks[0],self.ranks[1],self.ranks[2]]\n newlist = sorted(newlist)\n if(Card.suits[self.suits[0]] == Card.suits[self.suits[1]] == Card.suits[self.suits[2]]):\n #a = ['Ace','2','3']\n if(Card.ranks[self.ranks[0]] in a) and (Card.ranks[self.ranks[1]] in a) and (Card.ranks[self.ranks[2]] in a):\n self.rank=5\n else:\n if(newlist[1] - newlist[0]) == 1 and (newlist[2]-newlist[1])==1:\n #StraightFlush\n self.rank=5\n else:\n #Flush\n self.rank=2\n \n #Threeofakind\n elif (Card.ranks[self.ranks[0]] == Card.ranks[self.ranks[1]] == Card.ranks[self.ranks[2]]):\n self.rank=4\n #Pair\n elif(Card.ranks[self.ranks[0]]==Card.ranks[self.ranks[1]] or Card.ranks[self.ranks[0]]==Card.ranks[self.ranks[2]] or Card.ranks[self.ranks[1]]==Card.ranks[self.ranks[2]] or Card.ranks[self.ranks[2]]==Card.ranks[self.ranks[1]]):\n self.rank=1 \n #Straight\n elif(((newlist[1] - newlist[0]) == 1) and (newlist[2]-newlist[1])==1):\n self.rank=3\n \n elif((Card.ranks[self.ranks[0]] in a) and (Card.ranks[self.ranks[1]] in a) and (Card.ranks[self.ranks[2]] in a)):\n if(Card.ranks[self.ranks[0]] != Card.ranks[self.ranks[1]] != Card.ranks[self.ranks[2]]):\n #if((Card.ranks[self.ranks[0]] != Card.ranks[self.ranks[1]]) and (Card.ranks[self.ranks[0]]!= Card.ranks[self.ranks[2]])and (Card.ranks[self.ranks[1]]!= Card.ranks[self.ranks[2]])):\n self.rank=3\n\n else:\n self.rank=0\n #pass", "def get_rank(self):\n return self.__rank", "def rank(self):\n \n if self.__rank:\n return self.__rank\n flush = True\n straight = False\n last = None\n merged = {}\n for c in self.__cards:\n if last:\n if flush and c.suit != last.suit:\n flush = False\n last = c\n if c.value in merged:\n merged[c.value] = merged[c.value] + 1\n else:\n merged[c.value] = 1\n if (len(merged)) == 5:\n # All unique cards, check for a straight\n if self.__cards[0].value - self.__cards[4].value == 4:\n straight = True\n if self.__cards[4].value == 2 and self.__cards[1].value == 5 and self.__cards[0].value == 14:\n straight = True\n # Set the value of the ace to 1 and resort so hand comparisons work correctly\n self.__cards[0].value = 1\n self.__cards = sorted(self.__cards, reverse=True)\n if straight and flush:\n if self.__cards[0].value == 14:\n self.__rank = Hand.ROYAL_FLUSH\n else:\n self.__rank = Hand.STRAIGHT_FLUSH\n elif flush:\n self.__rank = Hand.FLUSH\n elif straight:\n self.__rank = Hand.STRAIGHT\n else:\n self.__rank = Hand.HIGH_CARD\n self.__values = [c.value for c in self.__cards]\n else:\n multiples = [m for m in sorted(merged.items(), key = operator.itemgetter(1), reverse = True) if m[1] > 1]\n if len(multiples) > 1:\n if multiples[0][1] == multiples[1][1]:\n self.__rank = Hand.TWO_PAIRS\n else:\n self.__rank = Hand.FULL_HOUSE \n elif multiples:\n if multiples[0][1] > 3:\n self.__rank = Hand.FOUR_OF_A_KIND\n elif multiples[0][1] == 3:\n self.__rank = Hand.THREE_OF_A_KIND\n else:\n self.__rank = Hand.ONE_PAIR\n mvalues = sorted([m[0] for m in multiples], reverse=True)\n self.__values = mvalues + [c.value for c in self.__cards if c.value not in mvalues]\n if not self.__rank:\n self.__rank = Hand.HIGH_CARD\n\n return self.__rank", "def get_rank(self, score, answer, entities_space, num_ent):\n if answer not in entities_space:\n rank = num_ent\n else:\n answer_prob = score[entities_space.index(answer)]\n 
score.sort(reverse=True)\n rank = score.index(answer_prob) + 1\n return rank", "def getRank(self):\n return self._rank", "def calc_rank(id=13197473):\r\n player_url = urllib.parse.urlparse(\"http://osu.ppy.sh/pages/include/profile-general.php?u=player_id&m=0\".replace('player_id', str(id)))\r\n page = urlopen(player_url.geturl())\r\n soup = BeautifulSoup(page, features=\"html.parser\")\r\n table_divs = soup.findAll('div', attrs={'class': 'profileStatLine'})\r\n\r\n import re\r\n pattern = '\\(#\\d*,*\\d+\\)'\r\n for div in table_divs:\r\n for childdiv in div.find_all('b'):\r\n result = re.search(pattern, str(childdiv.text))\r\n my_ranking = int(result.group(0).replace(',', '').replace(\"(#\", '').replace(\")\", ''))\r\n break\r\n break\r\n return my_ranking", "def get_rank(score):\n if score in range(0, 500):\n return RANKTYPES[0]\n elif score in range(500, 1500):\n return RANKTYPES[1]\n elif score in range(1500, 2000):\n return RANKTYPES[2]\n elif score in range(2000, 2500):\n return RANKTYPES[3]\n elif score in range(2500, 3000):\n return RANKTYPES[4]\n elif score in range(3000, 4000):\n return RANKTYPES[5]\n elif score in range(4000, 5500):\n return RANKTYPES[6]\n elif score > 5500:\n return RANKTYPES[7]", "def check_hand_rank(hand):\n card_rank = ['--23456789TJQKA'.index(n) for n,h in hand]\n card_rank.sort()\n card_rank.reverse()\n #for royal straight flush\n card_rank_rsf = ['HDSC'.index(h) for n,h in hand]\n card_rank_rsf.sort()\n card_rank_rsf.reverse()\n if card_rank == [14,5,4,3,2]:\n card_rank = [5,4,3,2,1]\n if royal_straight_flush(hand):\n return 9,card_rank_rsf[0]\n elif straight_flush(hand):\n return 8,max(card_rank)\n elif four_of_a_kind(hand):\n return 7,max(card_rank)\n elif full_house(hand):\n tong = 0\n kuu = 0\n s = [n for n,h in hand]\n for i in xrange(len(s)):\n if(s.count(s[i])==3):\n tong = s[i]\n else:\n kuu = s[i]\n return 6,int(tong),int(kuu)\n elif flush(hand):\n return 5,max(card_rank)\n elif straight(hand):\n return 4,max(card_rank)\n elif three_of_a_kind(hand):\n ld = 0\n a = 0\n for i in xrange(0,3):\n if card_rank.count(card_rank[i]) > 1 :\n ld = (card_rank[i])\n else:\n a = card_rank[i]\n return 3,ld,a\n elif two_pair(hand):\n ld = []\n a = 0\n for i in xrange(0,3):\n if card_rank.count(card_rank[i]) >=2:\n ld.append(card_rank[i])\n card_rank.pop(i)\n else:\n a = card_rank[i]\n ld.sort(reverse=True)\n return 2,ld[0],ld[1],a\n elif one_pair(hand):\n ld = 0\n a = []\n for i in xrange(len(card_rank)):\n if card_rank.count(card_rank[i]) > 1 :\n ld = (card_rank[i])\n else:\n a.append(card_rank[i])\n a.sort(reverse = True)\n return 1,ld,a[0],a[1],a[2]\n else:\n return 0,max(card_rank)", "def card_ranks(cards):\n ranks = [\"--23456789TJQKA\".index(r) for r,s in cards] # Each card contains a rank and a suit, hand/cards == [(11, 'Q'), (9, 'D')] \n # Using a \"Rank Strings Array\" (i.e using an array to represent the rank strings) to index it for the ranks\n ranks.sort(reverse=True)\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 3, 2, 1]) else ranks", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def rank(self):\n return self._rank", "def getCard(self, rank, suit):\r\n for card in self.cards:\r\n if card.rank == rank and card.suit == suit:\r\n return card\r\n return None", "def BJValue(self):\r\n #if the face value of a card is greater or equals to 10\r\n if self.rank >= 10:\r\n #count the value as 10\r\n return 10\r\n #if the face value of a card is less 
than 10\r\n else:\r\n #return the face value of the card\r\n return self.rank", "def rank(self):\n if self._rank is None:\n self._rank = self.prufer_rank()\n return self._rank", "def get_value(self):\n bj_rankings = {'Ace': 11, 'King': 10, 'Queen': 10, 'Jack': 10,\n 10: 10, 9: 9, 8: 8, 7: 7, 6: 6, 5: 5, 4: 4, 3: 3, 2: 2}\n value = 0\n for card in self.cards:\n value += bj_rankings[card.rank]\n\n if value > 21:\n bj_rankings['Ace'] = 1\n value = 0\n for card in self.cards:\n value += bj_rankings[card.rank]\n return value", "def test_rank(self):\n card = self._card\n self.assertEqual(card.rank, self._rank)", "def get_rank(self, pb):\n\n for rank in self.RANKS:\n start = self.RANKS[rank][\"ProgressStart\"]\n # 1 is not subtracted as we're calling range\n end = start + self.RANKS[rank][\"Progress\"]\n if pb in range(start, end):\n return int(rank)\n else:\n return 35", "def card_ranks(hand):\n ranks = ['--23456789TJQKA'.index(r) for r, s in hand]\n ranks.sort(reverse = True)\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 4, 3, 2]) else ranks", "def __rank__(self) -> int:", "def rank(self) -> int:\n return self._rank", "def test_get_ranking(self):\n card = Card.objects.create(suit=Card.CLUB, rank=\"jack\")\n self.assertEqual(card.get_ranking(), 11)", "def highest_rank(self):\n return max(self.cards).rank", "def rank():\n return int(os.environ['RANK'])", "def determine_rank(self, X, err):\n singularValues,_,_,_ = self.compute_svd(X,k=-1)\n ratio = np.array([np.linalg.norm(singularValues[k:]) / np.linalg.norm(singularValues) for k in\n range(len(singularValues) - 1, 0, -1)])\n find_idx = numpy.nonzero(ratio <= err)\n rank = find_idx[0]\n if self.global_rank==0: print('Estimated rank=',rank)\n return rank", "def test_is_rank_integer(self):\n self.assertIsInstance(cardutils.Card(10,1).rank, int)", "def get_value(self):\n if self.rank == 'A':\n return 11\n elif self.rank in ['J', 'Q', 'K']:\n return 10\n else:\n return int(self.rank)", "def read_rank(response):\n groups = re.findall(\"^Rank_\\d+:\\d+:(\\d+)$\", response.strip())\n if len(groups) == 1:\n return groups[0]\n else:\n raise ValueError", "def getPoints(self):\n count = 0\n for card in self.cards:\n if card.rank > 9:\n count += 10\n elif card.rank == 1:\n count += 11\n else:\n count += card.rank\n # Deduct 10 if Ace is available and needed as 1\n for card in self.cards:\n if count <= 21:\n break\n elif card.rank == 1:\n count -= 10\n return count", "def rank(self) -> tskit.Rank:\n return combinatorics.RankTree.from_tsk_tree(self).rank()", "def player_rank(cls, player, date):\r\n\r\n\t\ttry:\r\n\t\t\tP_RANKS = cls.RANKS[player]\r\n\t\texcept KeyError:\t# If player does not exist\r\n\t\t\treturn False\r\n\r\n\t\tinit_date = P_RANKS[0]\r\n\r\n\t\t# If player hadn't played yet by the date specified\r\n\t\tif date < init_date:\r\n\t\t\treturn False\r\n\t\t\r\n\t\tdate_ind = DATES.day_diff(date, init_date)\r\n\r\n\t\trank = P_RANKS[date_ind + 1]\r\n\t\t\r\n\t\treturn rank", "def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)", "def get_card_value(self, card):\n if card >= 10:\n return 10\n if card == 1:\n return 11\n return card", "def _get_rank(self,fitness):\n # infact you can get the order or rank by only once sort.\n rank=fitness[:,0].argsort().argsort() # [n]\n return rank", "def compare_rank(self, obj: int) -> int:\n def normalize_ace(a):\n return a+13 if a == 1 else a\n norm_self_rank = normalize_ace(self.rank)\n norm_obj = normalize_ace(obj)\n\n return 1 if norm_self_rank > norm_obj else (0 if 
norm_self_rank == norm_obj else -1)", "def _to_int(self, int_or_card):\r\n if isinstance(int_or_card, Card):\r\n return int_or_card.rank\r\n return int_or_card", "def get_score(self, card_index: int = 0) -> int:\n return self.get_score_list[card_index]", "def rank(self):\n rank = 0\n rho = self.array_form[:]\n n = self.size - 1\n size = n + 1\n psize = int(ifac(n))\n for j in xrange(size - 1):\n rank += rho[j]*psize\n for i in xrange(j + 1, size):\n if rho[i] > rho[j]:\n rho[i] -= 1\n psize //= n\n n -= 1\n return rank", "def rank_move(self, board, player, board_hash, move):\n\n\t\tif board_hash in self.tt and move == self.tt[board_hash]['best_move'] or self.is_move_checkmate(board, move):\n\t\t\trank = float('inf')\n\n\t\t\treturn rank\n\n\t\tmove_hash = str(chess.polyglot.zobrist_hash(board)) + str(player) + str(move)\n\t\trank = 0\n\n\t\tif move_hash in self.move_rank_cache:\n\t\t\trank = self.move_rank_cache[move_hash]\n\t\telse:\n\t\t\tis_attacked = board.is_capture(move) or is_square_attacked_by(board, not player, move.to_square)\n\t\t\trank = 100 * (get_move_exchange(board, move, player) + 0.01) if is_attacked else 0\n\t\t\tself.move_rank_cache[move_hash] = rank\n\t\t\n\t\trank = -rank if not player else rank\n\n\t\tif move in self.move_hist:\n\t\t\trank += self.move_hist[move]/self.move_hist_norm \n\t\telif rank == 0:\n\t\t\tplayer_delta = 1 if player else -1\n\t\t\trank += player_delta * self.valuate_move(board, move, player)\n\n\t\treturn rank", "def scan_cards(player, river):\r\n best_rank = 0\r\n cards = player.hand + river\r\n hands = combinations(cards, 5) # find all 5 card hands\r\n best_hands = []\r\n for h in hands:\r\n flat = list(sum(h, ()))\r\n prep = np.zeros(shape=(10,))\r\n j = 0\r\n for i in flat:\r\n prep[j] = i\r\n j = j+1\r\n input = np.zeros(shape=(1,10))\r\n input[0] = prep\r\n rank = np.argmax(player.ai.predict(input)[0])\r\n\r\n if rank == best_rank:\r\n best_hands.append(h)\r\n if rank > best_rank:\r\n best_rank = rank\r\n best_hands = []\r\n best_hands.append(h)\r\n final_hand = best_hand(best_hands)\r\n return (best_rank, final_hand)", "def getRank(self, subject_id: str, score: float) -> Tuple[int, int]:\n scores: List[float] = self.getScores(subject_id)\n scores.sort(reverse=True)\n rank = scores.index(score)\n return (rank, len(scores))", "def card_value (card):\r\n value = card[0]\r\n if value in ['Jack','Queen','King']:\r\n return 10\r\n if value in [2,3,4,5,6,7,8,9,10]:\r\n return value\r\n else:\r\n raise 'CardValueError'", "def get_num_hit_rank(boxes_truth, boxes_pred, rank):\n\n def is_hit(box_truth, box_pred):\n return is_label_match_rank(box_truth, box_pred, rank)\n\n return get_num_hit(boxes_truth, boxes_pred, is_hit)", "def rank(self, current_order_by_value: Comparable, current_row_number: int) -> int:", "def rank(self):\n\n if self._rank >= 0:\n return self._rank\n\n reduced, operations = self.to_row_echelon()\n non_leading_rows = 0\n for i in range(self.rows, 0, -1):\n if not reduce(lambda x,y: x or y, reduced.row(i)):\n non_leading_rows += 1\n else:\n break\n\n self._rank = self.rows - non_leading_rows\n return self._rank", "def is_one_rank_apart(card1, card2):\n def card_value(card):\n return 'A23456789TJQK'.index(solvers.deck.card_rank(card))\n\n pos1, pos2 = card_value(card1), card_value(card2)\n diff = abs(pos1 - pos2)\n return diff in (1, 12)", "def calculate_points(card):\n for value in scores.keys():\n if value == card.value:\n card_score = scores[card.value]\n return card_score", "def find_by_rank(our_data,rank):\n for album in 
our_data:\n if album['number'] == str(rank):\n return album\n return None", "def __rank_from_str_to_int(rank: str) -> int:\n return int(rank) - 1", "def _rank(self, ranking, n):\n return nlargest(n, ranking, key=ranking.get)", "def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def _rank(self, ranking, n):\n return nlargest(n, ranking, key=ranking.get)", "def stat_to_rank(stat: float) -> MatchmakingRank:\n\n if stat < 640:\n return MatchmakingRank.BRONZE\n elif stat < 1205:\n return MatchmakingRank.SILVER\n elif stat < 1525:\n return MatchmakingRank.GOLD\n elif stat < 1840:\n return MatchmakingRank.PLATINUM\n elif stat < 2325:\n return MatchmakingRank.DIAMOND\n else:\n return MatchmakingRank.MASTER", "def rank(self, value):\n i = 0\n n = len(self._tree)\n rank = 0\n count = 0\n while i < n:\n cur = self._tree[i]\n if value < cur:\n i = 2 * i + 1\n continue\n elif value > cur:\n rank += self._counts[i]\n # subtract off the right tree if exists\n nexti = 2 * i + 2\n if nexti < n:\n rank -= self._counts[nexti]\n i = nexti\n continue\n else:\n return (rank, count)\n else: # value == cur\n count = self._counts[i]\n lefti = 2 * i + 1\n if lefti < n:\n nleft = self._counts[lefti]\n count -= nleft\n rank += nleft\n righti = lefti + 1\n if righti < n:\n count -= self._counts[righti]\n return (rank, count)\n return (rank, count)", "def rank(self, value):\n i = 0\n n = len(self._tree)\n rank = 0\n count = 0\n while i < n:\n cur = self._tree[i]\n if value < cur:\n i = 2 * i + 1\n continue\n elif value > cur:\n rank += self._counts[i]\n # subtract off the right tree if exists\n nexti = 2 * i + 2\n if nexti < n:\n rank -= self._counts[nexti]\n i = nexti\n continue\n else:\n return (rank, count)\n else: # value == cur\n count = self._counts[i]\n lefti = 2 * i + 1\n if lefti < n:\n nleft = self._counts[lefti]\n count -= nleft\n rank += nleft\n righti = lefti + 1\n if righti < n:\n count -= self._counts[righti]\n return (rank, count)\n return (rank, count)", "def _get_reward(self, five_cards):\n \n return 1-self.evaluator.get_five_card_rank_percentage(self.evaluator._five(five_cards))", "def _get_local_rank_size(comm):\n this_node = platform.node()\n ranks_nodes = comm.allgather((comm.Get_rank(), this_node))\n node2rankssofar = collections.defaultdict(int)\n local_rank = None\n for (rank, node) in ranks_nodes:\n if rank == comm.Get_rank():\n local_rank = node2rankssofar[node]\n node2rankssofar[node] += 1\n assert local_rank is not None\n return local_rank, node2rankssofar[this_node]", "def test_rank(self):\n self.assertEqual(self.vectors.rank('dog.n.01', 'dog.n.01'), 1)\n self.assertEqual(self.vectors.rank('dog.n.01', 'carnivore.n.01'), 3)", "def score(self) -> int:\n card_values = {\n '0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n '10': 10,\n 'JACK': 10,\n 'QUEEN': 10,\n 'KING': 10,\n 'ACE': 11}\n hand_value = []\n for i in self.cards:\n hand_value.append(card_values[i.value])\n while sum(hand_value) > 21 and 11 in hand_value:\n for i, j in enumerate(hand_value):\n if j == 11:\n hand_value[i] = 1\n break\n else:\n pass\n return sum(hand_value)", "def compare_cards(p1_name, p2_name, card1, card2, ranks=['2','3','4','5','6','7','8','9','10','J','Q','K','A']):\n\n rank1, rank2 = card1[:-1], card2[:-1]\n\n if rank1 not in ranks: raise ValueError(\"Card 1 does not have a valid card value!\")\n if rank2 not in ranks: raise ValueError(\"Card 2 does not have a valid card value!\")\n\n print(p1_name+\"\\'s\", card1, \"vs.\", 
p2_name+\"\\'s\", card2)\n\n winner = -1\n\n if (rank1 == rank2): winner = 0\n elif (rank1 == '2' and rank2 == 'A'): winner = 1\n elif (rank1 == 'A' and rank2 == '2'): winner = 2\n else: winner = 1 if (ranks.index(rank1) > ranks.index(rank2)) else 2\n\n if (winner == 0): print(\"There Was a Tie Between\", card1, \"and\", card2)\n elif (winner == 1): print(p1_name, \"Wins This Round With a\", card1, \"Against a\", card2)\n elif (winner == 2): print(p2_name, \"Wins This Round With a\", card2, \"Against a\", card1)\n\n return winner", "def workout_rank(a, rank):\r\n # Check if workout score is empty\r\n if pd.isnull(a):\r\n return np.nan\r\n else:\r\n return int(rank)", "def find_ranking(game_id):\r\n\r\n scores = []\r\n\r\n games = Game.query.all()\r\n\r\n target_score = get_avg_score(game_id)\r\n\r\n for game in games:\r\n scores.append(get_avg_score(game.game_id))\r\n \r\n rankings = sorted(scores, key=None,reverse=True)\r\n\r\n target_ranking = rankings.index(target_score)\r\n\r\n return target_ranking + 1", "def get_rank():\n if not torch.distributed.is_available():\n return 0\n if not torch.distributed.is_initialized():\n return 0\n return torch.distributed.get_rank()", "def local_rank():\n return int(os.environ['LOCAL_RANK'])", "def has_rank(self, rank):\n for card in self.cards:\n if card.rank == rank:\n return True\n return False", "def comm_rank(self):\n return self._rcomm", "def get_hand1_wins(p1_hand, p1_rank, p1_rank_value, p2_hand, p2_rank, p2_rank_value):\n if HAND_RANKS.index(p1_rank) > HAND_RANKS.index(p2_rank):\n return 1\n elif HAND_RANKS.index(p1_rank) < HAND_RANKS.index(p2_rank):\n return 0\n\n # Ranks are equal\n if p1_rank_value > p2_rank_value:\n return 1\n elif p1_rank_value < p2_rank_value:\n return 0\n\n # Ranks and rank values are equal, go by highest card until one hand wins\n for i in range(0, 5):\n val1 = VALUES[p1_hand[i][0]]\n val2 = VALUES[p2_hand[i][0]]\n if val1 > val2:\n return 1\n elif val1 < val2:\n return 0\n\n print \"WTF\"\n return 0", "def getRank(self, steamid):\r\n if self.__contains__(steamid):\r\n return self.ranks.index(steamid) + 1\r\n return self.__len__()", "def _compare(self, other): \n if(self.rank==other.rank):\n if (self.rank == 5 and other.rank==5) or (self.rank ==3 and other.rank==3):\n maxself = max(self.ranks) \n maxother = max(other.ranks)\n if(maxself>maxother):\n if(Card.ranks[maxself]=='Ace' or Card.ranks[maxother] == 'Ace'):\n maxself1 = sorted(self.ranks,reverse=True)\n maxself1 = maxself1[1]\n maxother1 = sorted(other.ranks,reverse=True)\n maxother1 = maxother1[1]\n if(maxself1>maxother1):\n return 1\n else:\n return 0\n else:\n if(Card.ranks[maxself]=='Ace' or Card.ranks[maxother] == 'Ace'):\n maxself1 = sorted(self.ranks,reverse=True)\n maxself1 = maxself1[1]\n maxother1 = sorted(other.ranks,reverse=True)\n maxother1 = maxother1[1]\n if(maxself1<maxother1):\n return -1\n else:\n return 0\n \n if (self.rank == 4 and other.rank==4):\n maxself = max(self.ranks) \n maxother = max(other.ranks)\n if(maxself>maxother):\n return 1\n else:\n return -1\n if (self.rank ==2 and other.rank==2) or (self.rank ==0 and other.rank==0):\n newself = sorted(self.ranks,reverse=True)\n newother = sorted(other.ranks,reverse=True)\n maxsel = max(newself)\n maxoth = max(newother)\n if(maxsel>maxoth):\n return 1\n elif(maxsel<maxoth):\n return -1\n else:\n maxsel1= newself[1]\n maxoth1 = newother[1]\n if(maxsel1>maxoth1):\n return 1\n elif(maxsel1<maxoth1):\n return -1\n else:\n maxsel2= newself[2]\n maxoth2 = newother[2]\n if(maxsel2>maxoth2):\n 
return 1\n elif(maxsel2<maxoth2):\n return -1\n else:\n return 0\n if self.rank ==1 and other.rank==1:\n pairwali1 = {}\n pairwali2={}\n for i in range(0,3):\n if other.ranks[i] not in pairwali1:\n pairwali1[other.ranks[i]] = 1\n else:\n pairwali1[other.ranks[i]]= pairwali1[other.ranks[i]]+1\n if self.ranks[i] not in pairwali2:\n pairwali2[self.ranks[i]] = 1\n else:\n pairwali2[self.ranks[i]] = pairwali2[self.ranks[i]]+1\n t = list(pairwali1.keys())[list(pairwali1.values()).index(2)]\n r = list(pairwali2.keys())[list(pairwali2.values()).index(2)]\n if t!=r:\n if t>r:\n return -1\n elif t<r:\n return 1\n elif t==r:\n t= list(pairwali1.keys())[list(pairwali1.values()).index(1)]\n r = list(pairwali2.keys())[list(pairwali2.values()).index(1)]\n if t>r:\n return -1\n elif t<r:\n return 1\n else:\n return 0\n\n else:\n if(self.rank>other.rank):\n return 1\n else:\n return -1", "def determine_winner1(self): \r\n sorted_player_rank = self._rank()\r\n print(f\"sorted player rank: {sorted_player_rank}\")\r\n print(f\"winner is player {sorted_player_rank[0]}: with points {sorted_player_rank[0][1]}\")", "def get_maximum_rank(score):\n\tscores = [0, 300, 450, 600, 750] # ranges 0-299, 300-449, etc.\n\trank = None\n\tfor i in range(len(scores)):\n\t\tif score >= scores[i]:\n\t\t\trank = i + 1\n\n\treturn rank", "def recip_rank(recs, truth):\n good = recs['item'].isin(truth.index)\n npz, = np.nonzero(good)\n if len(npz):\n return 1.0 / (npz[0] + 1.0)\n else:\n return 0.0", "def get_rank(self, points):\n sql_command = \"SELECT * FROM points WHERE amount > ?;\"\n cursor, connection = self.execute_command_get_connection(sql_command, [points])\n\n all = cursor.fetchall()\n cursor.close()\n connection.close()\n return len(all) + 1", "def get_rank(points: int, cutoffs: List[int]) -> int:\n rank = 0\n for i, cutoff in enumerate(cutoffs):\n if points < cutoff:\n if i == 0:\n break\n else:\n rank = i - 1\n break\n else:\n rank = RANK_COUNT - 1\n\n return rank", "def rank(self):\n return self.matrix().rank()", "def card_level_rank(level):\n try:\n level = int(level)\n except ValueError:\n raise exceptions.LevelOrRankInvalid()\n\n if level not in range(0, 13):\n raise exceptions.LevelOrRankInvalid()", "def _get_team_ranks(game_json):\n try:\n home = game_json['header']['competitions'][0]['competitors'][0]\n away = game_json['header']['competitions'][0]['competitors'][1]\n home_rank = _get_rank(home)\n away_rank = _get_rank(away)\n return home_rank, away_rank\n except KeyError:\n return '-1', '-1'\n except IndexError:\n return '-1', '-1'", "def score_int( hand ):\n m = matches(hand)\n #print( m )\n #royal_flush -- a special case of straight flush.\n if flush(hand) and straight(hand) and hand[4].rank == 14:\n return 80000 + 100*order(hand[4])\n #straight_flush\n elif flush(hand) and straight(hand):\n return 80000 + 100*order(hand[4])\n #four_of_a_kind\n elif len(m) == 2 and m[0].count == 4:\n return 70000 + 100*order(m[0].card)\n #full_house\n elif len(m) == 2 and m[0].count == 3 and m[1].count == 2:\n return 60000 + 100*order(m[0].card) + order(m[1].card)\n #flush\n elif flush(hand):\n return 50000 + 100*order(hand[4])\n #straight\n elif straight(hand):\n return 40000 + 100*order(hand[4])\n #three_of_a_kind\n elif len(m) == 3 and m[0].count == 3:\n return 30000 + 100*order(m[0].card)\n #two_pair\n elif len(m) == 3 and m[0].count == 2 and m[1].count == 2:\n return 20000 + 100*order(m[0].card) + order(m[1].card)\n #one_pair\n elif len(m) == 4 and m[0].count == 2 and m[1].count == 1:\n return 10000 + 
100*order(m[0].card) + order(m[1].card)\n # Simple high card. Is this adequate? We'll know if we get ties.\n else:\n return 100*order(hand[4]) # or 100*order(m[0].card)", "def main(players=2):\n Pcard = []\n i2 = 0\n while len(Pcard) < players:\n P2 = (input(\"Player \"+str(len(Pcard)+1)+\" -- input your card: \"))\n Pcard.append(P2.split())\n i2 += 1\n hand_rank = []\n print(\"==============Result==============\")\n for i in xrange(players):\n hand_rank.append(check_hand_rank(Pcard[i]))\n if hand_rank[i][0] == 0:\n print(\"Player \"+str(i+1)+\" have: High card\")\n elif hand_rank[i][0] == 1:\n print(\"Player \"+str(i+1)+\" have: One pair\")\n elif hand_rank[i][0] == 2:\n print(\"Player \"+str(i+1)+\" have: Two pair\")\n elif hand_rank[i][0] == 3:\n print(\"Player \"+str(i+1)+\" have: Three of a kind\")\n elif hand_rank[i][0] == 4:\n print(\"Player \"+str(i+1)+\" have: Straight\")\n elif hand_rank[i][0] == 5:\n print(\"Player \"+str(i+1)+\" have: Flush\")\n elif hand_rank[i][0] == 6:\n print(\"Player \"+str(i+1)+\" have: Full house\")\n elif hand_rank[i][0] == 7:\n print(\"Player \"+str(i+1)+\" have: Four of a kind\")\n elif hand_rank[i][0] == 8:\n print(\"Player \"+str(i+1)+\" have: Straight flush\")\n elif hand_rank[i][0] == 9:\n print(\"Player \"+str(i+1)+\" have: Royal straight flush\")\n if len(str(winner(hand_rank)))/2 >= 2:\n return \"-- >\" + 'Winner are players: ' +str(winner(hand_rank)) + \" < --\"\n return \"-- > The Winner is player: \" + str(winner(hand_rank))+ \" < --\"" ]
[ "0.7572013", "0.7296982", "0.7268319", "0.7267528", "0.7215522", "0.7210751", "0.7199964", "0.7196077", "0.718358", "0.71687156", "0.71248996", "0.71213835", "0.7119498", "0.7068738", "0.7030133", "0.7003381", "0.6978734", "0.6972577", "0.6972085", "0.6957598", "0.6882679", "0.68358856", "0.6834885", "0.6830628", "0.6823577", "0.6789338", "0.6772569", "0.6762043", "0.67570454", "0.67570454", "0.67570454", "0.67570454", "0.67570454", "0.6739433", "0.6706308", "0.66992646", "0.669383", "0.6662264", "0.66506165", "0.6635815", "0.6617322", "0.6588751", "0.6573818", "0.65412456", "0.65153784", "0.6511987", "0.6478524", "0.6477177", "0.64556205", "0.6449089", "0.6367824", "0.63656294", "0.6362168", "0.63561624", "0.634402", "0.6313995", "0.6310559", "0.63001204", "0.62993205", "0.6298189", "0.6291261", "0.629014", "0.62805456", "0.6277351", "0.62555933", "0.6245387", "0.6214429", "0.6187854", "0.6168419", "0.6153777", "0.61532134", "0.6149563", "0.6146421", "0.6126468", "0.612556", "0.612556", "0.612364", "0.6123091", "0.61180955", "0.6106501", "0.6090012", "0.60723484", "0.60704476", "0.6068651", "0.60647124", "0.6064316", "0.6062297", "0.60618836", "0.60585654", "0.60431695", "0.60331684", "0.6025594", "0.60220563", "0.6019526", "0.60156125", "0.59910864", "0.5987931", "0.59675586", "0.5959298", "0.5955613" ]
0.6873474
21
Finds the suit of the card and returns suit
def suit(self):
    suit = self.n // 13
    return suit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def suit(self):\n return self._suit", "def suit(self):\n return self._suit", "def get_suit(self):\r\n return self.suit", "def get_num_suit(self):\n if self.suit == \"Diamonds\":\n return 0\n if self.suit == \"Clubs\":\n return 1\n if self.suit == \"Hearts\":\n return 2\n if self.suit == \"Spades\":\n return 3\n return -1", "def getCard(self, rank, suit):\r\n for card in self.cards:\r\n if card.rank == rank and card.suit == suit:\r\n return card\r\n return None", "def get_card(self, suit, face):\n for card in self.deck:\n if card.suit == suit and card.value == face:\n return card", "def getSuit(self):\r\n return self.suit", "def getSuit(self):\n return self.suit", "def getSuit(self):\n return self._suit", "def card_value (card):\r\n value = card[0]\r\n if value in ['Jack','Queen','King']:\r\n return 10\r\n if value in [2,3,4,5,6,7,8,9,10]:\r\n return value\r\n else:\r\n raise 'CardValueError'", "def get_card(self):\n if self.card_suit in self.RED_SUITS:\n color = 'red'\n else:\n color = 'blue'\n\n return colored(self.card_name, 'yellow') + colored(self.card_suit,\n color)", "def test_suit(self):\n card = self._card\n self.assertEqual(card.suit, self._suit)", "def get_card_val(card):\n\n if card == '1':\n return 1\n if card == '2':\n return 2\n if card == '3':\n return 3\n else:\n return 4", "def test_is_suit_integer(self):\n self.assertIsInstance(cardutils.Card(10,1).suit, int)", "def card_factory(rank,suit):\n pass", "def _translate_card(self):\n if isinstance(self.suit, int):\n\n if self.suit == 0:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of spades\".format(name)\n\n elif self.suit == 1:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of hearts\".format(name)\n\n elif self.suit == 2:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of diamonds\".format(name)\n\n elif self.suit == 3:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of clubs\".format(name)\n\n else:\n raise ValueError(\"The integer passed to the method must be 0, 1, 2, 3\")\n\n else:\n raise TypeError(\"The argument for the method must be an integer\")\n\n return self.name, self.values", "def card(n):\r\n assert type(n) == int and n > 0 and n <= 13, \"Bad card n\"\r\n specials = {1: 'A', 11: 'J', 12: 'Q', 13: 'K'}\r\n return specials.get(n, str(n))", "def test_card_suit(mock_card):\n assert mock_card.suit == Suit.SPADE", "def get_card_value(self, card):\n if card >= 10:\n return 10\n if card == 1:\n return 11\n return card", "def same_color(suit):\n\tif suit == 's':\n\t\treturn 'c'\n\telif suit == 'c':\n\t\treturn 's'\n\telif suit == 'd':\n\t\treturn 'h'\n\telif suit == 'h':\n\t\treturn 'd'", "def get_card_str(self, card):\n card_str = str(card)\n if card == 11:\n card_str = \"Jack\"\n if card == 12:\n card_str = \"Queen\"\n if card == 13:\n card_str = \"King\"\n if card == 1:\n card_str = \"Ace\"\n \n return card_str", "def blackjackValue(self):\n NUMBERRANKS = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\n FACECARDS = [\"jack\", \"queen\", \"king\"]\n ACE = [\"ace\"]\n if self.rank in NUMBERRANKS:\n return int(self.rank)\n elif self.rank in FACECARDS:\n return 10\n elif self.rank in ACE:\n return 11", "def read_card():\n suit_is_valid = False\n while not suit_is_valid:\n suit_input = input('Suit: ').upper()\n for suit in Suit:\n if suit_input == suit.name:\n card_suit = suit\n suit_is_valid = True\n\n rank_is_valid = False\n while not rank_is_valid:\n rank_input = input('Rank: ').upper()\n for rank in 
Rank:\n if rank_input == rank.name:\n card_rank = rank\n rank_is_valid = True\n return Card(card_suit, card_rank)", "def pick_winner(self):\r\n self.convert_face_cards_to_numbers()\r\n main_suit = self.cards_int[0][0] # suit that dominates this round\r\n winner_index = 0 # by default 1st player wins until we find a bigger card in same suit\r\n winner_card_value = self.cards_int[0][1]\r\n for index, card in enumerate(self.cards_int[1:]):\r\n if main_suit == card[0]:\r\n if winner_card_value < card[1]:\r\n winner_index = index+1\r\n winner_card_value = card[1]\r\n\r\n return winner_index", "def index(self, card):\n return self.deck.index(card)", "def deal_cards(deck, card): \n player = deck[card]\n return player", "def card_string(card):\n article = \"\"\n if card.face == 'Ace':\n article = \"an \"\n elif card.face in ['Jack', 'Queen', 'King']:\n article = \"a \"\n return article + card.face + \" of \" + card.suit", "def get_card (self, card):\n\t\treturn self._card", "def CARD_SUITS() -> tuple:\n return \"Diamonds\", \"Hearts\", \"Clubs\", \"Spades\"", "def get_card(self):\n\n card = random.randint(1,13)\n return card", "def play_card(self, rnd: PlayerRound) -> int:\n # we can check if we are playing the correct game\n assert rnd.jass_type == JASS_HEARTS\n\n # get the valid cards to play\n valid_cards = rnd.get_valid_cards()\n\n # lets divide our cards into heart and other cards\n my_heart_cards = valid_cards * color_masks[HEARTS, :]\n my_other_cards = valid_cards - my_heart_cards\n\n if rnd.nr_cards_in_trick == 0:\n # we are the first player, so we can select what to play\n # lets select some random non-heart card if we have any (not that this is necessarily\n # a good strategy :-)\n if my_other_cards.sum() > 0:\n card = np.random.choice(np.flatnonzero(my_other_cards))\n else:\n # just play a random valid card\n card = np.random.choice(np.flatnonzero(valid_cards))\n else:\n # if we have to give a card, lets try to give a heart card\n if my_heart_cards.sum() > 0:\n card = np.random.choice(np.flatnonzero(my_heart_cards))\n else:\n # just play a random valid card\n card = np.random.choice(np.flatnonzero(valid_cards))\n\n self._logger.debug('Played card: {}'.format(card_strings[card]))\n return card", "def score_on_hands(cards_on_hand):\r\n score = 0\r\n straightCount = 0\r\n max_card = 0\r\n suite_dict = {}\r\n face_dict = {}\r\n transfer_dict = {'A':1,'J':11,'Q':12,'K':13}\r\n card_face = []\r\n '''Circulate the player's hand, build a list of points and a suit dict'''\r\n for index in range(len(cards_on_hand)):\r\n if str(cards_on_hand[index])[1] in transfer_dict:\r\n card_face.append(transfer_dict.get(str(cards_on_hand[index])[1]))\r\n elif str(cards_on_hand[index])[1] == '1':\r\n card_face.append(10)\r\n else:\r\n card_face.append(int(str(cards_on_hand[index])[1]))\r\n suite_dict[str(cards_on_hand[index])[0]] = 1\r\n '''Because 1 can be treated as 1 or 14, so if 1 exists, add 14 to the end of the list to calculate flush'''\r\n if 1 in card_face:\r\n card_face.append(14)\r\n\r\n '''Check straight, if it is straight, straight should be 4'''\r\n for face in range(len(card_face)-1):\r\n if card_face[face] +1 == card_face[face+1] :\r\n straightCount +=1\r\n\r\n '''Detect the number of cards of the same number'''\r\n for face in card_face:\r\n\r\n if face not in face_dict:\r\n face_dict[face] = 1\r\n else:\r\n face_dict[face] += 1\r\n\r\n '''Store the maximum number of points'''\r\n max_card = card_face[len(card_face)-1]\r\n\r\n '''Calculate player score'''\r\n if straightCount == 4:\r\n score+= 
8\r\n\r\n if len(suite_dict) == 1:\r\n score+= 9\r\n\r\n for values in face_dict.values():\r\n if values == 2:\r\n score += 3\r\n elif values == 3:\r\n score += 7\r\n elif values == 4:\r\n score += 11\r\n\r\n return (score, max_card)", "def CallSuitLogic(hand): #FIXME\r\n\r\n call = 0\r\n suit = 1\r\n\r\n return [call, suit]", "def card_key(self):\n card_rank = self.rank\n if card_rank > 9:\n card_rank = Card.rank_short[card_rank]\n card_image_name = str(card_rank) + Card.suits_short[self.suit]\n return str(card_image_name)", "def player_hand_contains_suit(self, user_id, suit):\n print \"player_hand_contains_suit(self, user_id, suit) \"\n print \" Checking if player hand contains expected suit: {}\".format(self.bot.leading_suit)\n for user_object in self.bot.current_game.players:\n if user_object.id == user_id:\n card_value = None\n card_suit = None\n for card_obj in user_object.cards_in_hand:\n if len(card_obj) == 2:\n card_value = str(card_obj[0])\n card_suit = card_obj[1]\n else:\n card_value = str(card_obj)\n card_suit = None\n if \"d_\" not in card_value and \"t_\" not in card_value and \"vm_\" not in card_value:\n if card_suit == suit:\n return True\n return False", "def take_card(self, card_color=None):\r\n Card = self.deck.take_card(card_color)\r\n return Card.value if Card.color == Color.BLACK else Card.value * -1", "def blackjack_result(cards):\n sum = 0\n a_cards = 0\n dictionary = {\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n 'T': 10,\n 'J': 10,\n 'Q': 10,\n 'K': 10,\n }\n for card in cards.split():\n if card in dictionary:\n sum = sum + dictionary[card]\n elif card == 'A':\n a_cards = a_cards + 1\n\n if a_cards > 0:\n for i in range(a_cards):\n if a_cards > 1:\n sum = sum + 1\n a_cards = a_cards - 1\n else:\n if sum + 11 < 22:\n sum = sum + 11\n else:\n sum = sum + 1\n\n return sum", "def card(self):\n return self.cdb.name_to_card[self.card_name]", "def __int__(self):\n return Card.ranks.index(self.rank) + Card.suits.index(self.suit) * len(Card.ranks)", "def count(self, card_suit_or_value):\n # Being passed a whole card is our fast path\n if isinstance(card_suit_or_value, Card):\n cmp = _HAND_CMP.get()\n if cmp == HandComparison.Exact:\n return super().count(card_suit_or_value)\n elif cmp == HandComparison.Values:\n card_suit_or_value = card_suit_or_value.value\n elif cmp == HandComparison.Suits:\n card_suit_or_value = card_suit_or_value.suit\n else:\n raise ValueError(\"unable to compare with {}\".format(cmp))\n\n # Convert int or str to enum types transparently\n if isinstance(card_suit_or_value, int):\n try:\n card_suit_or_value = _from_enum(Value, card_suit_or_value)\n except ValueError:\n pass\n elif isinstance(card_suit_or_value, str):\n try:\n card_suit_or_value = _from_enum(Suit, card_suit_or_value)\n except ValueError:\n try:\n card_suit_or_value = _from_enum(Value, card_suit_or_value)\n except ValueError:\n pass\n\n # If we now have a searchable type, search for it\n if isinstance(card_suit_or_value, Value):\n return sum(c.value == card_suit_or_value for c in self)\n elif isinstance(card_suit_or_value, Suit):\n return sum(c.suit == card_suit_or_value for c in self)\n return 0", "def card(phenny, input):\n if not input.group(2):\n phenny.say(input.nick + 'Perhaps you meant \".card Storm Crow\"?')\n else:\n card_name = input.group(2).strip().lower().title()\n if card_name in nick.nicknames:\n card_name = nick.nicknames[card_name]\n card_text = get_card(card_name)\n if card_text:\n phenny.reply(card_text)\n else:\n phenny.reply(\"I 
could not find a card by that name.\")", "def aces_high(card):\n if isinstance(card, Value):\n if card == Value.Ace:\n return 14\n return card.value\n\n if card.joker:\n return 15\n if card.value == Value.Ace:\n return 14\n return card.value.value", "def get_hand(deck):\n random.shuffle(deck)\n return deck[0:5]", "def score(self) -> int:\n card_values = {\n '0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n '10': 10,\n 'JACK': 10,\n 'QUEEN': 10,\n 'KING': 10,\n 'ACE': 11}\n hand_value = []\n for i in self.cards:\n hand_value.append(card_values[i.value])\n while sum(hand_value) > 21 and 11 in hand_value:\n for i, j in enumerate(hand_value):\n if j == 11:\n hand_value[i] = 1\n break\n else:\n pass\n return sum(hand_value)", "def index(self, card_suit_or_value, start=0, stop=sys.maxsize):\n # Being passed a whole card is our fast path\n if isinstance(card_suit_or_value, Card):\n cmp = _HAND_CMP.get()\n if cmp == HandComparison.Exact:\n return super().index(card_suit_or_value, start, stop)\n elif cmp == HandComparison.Values:\n card_suit_or_value = card_suit_or_value.value\n elif cmp == HandComparison.Suits:\n card_suit_or_value = card_suit_or_value.suit\n else:\n raise ValueError(\"unable to compare with {}\".format(cmp))\n\n # Convert int or str to enum types transparently\n if isinstance(card_suit_or_value, int):\n try:\n card_suit_or_value = _from_enum(Value, card_suit_or_value)\n except ValueError:\n pass\n elif isinstance(card_suit_or_value, str):\n try:\n card_suit_or_value = _from_enum(Suit, card_suit_or_value)\n except ValueError:\n try:\n card_suit_or_value = _from_enum(Value, card_suit_or_value)\n except ValueError:\n pass\n\n # If we now have a searchable type, search for it\n if isinstance(card_suit_or_value, Value):\n for i, c in enumerate(self):\n if start <= i < stop and c.value == card_suit_or_value:\n return i\n elif isinstance(card_suit_or_value, Suit):\n for i, c in enumerate(self):\n if start <= i < stop and c.suit == card_suit_or_value:\n return i\n raise ValueError(f\"{card_suit_or_value!r} is not in hand\")", "def rank(self):\n \n if self.__rank:\n return self.__rank\n flush = True\n straight = False\n last = None\n merged = {}\n for c in self.__cards:\n if last:\n if flush and c.suit != last.suit:\n flush = False\n last = c\n if c.value in merged:\n merged[c.value] = merged[c.value] + 1\n else:\n merged[c.value] = 1\n if (len(merged)) == 5:\n # All unique cards, check for a straight\n if self.__cards[0].value - self.__cards[4].value == 4:\n straight = True\n if self.__cards[4].value == 2 and self.__cards[1].value == 5 and self.__cards[0].value == 14:\n straight = True\n # Set the value of the ace to 1 and resort so hand comparisons work correctly\n self.__cards[0].value = 1\n self.__cards = sorted(self.__cards, reverse=True)\n if straight and flush:\n if self.__cards[0].value == 14:\n self.__rank = Hand.ROYAL_FLUSH\n else:\n self.__rank = Hand.STRAIGHT_FLUSH\n elif flush:\n self.__rank = Hand.FLUSH\n elif straight:\n self.__rank = Hand.STRAIGHT\n else:\n self.__rank = Hand.HIGH_CARD\n self.__values = [c.value for c in self.__cards]\n else:\n multiples = [m for m in sorted(merged.items(), key = operator.itemgetter(1), reverse = True) if m[1] > 1]\n if len(multiples) > 1:\n if multiples[0][1] == multiples[1][1]:\n self.__rank = Hand.TWO_PAIRS\n else:\n self.__rank = Hand.FULL_HOUSE \n elif multiples:\n if multiples[0][1] > 3:\n self.__rank = Hand.FOUR_OF_A_KIND\n elif multiples[0][1] == 3:\n self.__rank = 
Hand.THREE_OF_A_KIND\n else:\n self.__rank = Hand.ONE_PAIR\n mvalues = sorted([m[0] for m in multiples], reverse=True)\n self.__values = mvalues + [c.value for c in self.__cards if c.value not in mvalues]\n if not self.__rank:\n self.__rank = Hand.HIGH_CARD\n\n return self.__rank", "def setSuit(self, arg):\n self.suit = arg", "def deal_card(self):\n return self._deal(1)[0]", "def check_color_card(player, color):\n for card in player.cards:\n if card.suit == color:\n return True", "def card_to_string(card):\n rankStrings = [\"ace\",\"two\",\"three\",\"four\",\"five\",\"six\",\"seven\",\n \"eight\",\"nine\",\"ten\",\"jack\",\"queen\",\"king\"]\n suitStrings = [\"spades\", \"hearts\", \"diamonds\",\"clubs\"]\n return rankStrings[card[0]] + \" of \" + suitStrings[card[1]]", "def score_hand(hand):\n print(hand)\n score = 0\n ace = False\n for card in hand:\n if card == 1 and not ace:\n ace = True\n score += 11\n if score > 21 and ace:\n score -= 10\n else:\n score += card\n return score", "def get_small_joker_value(deck):\n \n return max(deck) - 1", "def is_match(self, card):\n\t\treturn self.suit == card.suit or self.value == card.value", "def get_card(self, card):\n\n\t\tself.add_card_to_grps(card)\n\n\t\tself.grps = sorted(self.grps, key = lambda x: -len(x))\n\n\n\t\t# check if # of cards forming sets is more than 5; if yes, then break the set to allow computer to form runs\n\t\tnum_set_cards = 0\n\t\tpos = -1\n\t\tfor i in range(len(self.grps)):\n\t\t\tif len(self.grps[i]) > 1 and self.grps[i][0] == self.grps[i][1]:\n\t\t\t\tnum_set_cards += len(self.grps[i])\n\t\t\t\tpos = i\n\n\t\tif num_set_cards > 5:\n\t\t\tcard = self.grps[pos][-1]\n\t\t\tself.grps[pos].remove(card)\n\t\t\tlogger.info(f\"In computer.py/get_card: computer returned {card} to break too many set, computer = {self}\")\n\t\t\treturn card\n\n\n\t\t# if # of sets is fine, then remove a card from the group with least size\n\t\tcard = self.grps[-1][-1]\n\n\t\t\n\t\tif len(self.grps[-1]) == 1:\n\t\t\tself.grps.remove(self.grps[-1])\n\t\telse:\n\t\t\tself.grps[-1].remove(self.grps[-1][-1])\n\n\t\tlogger.info(f\"In computer.py/get_card: computer returned {card}, computer = {self}\")\n\n\t\treturn card", "def resolve_card(board, eng_type, scot_type, card, role, parameter, truce = False):\n\n if role == 'ENGLAND':\n which_side = eng_type\n elif role == 'SCOTLAND':\n which_side = scot_type\n\n\n if card == '1':\n movement_execution(board, which_side, role, int(card), truce)\n elif card == '2':\n movement_execution(board, which_side, role, int(card), truce)\n elif card == '3':\n movement_execution(board, which_side, role, int(card), truce)\n\n else:\n\n if role == 'ENGLAND' or not scottish_king.run_king(board, eng_type, scot_type):\n \n \n \n if card == 'SEA':\n \n if play_pass(which_side) == 'play':\n sea_execution(board, which_side, role)\n \n \n elif card == 'HER':\n \n if play_pass(which_side) == 'play':\n her_execution(board, which_side, role, eng_type, scot_type)\n \n \n elif card == 'VIC':\n if play_pass(which_side) == 'play':\n vic_execution(board, which_side, role, parameter)\n \n \n elif card == 'PIL':\n \n if play_pass(which_side) == 'play':\n pil_execution(board, which_side, role, parameter)\n \n \n elif card == 'TRU':\n \n if play_pass(which_side) == 'play':\n return True", "def testCard(self):\n # test1\n cardObj1 = Card('A','d')\n self.assertEquals(1,cardObj1.get_rank())\n self.assertEquals('d',cardObj1.get_suit())\n # test2\n cardObj2 = Card('J','d')\n self.assertEquals(10,cardObj2.get_rank())\n # test3\n cardObj3 = 
Card(5,'d')\n self.assertEquals(5,cardObj3.get_rank())", "def get_small_joker_value(deck: List[int]) -> int:\n\n big_joker = deck[0]\n small_joker = None\n for number in deck[1:]:\n if number > big_joker:\n small_joker = big_joker\n big_joker = number\n elif small_joker is None or small_joker \\\n < number:\n small_joker = number\n\n return small_joker", "def DetermineWinner(trickcards):\r\n\r\n if trickcards[0] <= 6: #Trump Led\r\n winner = trickcards.index(min(trickcards))\r\n elif trickcards[0] > 6 and trickcards[0] < 12: #Shortsuit led\r\n winner = trickcards.index(min(trickcards))\r\n elif trickcards[0] > 11 and trickcards[0] < 18: #Offsuit1 led\r\n winner = trickcards[0]\r\n for i in range(3):\r\n if trickcards[i+1] < winner and (trickcards[i+1] < 7 or trickcards[i+1] > 11):\r\n winner = trickcards[i+1]\r\n winner = trickcards.index(winner)\r\n else: #Offsuit 2 led\r\n winner = trickcards[0]\r\n for i in range(3):\r\n if trickcards[i+1] < winner and (trickcards[i+1] < 7 or trickcards[i+1] > 17):\r\n winner = trickcards[i+1]\r\n winner = trickcards.index(winner)\r\n\r\n return winner", "def __init__(self, suit, rank):\n self.suit = suit.lower()\n self.rank = rank.lower()", "def choose_card(playable_cards):\r\n\r\n playing = playable_cards[0]\r\n print('\\n choosing \\n', playing)\r\n\r\n return playing # for now\r", "def get_card(self):\n return self.card", "def random_card(computer_hand):\n \n if len(computer_hand) != 1:\n random_index = random.randint(0,len(computer_hand)-1)\n else:\n random_index = 0\n card_to_play = computer_hand[random_index]\n print('computer hand: ', computer_hand)\n print('computer plays ', card_to_play)\n return computer_hand[random_index]", "def card_type():\n while True: #Run until a suitable input is passed.\n question = input(\"Savings(S) or Current(C) >>> \")\n if question == \"S\": #if savings account\n return \"savings\"\n elif question == \"C\": #if current account\n return \"current\"", "def check_hand_rank(hand):\n card_rank = ['--23456789TJQKA'.index(n) for n,h in hand]\n card_rank.sort()\n card_rank.reverse()\n #for royal straight flush\n card_rank_rsf = ['HDSC'.index(h) for n,h in hand]\n card_rank_rsf.sort()\n card_rank_rsf.reverse()\n if card_rank == [14,5,4,3,2]:\n card_rank = [5,4,3,2,1]\n if royal_straight_flush(hand):\n return 9,card_rank_rsf[0]\n elif straight_flush(hand):\n return 8,max(card_rank)\n elif four_of_a_kind(hand):\n return 7,max(card_rank)\n elif full_house(hand):\n tong = 0\n kuu = 0\n s = [n for n,h in hand]\n for i in xrange(len(s)):\n if(s.count(s[i])==3):\n tong = s[i]\n else:\n kuu = s[i]\n return 6,int(tong),int(kuu)\n elif flush(hand):\n return 5,max(card_rank)\n elif straight(hand):\n return 4,max(card_rank)\n elif three_of_a_kind(hand):\n ld = 0\n a = 0\n for i in xrange(0,3):\n if card_rank.count(card_rank[i]) > 1 :\n ld = (card_rank[i])\n else:\n a = card_rank[i]\n return 3,ld,a\n elif two_pair(hand):\n ld = []\n a = 0\n for i in xrange(0,3):\n if card_rank.count(card_rank[i]) >=2:\n ld.append(card_rank[i])\n card_rank.pop(i)\n else:\n a = card_rank[i]\n ld.sort(reverse=True)\n return 2,ld[0],ld[1],a\n elif one_pair(hand):\n ld = 0\n a = []\n for i in xrange(len(card_rank)):\n if card_rank.count(card_rank[i]) > 1 :\n ld = (card_rank[i])\n else:\n a.append(card_rank[i])\n a.sort(reverse = True)\n return 1,ld,a[0],a[1],a[2]\n else:\n return 0,max(card_rank)", "def __init__(self, suit, rank):\n \n if (suit in SUITS) and (rank in RANKS):\n self.suit = suit\n self.rank = rank\n else:\n self.suit = None\n self.rank = 
None\n \n global outcome\n outcome = INVALID_CARD, suit, rank", "def get_card_at_top_index(deck):\n \n small_joker_value = get_small_joker_value(deck)\n if deck[0] == get_big_joker_value(deck):\n return deck[get_small_joker_value(deck)]\n else:\n return deck[deck[0]]", "def play(self,suit):\r\n\r\n if suit == \"D\":\r\n \r\n if self._headD.data == 0:\r\n \r\n self.checkAndRemove()\r\n \r\n else:\r\n temp = self._headD\r\n \r\n #delete head and next element is now the head\r\n self._headD = self._headD._next\r\n return temp.data\r\n \r\n elif suit == \"C\":\r\n \r\n if self._headC.data == 0:\r\n \r\n self.checkAndRemove()\r\n \r\n \r\n else:\r\n temp = self._headC\r\n \r\n #delete head and next element is now the head\r\n self._headC = self._headC._next\r\n \r\n return temp.data\r\n \r\n elif suit == \"H\":\r\n if self._headH.data == 0:\r\n \r\n self.checkAndRemove()\r\n \r\n else:\r\n temp = self._headH\r\n \r\n #delete head and next element is now the head\r\n self._headH = self._headH._next\r\n \r\n return temp.data\r\n \r\n elif suit == \"S\":\r\n \r\n if self._headS.data == 0:\r\n \r\n self.checkAndRemove()\r\n \r\n else:\r\n temp = self._headS\r\n \r\n #delete head and next element is now the head\r\n self._headS = self._headS._next\r\n \r\n return temp.data\r\n else:\r\n pass", "def _score_hand(hand):\n\n score = 0\n ace = False\n\n for next_card in hand:\n\n # get the value of the card\n card_value = next_card[0]\n\n # if it is an ace and we do not hold one, the value is 11 instead of 1\n if card_value == 1 and not ace:\n ace = True\n card_value = 11\n\n # add up the value to the score\n score += card_value\n\n # if we would bust, check if there is an ace and substract\n # 10 from the value (11 - 1). Also, set the ace variable to False.\n if score > 21 and ace:\n score -= 10\n ace = False\n\n return score", "def in_suit(list, entry):\n text = list.replace(\"-\", \"\")\n if (\"-\" not in entry) and (entry.isdigit() is True) and (text.isdigit() is True):\n list1 = list.split(\"-\")\n x = int(list1[0])\n suit = set()\n suit.add(x)\n while x < int(list1[len(list1) - 1]):\n x += 1\n suit.add(x)\n suit.add(int(list1[len(list1) - 1]))\n if int(entry) in suit:\n return True\n else:\n return False\n return False", "def determine_what_is_in_a_hand(hand):\n\tface_organization = [0,0,0,0,0,0,0,0,0,0,0,0,0] # deuce, three, four, five, six, seven, eight, nine, ten, jack, queen, king, ace\n\tsuit_organization = [0,0,0,0] #spades, diamonds, hearts, clubs\n\tfor face, suit in hand:\n\t\ti = 0\n\t\twhile i < len(FACES):\n\t\t\tif face.startswith(FACES[i]):\n\t\t\t\tface_organization[i] += 1\n\t\t\ti += 1\n\t\tj = 0\n\t\twhile j < len(SUITS):\n\t\t\tif suit.startswith(SUITS[j]):\n\t\t\t\tsuit_organization[j] += 1\n\t\t\tj += 1\n\thand_organization = face_organization + suit_organization\n\treturn hand_organization", "def get_card(self, name):\n for list in self.my_lists:\n for card in list.list_cards(card_filter='all'):\n if name in card.name:\n return card\n return 'None'", "def change_color_for_id(suit):\n for color in colors.keys():\n if color == suit:\n suit = colors[suit]\n return suit", "def lowest_defense(attack, hand, dank):\n\n low = RANK_NUM[attack.rank]\n # Can only defend in dank suit.\n if attack.suit == dank:\n for card in hand:\n rank = RANK_NUM[card.rank]\n if card.suit == dank and rank > low:\n return card\n\n # Can defend with any dank or higher in same suit.\n else:\n for card in hand:\n rank = RANK_NUM[card.rank]\n if (card.suit == attack.suit and rank > low) or card.suit == 
dank:\n return card\n\n return None", "def get_card(self, name):\n for card in self.cards:\n if card.name == name:\n return card\n\n return None", "def get_suits(hand, board):\n suits = {}\n for card in hand + board:\n if card[1] in suits:\n suits[card[1]] += 1\n else:\n suits[card[1]] = 1\n return suits", "def CountSuits(hand):\r\n numtrump = 0\r\n numss = 0\r\n numos1 = 0\r\n numos2 = 0\r\n\r\n for card in hand:\r\n if card < 7:\r\n numtrump += 1\r\n elif card < 12:\r\n numss += 1\r\n elif card < 18:\r\n numos1 += 1\r\n else:\r\n numos2 += 1\r\n \r\n numsuits = 0\r\n if numtrump != 0:\r\n numsuits += 1\r\n if numss != 0:\r\n numsuits += 1\r\n if numos1 != 0:\r\n numsuits += 1\r\n if numos2 != 0:\r\n numsuits += 1\r\n return [numtrump,numss,numos1,numos2,numsuits]", "def get_poker_hand(cards):\n cards = sorted(cards, key=aces_high, reverse=True)\n cards_low_ace = sorted(cards, key=lambda card: card.value, reverse=True)\n\n # Any jokers will have sorted to the front\n if cards and cards[0].joker:\n raise ValueError(\"Cannot calculate poker hand including jokers\")\n\n if len(cards) > 5:\n return max(map(get_poker_hand, itertools.combinations(cards, 5)))\n\n cvalues = collections.Counter(c.value for c in cards)\n suits = set(c.suit for c in cards)\n of_a_kind_card, of_a_kind = cvalues.most_common(1)[0]\n if len(cvalues) >= 2:\n second_pair_card, second_pair = cvalues.most_common(2)[-1]\n else:\n second_pair_card, second_pair = None, 0\n high_card = cards[0].value\n values = [c.value.value for c in cards]\n is_straight = len(cards) == 5 and all(\n i[0].value == i[1] for i in zip(cards, range(cards[0].value, -5, -1))\n )\n is_ace_low_straight = len(cards) == 5 and all(\n i[0].value == i[1]\n for i in zip(cards_low_ace, range(cards_low_ace[0].value, -5, -1))\n )\n\n if len(suits) == 1 and is_straight:\n return PokerHand.StraightFlush, aces_high(high_card)\n if len(suits) == 1 and is_ace_low_straight:\n return PokerHand.StraightFlush, cards_low_ace[0].value\n if of_a_kind == 4:\n return PokerHand.FourOfAKind, aces_high(of_a_kind_card)\n if of_a_kind == 3 and second_pair == 2:\n return PokerHand.FullHouse, aces_high(of_a_kind_card)\n if len(suits) == 1 and len(cards) == 5:\n return PokerHand.Flush, aces_high(high_card)\n if is_straight:\n return PokerHand.Straight, aces_high(high_card)\n if is_ace_low_straight:\n return PokerHand.Straight, cards_low_ace[0].value\n if of_a_kind == 3:\n return (PokerHand.ThreeOfAKind, aces_high(of_a_kind_card)) + (\n (aces_high(second_pair_card),) if second_pair_card else ()\n )\n if of_a_kind == 2 and second_pair == 2:\n return (PokerHand.TwoPair,) + tuple(\n map(\n aces_high,\n sorted(\n filter(None, (of_a_kind_card, second_pair_card)),\n reverse=True,\n key=aces_high,\n ),\n )\n )\n if of_a_kind == 2:\n return (PokerHand.Pair, aces_high(of_a_kind_card)) + (\n (aces_high(second_pair_card),) if second_pair_card else ()\n )\n\n return (PokerHand.HighCard,) + tuple(\n sorted((aces_high(c) for c in cvalues), reverse=True)\n )", "def __init__(self, rank=\"\", suit=\"\"):\n self.suit = suit\n self.rank = rank\n self.face_up = False", "def __str__(self):\n #Create dictionary for face cards\n translate = {11:'Jack', 12:'Queen', 13:'King', 14: 'Ace'}\n r = self._rank\n #check for face card\n if r in [11, 12, 13, 14]:\n myrank = translate[r]\n else:\n myrank = str(r)\n return myrank + \" of \" + self._suit", "def dealer_matching(self):\n if len([card for card in self.dealer_hand if card[1] == '8']) > 0:\n self.discard_pile = [card for card in self.dealer_hand if card[1] 
== '8'][0]\n self.dealer_hand.remove(self.discard_pile)\n dealer_suits = [card[0] for card in self.dealer_hand]\n self.new_suit = max(set(dealer_suits), key=dealer_suits.count)\n print(\"\\nNew suit is :\", self.new_suit)\n return 1\n if self.new_suit != '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.new_suit:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n self.new_suit = ''\n return 1\n else:\n return 0\n if self.new_suit == '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.discard_pile[0] or card[1] == self.discard_pile[1]:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n return 1\n else:\n return 0", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n player = random.choice(cards)\n return player", "def hand_rank(hand):\n ranks = card_ranks(hand) # ranks is a list of all the ranks. A sorted list of ranks is returned\n if straight(hand) and flush(hand): # Straight flush\n return (8, max(ranks)) # 2 3 4 5 6 (8, 6) 6 7 8 9 T (8, 10)\n elif kind(4, ranks): # Here kind(4, ranks) is used to return a bolean value\n # kind(4, ranks) returns the int when true, returns false if not true (used as boolean)\n return (7, kind(4, ranks), kind(1, ranks)) # 9 9 9 9 3 (7, 9, 3) 9 9 9 9 5 (7, 9, 5)\n elif kind(3, ranks) and kind(2, ranks): # full house\n return (6, kind(3, ranks), kind(2, ranks))\n elif flush(hand): # flush\n return (5, ranks)\n elif straight(ranks): # straight\n return (4, max(ranks))\n elif kind(3, ranks): # 3 of a kind\n return (3, kind(3, ranks), ranks)\n elif two_pair(ranks): # 2 pair\n return (2, two_pair(ranks), ranks)\n elif kind(2, ranks): # kind\n return (1, kind(2, ranks), ranks)\n else: # high card\n return (0, ranks)", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card = random.choice(cards)\n return card", "def get_card_by_name(self,name):\n try:\n card_id = self._category2id['name'][name].values()\n except KeyError:\n print \"No card by given name! 
[{}]\".format(name)\n return None\n\n if len(card_id) > 1:\n print \"Multiple cards match name, returning first...\"\n\n return self._id2database[card_id[0]]", "def convert_card_names(hand):\r\n\r\n cards = []\r\n for i, color in enumerate('CDSH'):\r\n for card in hand:\r\n if card.get_suit() == i and card.get_value() < 11:\r\n cards.append('{}{}'.format(card.get_value(), color))\r\n if card.get_suit() == i and card.get_value() == 11:\r\n cards.append('{}{}'.format('J', color))\r\n if card.get_suit() == i and card.get_value() == 12:\r\n cards.append('{}{}'.format('Q', color))\r\n if card.get_suit() == i and card.get_value() == 13:\r\n cards.append('{}{}'.format('K', color))\r\n if card.get_suit() == i and card.get_value() == 14:\r\n cards.append('{}{}'.format('A', color))\r\n return cards", "def in_suit1(list, entry):\n text = list.replace(\"-\", \"\")\n if (\"-\" not in entry) and (entry.isdigit() is True) and (text.isdigit() is True):\n list1 = list.split(\"-\")\n x = int(list1[0])\n suit = set()\n suit.add(x)\n while x < int(list1[len(list1) - 1]):\n x += 1\n suit.add(x)\n suit.add(int(list1[len(list1) - 1]))\n if int(entry) in suit:\n return True\n else:\n return False\n return False", "def take_comp_turn(self, deck, pile):\n matches = [card for card in self.hand if card.is_match(pile.top_card() != 0)]\n if len(matches) > 0: # can play\n choice = random.randrange(len(matches))\n self.play_card(matches[choice-1], pile)\n if matches[choice - 1].kind == 'wild' or matches[choice - 1].kind == 'wild4':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n matches[choice - 1].color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n print(str(self.name) + \" played \" + str(matches[choice-1]))\n\n else: # comp can't play\n # check if deck is empty -- if so, reset it\n if deck.is_empty():\n deck.reset_deck(pile)\n # draw a new card from the deck\n newcard = self.draw_card(deck)\n print(\"The computer drew: \" + str(newcard))\n if newcard.is_match(pile.top_card()): # can be played\n self.play_card(newcard, pile)\n if newcard.kind == 'wild':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n newcard.color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n else: # still can't play\n print(\"Sorry, you still can't play.\")\n print(str(self.name) + \" played \" + str(newcard))\n return", "def player_discard(self, inpt):\n \n if inpt.isdigit() == False:\n return 0\n if int(inpt) > len(self.player_hand):\n print(\"\\nNumber of card entered is greater than number of cards\")\n print(\"Please try again \\n\")\n return 0\n if self.player_hand[int(inpt)-1][1] == '8':\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n self.new_suit = ''\n while self.new_suit not in ['h','d','s','c']:\n self.new_suit = input(\"Please enter new suit: h, d, s, c\\n\")\n print(\"\\nNew suit is: \", self.new_suit)\n return 1\n if self.new_suit != '':\n if self.player_hand[int(inpt)-1][0] == self.new_suit:\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n self.new_suit = ''\n return 1\n else:\n print(\"\\nYou need to match new suit\")\n print(\"Please try again\\n\")\n return 0\n if self.new_suit == '':\n if self.player_hand[int(inpt)-1][0] == self.discard_pile[0] or \\\n self.player_hand[int(inpt)-1][1] == self.discard_pile[1]:\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n return 1\n else:\n print(\"\\nYou need to match discard pile card suit or rank\")\n print(\"Please try again\\n\")\n return 0", "def poker(hands):\n try:\n 
print \"The winning hand had: \" + \\\n pranks[hand_rank(allmax(hands,key=hand_rank)[0])[0]]\n except ValueError:\n print \"The winning hand had: \" + \\\n pranks[hand_rank(allmax(hands,key=hand_rank))[0]]\n return allmax(hands, key=hand_rank)", "def __init__(self, suit: str, rank: str) -> None:\n self.suit = suit\n self.rank = rank\n self.value = Card.values[rank]\n self.hidden = False", "def find(self, suitList, rank):\r\n flag = False\r\n \r\n #first index of the element of suitNode\r\n cursor = suitList\r\n \r\n #loop the through the list found in suitNode, ex) loop through list of H.\r\n while cursor != None:\r\n \r\n #if rank is found in the list, break out of loop\r\n if cursor.data == rank:\r\n flag = True\r\n break\r\n\r\n cursor = cursor._next\r\n\r\n return flag", "def __attackDamage(self, attack, suit=0):\n if suit:\n for dmg in attack[SUIT_HP_COL]:\n if (dmg > 0):\n return dmg\n return 0\n else:\n for dmg in attack[TOON_HP_COL]:\n if (dmg > 0):\n return dmg\n return 0", "def deal_cards(self):\n self.card = random.randint(1, 13)\n return self.card", "def best_hand(cards):\n\n\tvalues = [card[0:-1] for card in cards]\n\tsuits = [card[-1] for card in cards]\n\n\t# Dictionary for converting card strings to numbers\n\tcardNums = {\"A\":14, \"K\":13, \"Q\":12, \"J\":11, \"10\":10, \"9\":9, \"8\":8, \\\n\t\t\t\"7\":7, \"6\":6, \"5\":5, \"4\":4, \"3\":3, \"2\":2}\n\n\t# Convert card values to real numbers\n\tunsortedValues = [cardNums[value] for value in values]\n\t# unsorted values is necessary for retrieving card + suit\n\t# later\n\tvalues = unsortedValues [:] # make a copy of list\n\tvalues.sort() \t\t# sort values \n\tvalues.reverse()\t# largest # first \n\n\t### Check for possible hands\n\n\n\t# prepare variables for holding potential hands\n\tfourkind = []\n\tflush = [] \t# stores the suit of the flush\n\tstraight = [] \t# stores the highest number of straight \n\tthreekind = [] # stores the best possible 3-of-a-kind \n\tpairs = [] \t# stores one number for each pair\n\n\t# prepare counters for tracking possible hands\n\tstraightCounter = 1 # always have a straight of 1\n\t\n\t# Check for flush\n\tfor suit in suits:\n\t\tif suits.count(suit) >= 5:\n\t\t\tflush = suit\t\n\t\t\tbreak\n\n\t# check for straight, 4-kind, 3-kind, pairs\n\tfor i in range(6): # Don't process the last card\n\n\t\t# Check for straight if still possible\n\t\tif len(straight) == 0:\n\t\t\tprint \"values = \" + str(values)\n\t\t\tstraightSeq = [values.count(values[i]-j) >= 1 for j in range(1,5)]\t\n\t\t\tprint \"straightSeq = \" + str(straightSeq)\n\t\t\tif straightSeq.count(True) == 4:\n\t\t\t\tstraight.append(values[i])\t\n\n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif values[i] == 5:\n\t\t\t\t# check for 4-2-3 first\n\t\t\t\tstraightSeq = [values.count(values[i]-j) >= 1 for j in range(1,4)]\t\n\t\t\t\t# check for Ace\n\t\t\t\tif straightSeq.count(True) == 3 and \\\n\t\t\t\t\tvalues.count(cardNums[\"A\"]) >= 1:\n\t\t\t\t\tstraight.append(values[i])\t\n\n\t\t# Check for 4-kind\n\t\tif len(fourkind) == 0 and values.count(values[i]) == 4:\n\t\t\tfourkind = [values[i]]\n\t\t# Check for 3-kind but don't add same one twice \n\t\telif values.count(values[i]) == 3 and \\\n\t\t\tthreekind.count(values[i]) == 0:\t\n\t\t\tif len(threekind) == 0:\n\t\t\t\tthreekind.append(values[i])\n\t\t\telse: # add to pairs\n\t\t\t\tpairs.append(values[i])\n\t\t# Check for pairs, don't add same pair twice\n\t\telif values.count(values[i]) == 2 and \\\n\t\t\tpairs.count(values[i]) == 0: 
\n\t\t\tpairs.append(values[i])\n\n\t\n\n\t### Determine hand strength based on found hands\n\t# Since values are separated from suits, have to iterate\n\t# through unsorted values to find correct index of each card\n\n\tbesthand = []\n\n\t# Straight flush\n\tif len(straight) != 0 and len(flush) != 0:\n\t\tfor i in range(5): \n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif i == 4 and straight[0] == cardNums[\"5\"]:\n\t\t\t\tcardIndex = unsortedValues.index(cardNums[\"A\"])\n\t\t\telse:\n\t\t\t\tcardIndex = unsortedValues.index(straight[0] - i)\n\n\t\t\tcard = cards[cardIndex] \n\t\t\tif card[-1] == flush:\n\t\t\t\tbesthand.append(card)\n\t\t\telse:\n\t\t\t\tbreak\n\t\tif len(besthand) == 5:\n\t\t\treturn (besthand, Ranks.StraightFlush)\n\t\telse: # not a straight flush, so re-init besthand\n\t\t\tbesthand = []\n\n\t# Four of a kind\n\tif len(fourkind) != 0:\n\t\tcardValue = convNumToCard(fourkind[0])\n\t\t# insert the 4 out of 5 cards b/c suit is known\n\t\tbesthand = [cardValue + \"S\", cardValue + \"H\", cardValue + \"C\", cardValue + \"D\"]\n\t\t# add the highest value card that isn't 4-of-a-kind\n\t\tfor i in range(7):\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != fourkind[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tbreak\n\t\treturn (besthand, Ranks.FourKind)\n\t# Full House\t\n\telif len(threekind) != 0 and len(pairs) != 0:\n\t\tfor i in range(7): # add 3-kind to besthand\n\t\t\tif unsortedValues[i] == threekind[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 3:\n\t\t\t\t\tbreak\n\t\t\n\t\tfor i in range(7): # add pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.FullHouse)\n\t# Flush\n\telif len(flush) != 0:\n\t\t# iterate through sorted cards, add that card if its\n\t\t# suit matches the flush suit\n\t\tfor i in range(7):\n\t\t\t# find card in original unsorted list\n\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\tcard = cards[cardIndex] \n\t\t\tif card[-1] == flush[0]:\n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.Flush)\n\t# Straight\n\telif len(straight) != 0:\n\n\t\tfor i in range(5): \n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif i == 4 and straight[0] == cardNums[\"5\"]:\n\t\t\t\tcardIndex = unsortedValues.index(cardNums[\"A\"])\n\t\t\telse:\n\t\t\t\tcardIndex = unsortedValues.index(straight[0] - i)\n\t\t\tcard = cards[cardIndex] \n\t\t\tbesthand.append(card)\n\t\treturn (besthand, Ranks.Straight)\n\t# Three of a kind\n\telif len(threekind) != 0:\n\t\tfor i in range(7): # add 3-kind to besthand\n\t\t\tif unsortedValues[i] == threekind[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 3:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add two high cards to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != threekind[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.ThreeKind)\n\t# Two pair\n\telif len(pairs) == 2:\n\t\tfor i in range(7): # add 1st pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 
2:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add 2nd pair to besthand\n\t\t\tif unsortedValues[i] == pairs[1]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 4:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add high card to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != pairs[0] and values[i] != pairs[1]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.TwoPair)\n\t# Pair\n\telif len(pairs) == 1:\n\t\tfor i in range(7): # add pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 2:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add high card to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != pairs[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.Pair)\n\t# High card\n\telse:\n\t\tfor i in range(7):\n\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\tcard = cards[cardIndex] \n\t\t\tbesthand.append(card)\n\t\t\tif len(besthand) == 5:\n\t\t\t\treturn (besthand, Ranks.HighCard)", "def get_card_info(card):\n result = ((card-1)/13 + 1, card - ((card-1)/13)*13)\n return result", "def print_cards(cards):\r\n string = ''\r\n for c in cards:\r\n suit = c[0]\r\n if suit == 1:\r\n suit = \"\\u2665\" # heart\r\n elif suit == 2:\r\n suit = \"\\u2660\" # Spade\r\n elif suit == 3:\r\n suit = \"\\u2666\" # Diamond\r\n else:\r\n suit = \"\\u2663\" # club\r\n\r\n num = c[1]\r\n if num == 11:\r\n num = 'J'\r\n elif num == 12:\r\n num = 'Q'\r\n elif num == 13:\r\n num = 'K'\r\n else:\r\n num = str(num)\r\n\r\n string = string + num + suit + ' '\r\n return string", "def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))", "def action_peek_cards(self) -> int:\n for card in self.house.hand.cards:\n if not card.is_open:\n return int(card.value)", "def card(self) -> Optional[Card]:\n\n if all([c != Card.EMPTY for c in self]):\n raise ValueError(\"Expected player to have only one card, but found two\")\n\n for c in self:\n if c != Card.EMPTY:\n return c\n\n return None\n # raise ValueError(\"Expected player to have one card, but found empty hand\")", "def _find_joker(hand, joker):\n\n if joker in hand:\n return True\n else:\n return False" ]
[ "0.7840655", "0.7840655", "0.76949203", "0.74996036", "0.71775246", "0.7047003", "0.7040735", "0.69748276", "0.68587106", "0.6670335", "0.6659063", "0.6624975", "0.6460083", "0.6283954", "0.6283012", "0.6268685", "0.6239651", "0.6238502", "0.6206122", "0.6204729", "0.6155964", "0.6118332", "0.6048156", "0.6022777", "0.5976422", "0.59710103", "0.59587276", "0.5951771", "0.5935404", "0.59203064", "0.58534944", "0.58326584", "0.58181125", "0.5809168", "0.58040434", "0.5797346", "0.57855195", "0.57853216", "0.57679534", "0.57613015", "0.57590693", "0.5753494", "0.5744723", "0.5736277", "0.57191485", "0.5717325", "0.57167536", "0.5671481", "0.5671438", "0.566372", "0.56533027", "0.564711", "0.56419206", "0.5633825", "0.5630194", "0.562208", "0.56203634", "0.56202984", "0.5618466", "0.56061184", "0.5605521", "0.56022197", "0.56011844", "0.55999154", "0.5596817", "0.559402", "0.55936056", "0.55933946", "0.55910325", "0.55878913", "0.5586514", "0.5585567", "0.55773044", "0.55760527", "0.5572682", "0.5568587", "0.55675095", "0.55639654", "0.5562067", "0.556065", "0.55588514", "0.5550066", "0.5547997", "0.5541014", "0.5537215", "0.55313605", "0.5520579", "0.5517404", "0.551216", "0.54861164", "0.5476784", "0.5456985", "0.5454666", "0.5450554", "0.5450552", "0.5446651", "0.5446171", "0.54453945", "0.5444622", "0.5438057" ]
0.78034705
2
Returns the point value of the card based on the point_sysm
def points(self): if self.rank() >= 9: return self.point_sysm[self.rank()] else: return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_points(self):\n return self.card_points", "def points(self):\r\n\t\tif self.rank() in self.point_sysm:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn (self.rank() + 2)", "def get_value(self) -> float:\n return self.points[0, 0]", "def get_value(self):\n return complex(*self.points[0, :2])", "def get_point(self):\n return self.point", "def get_point(self, params):\n spm = self.spm\n cpoint = self.current_point\n x = params.get('X',cpoint[0])\n y = params.get('Y',cpoint[1])\n z = params.get('Z',cpoint[2])\n a = cpoint[3]\n b = cpoint[4]\n if 'E' in params:\n a = params.get('E',cpoint[3])\n elif 'A' in params:\n a = params.get('A',cpoint[3])\n elif 'B' in params:\n b = params.get('B',cpoint[4])\n point = [x,y,z,a,b]\n return point", "def fd_pasym_head(self,pos):\n\t\tvalue = 0.0\n\t\ty = self.y\n\t\t#print('point:%d' % pos)\n\t\tfor j in range(-pos, 0):\n\t\t\t#print( \"\ty[%d] - y[%d] * c[%d][%d] \" % (pos+j, pos, pos, j) )\n\t\t\tvalue = value + ( y[pos+j] - y[pos] ) * self.coef_asym[pos][j]\n\t\tfor j in range(1,self.order - pos+1):\n\t\t\t#print( \"\ty[%d] - y[%d] * c[%d][%d] \" % (pos+j, pos, pos, j) )\n\t\t\tvalue = value + ( y[pos+j] - y[pos] ) * self.coef_asym[pos][j]\n\t\treturn value", "def getPoint(self):\n return self.point", "def getPoint(self):\n return self.point", "def getCoordinate(self, value, cardinal_point):\n d, m, s = value\n\n if cardinal_point in ['S', 'W']:\n d = -d\n m = -m\n s = -s\n\n return d + m / 60.0 + s / 3600.0", "def _get_visual_position(self, point: int) -> float:\n return point / self._param[\"n_v\"] + np.random.uniform() / \\\n self._param[\"n_v\"]", "def calculate_points(card):\n for value in scores.keys():\n if value == card.value:\n card_score = scores[card.value]\n return card_score", "def get_value(self, point: Point) -> FieldState:\n return self.arr[point.y][point.x]", "def get_setpoint(self):\n value = self.synth.cbox.get_dacs()[self.id_]\n value = value / self.conf['PSICONV']\n log.debug(\"Current setpoint on regulator %d = %f\",\n self.id_, value)\n return value", "def fd_pasym_tail(self,pos):\n\t\tvalue = 0.0\n\t\ty = self.y\n\t\tk = self.nx-1-pos\n\t\t#print( 'point:%d, eqauls to point %d' % (pos, k))\n\t\tfor j in range(-k,0):\n\t\t\t#print( '\ty[%d] - y[%d] * c[%d][%d] ' %( pos , pos-j, pos, j) )\n\t\t\tvalue = value + ( y[pos] - y[pos-j] ) * self.coef_asym[pos][j]\n\t\tfor j in range(1,self.order-k+1):\n\t\t\t#print( '\ty[%d] - y[%d] * c[%d][%d] ' %( pos , pos-j, pos, j) )\n\t\t\tvalue = value + ( y[pos] - y[pos-j] ) * self.coef_asym[pos][j]\n\t\treturn value", "def getPoint(self):\n return self._point", "def point(self):\n bfsize = card(self.basefield)\n one = self.basefield.one\n t = self.basefield.zero\n if len(self) == 2 or (self.a1 == self.a2 == self.a3 == self.basefield.zero):\n while self.basefield.Legendre(t) != 1:\n s = self.basefield.createElement(bigrandom.randrange(bfsize))\n t = self.cubic(s)\n if not t:\n return [s, t]\n t = self.basefield.sqrt(t)\n r = bigrandom.randrange(2)\n if r:\n return [s, -t]\n return [s, t]\n elif self.ch != 2 and self.ch != 3:\n sform = self.simple()\n while sform.basefield.Legendre(t) != 1:\n s = sform.basefield.createElement(bigrandom.randrange(bfsize))\n t = (s**3+sform.a*s+sform.b)\n x = (s-3*self.b2) // (36*one)\n y = (sform.basefield.sqrt(t) // (108*one)-self.a1*x-self.a3)//(2*one)\n return [x, y]\n elif self.ch == 3:\n while sform.basefield.Legendre(t) != 1:\n s = self.basefield.createElement(bigrandom.randrange(bfsize))\n t = 
(s**3+self.a2*s**2+self.a4*s+self.a6)\n return [s, self.basefield.sqrt(t)]\n else:\n raise NotImplementedError(\"This is not implemented.\")", "def getP2(self):\n return self.points[1]", "def get_points(self):\n\t\treturn self.points", "def get_points(self):\r\n return self.points", "def value(self, card):\n return self.valores[self.deck.index(card)]", "def get_distance(self, point, cpoint):\n distance = 0.0\n for m, s in zip(point, cpoint):\n distance += pow(m - s, 2)\n distance = math.sqrt(distance)\n return distance", "def calcul_point_plan_projection(cls,cx,cy,cz,spx,spy,axe_x,axe_y):\n projX=gs.Vector3(spx*axe_x.x,spx*axe_x.y,spx*axe_x.z)\n projY=gs.Vector3(spy*axe_y.x,spy*axe_y.y,spy*axe_y.z)\n point=gs.Vector3(projX+projY)+gs.Vector3(cx,cy,cz)\n return point", "def findpc(self):\n u = -(-can.C.len() + self.P.len() + can.R)/self.V.len()\n if u >= 0:\n return self.P + self.V.scale(u), u\n else:\n u = (can.C.len() - self.P.len() + can.R)/self.V.len()\n return self.P + self.V.scale(u), u", "def get_point_on(self, s):\n\n x = self.n1.x * (1 - s) + self.n2.x * s\n y = self.n1.y * (1 - s) + self.n2.y * s\n z = self.n1.z * (1 - s) + self.n2.z * s\n\n return [x, y, z]", "def get_com(points):\n com = vpy.vec(0, 0, 0)\n for obj in points:\n com += obj.pos\n return com/len(points)", "def inner_point(self, point) -> Vec:\n return self.pos - point", "def apoint(rpoint):\r\n tempy = gv[\"fixedLL\"][1] + gv[\"globalscale\"]*rpoint[1]*(gv[\"fixedUR\"][1]-gv[\"fixedLL\"][1])\r\n if gv[\"localxscale\"] != -1:\r\n tempx = gv[\"fixedLL\"][0] + gv[\"localxscale\"]*gv[\"globalscale\"]*rpoint[0]*(gv[\"fixedUR\"][0]-gv[\"fixedLL\"][0])\r\n else:\r\n tempx = gv[\"fixedLL\"][0] + gv[\"globalscale\"]*rpoint[0]*(gv[\"fixedUR\"][0]-gv[\"fixedLL\"][0])\r\n if tempx - gv[\"fixedUR\"][0] > 0 and tempx - gv[\"fixedUR\"][0] < 1e-7:\r\n tempx = gv[\"fixedUR\"][0]\r\n if tempx > gv[\"fixedUR\"][0]:\r\n print ( \"problem x value : \",tempx, \" max x allowed : \",gv[\"fixedUR\"][0])\r\n return [tempx,tempy]", "def transformPos(self, point):\n return point / self.scale - self.offsetToCenter()", "def s(self, position: Vector) -> float:\n return self.local_coordinates(position)[0]", "def pointPotential(x,y,q,posx,posy): \n\tk=8.98e9; q=1.6e-19 \n PointP = (k*q)/sqrt((x-posx)**2+(y-posy)**2)\n return PointP", "def getP1(self):\n return self.points[0]", "def apply(self, point):\n m = numpy.dot(self.matrix, numpy.array([point[0], point[1], 1.0]).transpose())\n return pygame.Vector2(m[0], m[1])", "def _get_sensor_position(self, cam): \r\n sensor_transform = self._sensors[cam].get_transform()\r\n \r\n return sensor_transform", "def distanceFrom(self, point = (-1, -1)):\n if (point[0] == -1 or point[1] == -1):\n point = np.array(self.image.size()) / 2\n return spsd.euclidean(point, [self.x, self.y])", "def point(self):\n return self.x, self.y, self.z", "def closest_point(self, point, maxdist=0.0, return_param=False):\n return self.xyz", "def _point(self):\n raise NotImplementedError", "def _world_point(self, point_3d):\n return self.obj.matrix_world @ point_3d", "def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b", "def getPoint(self):\n return Point(*self.position)", "def get(self, point):\n\t\treturn self._grid.get(point)", "def calc_dist(self, points): \n dist_x = [self._current_pose.position.x - p.pose.position.x for p in points]\n dist_y = [self._current_pose.position.y - p.pose.position.y for p in 
points]\n dist = np.hypot(dist_x,dist_y) \n if len(dist) > 0:\n return min(dist) \n else: \n return 0", "def Pcoord(modName=\"CylR\"):\n\t\tmodelNode = slicer.util.getNode(modName) # Read the node (module)\n\t\tsr = modelNode.GetPolyData() # module turn polygons\n\t\tpxyz = [0, 0, 0]\n\t\tNumP = sr.GetNumberOfPoints() # The number of points in the polygon\n\t\tfor i in range(NumP // 2): # circulate: i=NumP//2\n\t\t\tsr.GetPoint(i, pxyz) # Get the point coordinates in turn\n\t\t\t# becomes a matrix\n\t\t\tif i == 0:\n\t\t\t\tPxyz = np.array([pxyz])\n\t\t\telse:\n\t\t\t\tPxyz = np.append(Pxyz, np.array([pxyz]), axis=0)\n\t\taxisMed0 = (Pxyz[0] + Pxyz[NumP // 4]) / 2\n\t\taxisMed1 = (Pxyz[1] + Pxyz[1 + NumP // 4]) / 2\n\t\tdimeter = np.linalg.norm(Pxyz[0] - Pxyz[NumP // 4])\n\t\treturn np.array([axisMed0, axisMed1]), np.around(dimeter), Pxyz", "def get_points(self):\r\n return self.nx*self.ny*self.nz", "def _one_sided_p_value(t, df):\n return scipy.stats.t.sf(t, df=df)", "def get_position(self, t0):\n my_pos_x=np.random.uniform(-20, 20)\n my_pos_y=np.random.uniform(-20, 20)\n r=np.array([my_pos_x, my_pos_y])\n x_y=np.zeros(shape=(self.no_planets-1, 2))\n tol=1e-5\n diff=np.zeros(self.no_planets-1)\n for k in range(self.no_planets-1):\n r1=np.linalg.norm(r)\n r2=np.linalg.norm(r-self.positionFunction(t0)[:, k])\n r3=np.linalg.norm(r-self.positionFunction(t0)[:, k+1])\n x1=0\n y1=0\n x2=self.positionFunction(t0)[0,k]\n y2=self.positionFunction(t0)[1,k]\n x3=self.positionFunction(t0)[0,k+1]\n y3=self.positionFunction(t0)[1, k+1]\n x,y,difference=self.triangulate_analytic(x1,y1,r1,x2,y2,r2,x3,y3,r3)\n x_y[k, 0]=x\n x_y[k, 1]=y\n diff[k]=difference\n if (diff > tol).any():\n print diff.max()\n print \"Oh no, one failed :(\"\n sys.exit(1)\n print \"My pos x:\", my_pos_x\n print \"My pos y:\", my_pos_y\n #return x1, y1, r1, x2, y2, r2, x3, y3, r3", "def get_points(self, guess, drawn_card, prev_card):\n points_owed = 0\n\n if guess.lower() == \"higher\":\n if drawn_card >= prev_card:\n points_owed = 100\n else:\n points_owed = -75\n\n elif guess.lower() == \"lower\":\n if drawn_card <= prev_card:\n points_owed = 100\n else:\n points_owed = -75\n\n return points_owed", "def get_points(self):\n\t\treturn self._points", "def param_to_point(self, param):\n return self.p1 + param * (self.p2 - self.p1)", "def tValueForPoint(self, point):\n if self.segmentType == \"curve\":\n on1 = self.previousOnCurve\n off1 = self.points[0].coordinates\n off2 = self.points[1].coordinates\n on2 = self.points[2].coordinates\n return _tValueForPointOnCubicCurve(point, (on1, off1, off2, on2))\n elif self.segmentType == \"line\":\n return _tValueForPointOnLine(point, (self.previousOnCurve, self.points[0].coordinates))\n elif self.segmentType == \"qcurve\":\n raise NotImplementedError\n else:\n raise NotImplementedError", "def M0(self):\n return sum(ps.M0 for ps in self.pointsources)", "def compute_cardinal_points(self, in_ro, out_ro, out_rd):\r\n tf = -out_ro.x / out_rd.x\r\n tp = (in_ro.x - out_ro.x) / out_rd.x\r\n return -(out_ro + out_rd * tf).z, -(out_ro + out_rd * tp).z", "def calculate_points(self):\n points = 0\n for power in self.stats['powers']:\n points += self.stats['powers'][power]\n return points", "def project(self, point):\n return np.round(project(self.camera.P, point)).astype(int)", "def distanceFrom(self, point = (-1, -1)):\n if (point[0] == -1 or point[1] == -1 and len(self)):\n point = self[0].image.size()\n\n return spsd.cdist(self.coordinates(), [point])[:,0]", "def reflectivity(self, point):\n return 
self._r", "def get_card_value(self, card):\n if card >= 10:\n return 10\n if card == 1:\n return 11\n return card", "def getPoints(self,currPt,xform):\n if self.isAbs:\n # Absolute values.\n newPts = []\n for pt in self.points:\n x,y = xform.transformPoint(pt[0],\n pt[1])\n newPts.append(Point(x,y))\n\n return newPts\n else:\n # Relative points, offset with the currPt\n pts = []\n for pt in self.points:\n sx,sy = xform.scalePoint(pt[0],\n pt[1])\n x = sx + currPt[0]\n y = sy + currPt[1]\n pts.append(Point(x,y))\n\n return pts", "def get_points(self):\n try:\n return self.current_3D_points\n except:\n print('no such current_image')", "def get_points(self, guess, current_card, next_card):\n\n # (AH)\n if guess.lower() == \"h\" and next_card > current_card:\n points = 100\n elif guess.lower() == \"l\" and next_card < current_card:\n points = 100\n else:\n points = -75\n\n # (AH) points will be added to the total score in Director class.\n return points", "def fd_psym(self, pos):\n\t\tvalue = 0.0\n\t\ty = self.y\n\t\tfor k in range(1,self.N+1):\n\t\t\tvalue = value + (y[pos+k] - y[pos-k]) * self.coef_sym[k-1]\n\t\treturn value", "def return_zeropoint():\n return 22.5", "def fit_plane_to_point_cloud(pc: np.ndarray) -> Tuple[Any, Any, Any, Any]:\n center = pc.sum(axis=0) / pc.shape[0]\n u, s, vh = np.linalg.svd(pc - center)\n\n # Get the unitary normal vector\n u_norm = vh[2, :]\n d = -np.dot(u_norm, center)\n a, b, c = u_norm\n return a, b, c, d", "def point(self) -> Point:\n return self._point", "def get_pos(self, frame):\n frame = self.perspective_shift(frame)\n \n puck_mask = self.color_mask(frame, self.color_green, thresh=15)\n striker_mask = self.color_mask(frame, self.color_orange, thresh=25, blur=5)\n \n puck_loc, _ = self.find_centroids(puck_mask)\n striker_locs, _ = self.find_centroids(striker_mask, 2)\n \n p_pos = self.abs_to_meter(puck_loc[0])\n # cases: (pos,pos), (pos,None), (None,None)\n if striker_locs[0] is not None:\n pos_1 = self.abs_to_meter(striker_locs[0])\n pos_2 = self.abs_to_meter(striker_locs[1])\n s1_pos = pos_1 if pos_1[1]<0 else pos_2\n s2_pos = pos_2 if pos_1[1]<0 else pos_1\n else:\n s1_pos, s2_pos = None, None \n \n return [p_pos, s1_pos, s2_pos]", "def calculate_points(hand): \r\n hand_value = 0\r\n ace_count = 0 \r\n \r\n #Finds value of non-Ace cards, and counts number of Aces.\r\n for card in hand:\r\n if card[0] == 'Ace':\r\n ace_count += 1\r\n else:\r\n # Calls card_value function to evaluate the card.\r\n hand_value += card_value(card) \r\n \r\n #Ace card present\r\n if ace_count > 0:\r\n return ace_hand_value(ace_count, hand_value)\r\n \r\n #Implied \"if ace_count == 0:\"\r\n return hand_value", "def get_value(character):\n\n point = -1\n if character >= '1' and character <= '9':\n point = int(character)\n elif character in ['x', '/']:\n point = Constants.MAXIMUM_POINT\n elif character == '-':\n point = Constants.MINIMUM_POINT\n else:\n raise ValueError()\n return point", "def pointPotential(x,y,q,posx,posy):\n k = 8.99e9\n V = (k * q) / (sqrt(x**2 + (y - sqrt((posx**2 + posy**2)))**2))\n return V", "def __getElementFromPairs(self, point):\n return self.maze[point[0]][point[1]]", "def get_point(self):\n return self._x, self._y", "def point(self):\n return shapely.geometry.Point(self._x[0], self._x[1])", "def get_projection_point(self, point, plane, test=False):\n return point_on_plane_projection(point, plane, test=test)", "def _p_value(self):\n pval = chi2.sf(self.chi_square, self.degrees_of_freedom)\n\n return pval", "def __getitem__(self,point):\n 
point=point.normalize(self.size)\n return self.terrain[point.y][point.x]", "def get_center_of_mass_allies(self,obs):", "def point_to_param(self, pt):\n r = self.p2 - self.p1\n return (pt - self.p1).dot(r) / r.square()", "def get_Es_point(self, source_point, target_point):\n normalize_by = np.bitwise_and(\n self.overlap_mask, self.target_patch_masks[target_point]\n ).sum()\n return (\n np.linalg.norm(np.array(source_point) - np.array(target_point))\n + np.linalg.norm(np.array(target_point) - np.array(source_point)) / normalize_by\n )", "def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())", "def twoCardReturnPoints(self, valueLs):\n colSum = 0\n colSum += valueLs[0]\n colSum += valueLs[1]\n if valueLs[0] == 1 and valueLs[1] == 1: # two Aces\n colSum = 12\n elif (valueLs[0] == 1 or valueLs[1] == 1): # one Ace\n colSum += 10\n # count actual points\n if colSum == 21: # black jack \n points = 10\n else:\n points = self.countPoints(colSum)\n return points", "def obtain_points(self):\n # Swapaxes makes the output a column rather than a row\n return np.swapaxes(np.array([np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateX\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateY\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateZ\"][\" data\"][:, :, :])]), 0, 1)", "def get_pressure_setpoint(self) -> float:\n\n return self.send(self.cmd.GET_VACUUM_SET)", "def get_2d_point(point_3d_homogeneous, projection_matrix):\r\n\r\n temp = np.matmul(projection_matrix, point_3d_homogeneous)\r\n\r\n return cv.convertPointsFromHomogeneous(np.array([temp], np.float))", "def getPoint(self, xyz):\n return Point( (xyz[0] + 0.5) * self.resolution\n , (xyz[1] + 0.5) * self.resolution\n , (xyz[2] + 0.5) * self.resolution\n )", "def point_to_number(self, point: Sequence[float]) -> complex:\n\n x, y = self.point_to_coords(point)\n return complex(x, y)", "def point_to_parameter(self, pt):\n uv = ShapeAnalysis_Surface(self.surface()).ValueOfUV(\n gp_Pnt(pt[0], pt[1], pt[2]), 1e-9\n )\n return np.array(uv.Coord())", "def value(self,pt,eta,mode):\n\n self._eta_range= get_eta_key(eta)+'_2012B'\n self._pt_range= get_pt_key(pt)\n\n if mode == '0' :\n return self._map['DoubleMu17Mu8_Mu8_Tight'][self._eta_range][self._pt_range]['data']['efficiency'] \n elif mode == '+1' :\n return self._map['DoubleMu17Mu8_Mu8_Tight'][self._eta_range][self._pt_range]['data']['err_hi']\n elif mode == '-1' : \n return self._map['DoubleMu17Mu8_Mu8_Tight'][self._eta_range][self._pt_range]['data']['err_low']\n else: \n print 'ERROR: wrong \\'mode\\' specified: try \\'0\\',\\'+1\\' or \\'-1\\'' \n return 0", "def get_xyz(self, xyz):\n if cm.mag(xyz) < self.get_actual_inner_boundary():\n val = np.array([np.NaN, np.NaN, np.NaN])\n else:\n points = self.__get_points_object__([xyz])\n val = self.__get_data_at_points__(points)[0]\n\n # print (val)\n return val", "def value(self,pt,eta,mode):\n \n self._eta_range= get_eta_key(eta)+'_2012B'\n self._pt_range= get_pt_key(pt)\n\n if mode == '0' :\n return self._map['DoubleMu17Mu8_Mu17_Tight'][self._eta_range][self._pt_range]['data']['efficiency'] \n elif mode == '+1' :\n return self._map['DoubleMu17Mu8_Mu17_Tight'][self._eta_range][self._pt_range]['data']['err_hi']\n elif mode == '-1' : \n return self._map['DoubleMu17Mu8_Mu17_Tight'][self._eta_range][self._pt_range]['data']['err_low']\n else: \n print 
'ERROR: wrong \\'mode\\' specified: try \\'0\\',\\'+1\\' or \\'-1\\''\n return 0", "def _get_plunger_position(self, position):\n try:\n value = self.positions[position]\n if isinstance(value, (int, float, complex)):\n return value\n else:\n raise RuntimeError(\n 'Plunger position \"{}\" not yet calibrated'.format(\n position))\n except KeyError:\n raise RuntimeError(\n 'Plunger position \"{}\" does not exist'.format(\n position))", "def normal(self,points):\n ez=np.array([[0,0,1]])\n v=((points-self.pos()*ez)*self.C-ez)\n return (v/np.linalg.norm(v,axis=1)[:,np.newaxis])#*np.sign(self.C)", "def point(self, uv):\n pt = self.surface().Value(uv[0], uv[1])\n return geom_utils.gp_to_numpy(pt)", "def get_points(self):\n return self._points", "def get_points(self):\n return self._points", "def point(k, steer):\r\n\tglobal translation\r\n\tdirection, sens = translation[steer]\r\n\tfront = (sens+1)+int(direction==\"y\")\r\n\tif front != k[\"front\"]:\r\n\t\tk[\"front\"] = front # Change le sens\r\n\t\tk[\"c\"] = k[\"graphism\"][front] # Met à jour le caractère\r\n\t\treturn True", "def pcc_pos(self, row1, row2):\n mean1 = np.mean(row1)\n mean2 = np.mean(row2)\n\n a = 0\n x = 0\n y = 0\n for n1, n2 in zip(row1, row2):\n a += (n1 - mean1) * (n2 - mean2)\n x += (n1 - mean1) ** 2\n y += (n2 - mean2) ** 2\n \n if a == 0:\n return 0\n else:\n return a / sqrt(x * y)", "def rapoint(rpoint):\r\n return [rpoint[0]*gv[\"globalscale\"]*(gv[\"fixedUR\"][0]-gv[\"fixedLL\"][0]),\r\n rpoint[1]*gv[\"globalscale\"]*(gv[\"fixedUR\"][1]-gv[\"fixedLL\"][1])]", "def coordinate_point_to_coordinate(self, point):\n return self.gen(self._point_to_ray[point])", "def GetPoint2(self):\n ...", "def GetPoint2(self):\n ..." ]
[ "0.62925595", "0.62118644", "0.61739796", "0.6122231", "0.60565126", "0.6052252", "0.59305984", "0.5907347", "0.5907347", "0.58484674", "0.583888", "0.5818497", "0.57965267", "0.57405937", "0.57035714", "0.56290007", "0.5627985", "0.56179416", "0.5547793", "0.55453503", "0.553879", "0.55328524", "0.55281186", "0.5486932", "0.5485947", "0.5482049", "0.5465262", "0.5463044", "0.54521215", "0.5449991", "0.54411495", "0.5435687", "0.54342544", "0.5415536", "0.5392747", "0.5390073", "0.5382654", "0.53799367", "0.53796506", "0.5379058", "0.53686863", "0.53550637", "0.5353308", "0.53496855", "0.53450704", "0.53378874", "0.53217024", "0.5321258", "0.53183687", "0.53169596", "0.5313616", "0.53071594", "0.53037536", "0.5296533", "0.5295469", "0.528248", "0.52792215", "0.52727574", "0.52726626", "0.5268858", "0.52560544", "0.52511936", "0.52508515", "0.5250291", "0.5249501", "0.5244459", "0.5243168", "0.5233364", "0.5232746", "0.5227314", "0.52244806", "0.5223694", "0.52230585", "0.52196133", "0.5213492", "0.52043813", "0.5183978", "0.5158926", "0.51566213", "0.51522017", "0.51455176", "0.5144891", "0.514364", "0.51406896", "0.51313984", "0.5128396", "0.51162106", "0.51147676", "0.51115674", "0.51014763", "0.5099018", "0.509808", "0.50933874", "0.50933874", "0.50926894", "0.5089905", "0.5088027", "0.5078489", "0.50782335", "0.50782335" ]
0.6351527
0
Returns True if the self rank is higher than the other rank
def __lt__(self,other): return self.rank() < other.rank()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __gt__(self, other):\n return int(self.rank) > int(other.rank)", "def __ge__(self, other):\n return int(self.rank) >= int(other.rank)", "def __le__(self, other):\n return int(self.rank) <= int(other.rank)", "def __gt__(self, other):\n return self.eval_score < other.eval_score", "def __lt__(self, other):\n return int(self.rank) < int(other.rank)", "def __gt__(self, hand2):\n # TODO: Implement\n if self.type > hand2.type:\n return True\n elif self.type < hand2.type:\n return False\n elif self.type == hand2.type:\n # NOTE: This ignores the case in which both hands have the same type\n # and rank. I think this is okay for now.\n return self.rank > hand2.rank", "def __gt__(self, other: 'MaxNode') -> bool:\n if self.priority == other.priority:\n return self.value < other.value\n return self.priority < other.priority", "def __gt__(self, other):\n return self.weight() > other.weight()", "def same_rank(self, other: 'Piece') -> bool:\n\n return self.rank == other.rank", "def __gt__(self, other):\n student1 = self.calculate_total()\n student2 = other.calculate_total()\n\n if student1 > student2:\n return True\n else:\n return False", "def _compare(self, other): \n if(self.rank==other.rank):\n if (self.rank == 5 and other.rank==5) or (self.rank ==3 and other.rank==3):\n maxself = max(self.ranks) \n maxother = max(other.ranks)\n if(maxself>maxother):\n if(Card.ranks[maxself]=='Ace' or Card.ranks[maxother] == 'Ace'):\n maxself1 = sorted(self.ranks,reverse=True)\n maxself1 = maxself1[1]\n maxother1 = sorted(other.ranks,reverse=True)\n maxother1 = maxother1[1]\n if(maxself1>maxother1):\n return 1\n else:\n return 0\n else:\n if(Card.ranks[maxself]=='Ace' or Card.ranks[maxother] == 'Ace'):\n maxself1 = sorted(self.ranks,reverse=True)\n maxself1 = maxself1[1]\n maxother1 = sorted(other.ranks,reverse=True)\n maxother1 = maxother1[1]\n if(maxself1<maxother1):\n return -1\n else:\n return 0\n \n if (self.rank == 4 and other.rank==4):\n maxself = max(self.ranks) \n maxother = max(other.ranks)\n if(maxself>maxother):\n return 1\n else:\n return -1\n if (self.rank ==2 and other.rank==2) or (self.rank ==0 and other.rank==0):\n newself = sorted(self.ranks,reverse=True)\n newother = sorted(other.ranks,reverse=True)\n maxsel = max(newself)\n maxoth = max(newother)\n if(maxsel>maxoth):\n return 1\n elif(maxsel<maxoth):\n return -1\n else:\n maxsel1= newself[1]\n maxoth1 = newother[1]\n if(maxsel1>maxoth1):\n return 1\n elif(maxsel1<maxoth1):\n return -1\n else:\n maxsel2= newself[2]\n maxoth2 = newother[2]\n if(maxsel2>maxoth2):\n return 1\n elif(maxsel2<maxoth2):\n return -1\n else:\n return 0\n if self.rank ==1 and other.rank==1:\n pairwali1 = {}\n pairwali2={}\n for i in range(0,3):\n if other.ranks[i] not in pairwali1:\n pairwali1[other.ranks[i]] = 1\n else:\n pairwali1[other.ranks[i]]= pairwali1[other.ranks[i]]+1\n if self.ranks[i] not in pairwali2:\n pairwali2[self.ranks[i]] = 1\n else:\n pairwali2[self.ranks[i]] = pairwali2[self.ranks[i]]+1\n t = list(pairwali1.keys())[list(pairwali1.values()).index(2)]\n r = list(pairwali2.keys())[list(pairwali2.values()).index(2)]\n if t!=r:\n if t>r:\n return -1\n elif t<r:\n return 1\n elif t==r:\n t= list(pairwali1.keys())[list(pairwali1.values()).index(1)]\n r = list(pairwali2.keys())[list(pairwali2.values()).index(1)]\n if t>r:\n return -1\n elif t<r:\n return 1\n else:\n return 0\n\n else:\n if(self.rank>other.rank):\n return 1\n else:\n return -1", "def __gt__(self, transposon):\n return self.score > transposon.score", "def __gt__(self, other: 'MinNode') -> 
bool:\n if self.priority == other.priority:\n return self.value > other.value\n return self.priority > other.priority", "def __gt__(self, other):\n return self.weight > other.weight", "def __gt__(self, other):\n return self.x ** 2 + self.y ** 2 > other.x ** 2 + other.y ** 2", "def __gt__(self, other):\n return self.abs2phy.__gt__(other)", "def __gt__(self, other):\n return self.abs2phy.__gt__(other)", "def __gt__(self, other):\n if isinstance(other, Hand):\n return self.hands_list > other.hands_list", "def __gt__(self, other):\n if other.groupnumber > self.groupnumber:\n return True\n else:\n return False", "def __gt__(self, other):\n return True if self._compare(other) > 0 else False", "def __gt__(self, other):\n if self.i1 > other.i1:\n return True\n elif self.i1 == other.i1:\n if self.i2 > other.i2:\n return True\n elif self.i2 == other.i2 and self.axial > other.axial:\n return True\n return False", "def __gt__(self, other):\n if self.head_vertex <= other.head_vertex:\n return False\n elif self.tail_vertex <= other.tail_vertex:\n return False\n elif self.weight <= other.weight:\n return False\n return True", "def __gt__(self, other):\n if self.head_vertex <= other.head_vertex:\n return False\n elif self.tail_vertex <= other.tail_vertex:\n return False\n elif self.weight <= other.weight:\n return False\n return True", "def __gt__(self, other): \n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator>other.numerator):\n return True\n else:\n return False", "def __ge__(self,f2):\n return self > f2 or self == f2", "def __cmp__(self, other) :\n if self.strength > other.strength:\n return 1;\n elif self.strength == other.strength :\n if self.rank > other.rank :\n return 1;\n elif self.rank == other.rank :\n return 1 if self.kickers > other.kickers else -1 if self.kickers < other.kickers else 0;\n return -1;", "def __gt__(self, other):\n if isinstance(other, float):\n return self.floatvalue > other\n else:\n return not self.negative and not self == other", "def __gt__(self, other):\n return self >= other and self != other", "def __gt__(self, other):\n return self >= other and self != other", "def __gt__(self, other):\n return self.__ge__(other) and self.__ne__(other)", "def all_gt(self, other):\n return self.x > other.x and self.y > other.y", "def __gt__(self, other):\n return other < self", "def __gt__(self, other):\n return not (self <= other)", "def __gt__(self, other):\n return self.__f > other.get_f()", "def __cmp__(self, other):\n \n result = cmp(self.rank(), other.rank())\n if (result == 0):\n # Compare hand values\n for i in range(len(self.values())):\n result = cmp(self.values()[i], other.values()[i])\n if (result != 0):\n return result\n return result", "def compare(self, t2) -> bool:\n return True if self.get_edge(t2) >= 0 else False", "def __le__(self,f2):\n return not self > f2 or self == f2", "def __gt__(self, other):\n return not self <= other", "def __gt__(self, other):\n return self >= other and not self <= other", "def __gt__(self, other):\n if not isinstance(other, HuffNode):\n raise TypeError('not an instance of HuffNode')\n\n return self.freq > other.freq", "def pareto_better(self, other: \"EvalItem\") -> bool:\n return self.size <= other.size and other.result <= self.result", "def compare_rank(self, obj: int) -> int:\n def normalize_ace(a):\n return a+13 if a == 1 else a\n norm_self_rank = normalize_ace(self.rank)\n norm_obj = normalize_ace(obj)\n\n return 1 if norm_self_rank > norm_obj else (0 if 
norm_self_rank == norm_obj else -1)", "def __gt__(self, other):\r\n assert isinstance(other, Order)\r\n return self - other > 0", "def __gt__(self, other):\n return self.__cmp__(other) > 0", "def __gt__(self, other):\n\t\ttry:\n\t\t\treturn self.val > other.val\n\t\texcept:\n\t\t\treturn self.val > other", "def __gt__(self,other):\r\n\t\tsorted_self = sorted(self.vector, reverse=True) #sort both lists in descending order\r\n\t\tsorted_other = sorted(other, reverse=True) \r\n\t\tcmpflag = False\r\n\t\tfor li1, li2 in zip(sorted_self, sorted_other):\r\n\t\t\tif(li1 > li2):\r\n\t\t\t\tcmpflag = True\r\n\t\treturn cmpflag", "def __gt__(self, other):\n return self.element() > other.element()", "def __gt__(self, other: 'LTL'):\n gt = self >= other\n neq = self != other\n return gt and neq", "def __le__(self, other) -> bool:\n if isinstance(other, int) or isinstance(other, float):\n return self.balance <= other\n else:\n raise TypeError", "def __gt__(self, other):\n if isinstance(other, type(self)):\n return self.number > other.number\n return NotImplemented", "def __gt__(self, other):\n return self.head_vertex > other.head_vertex and self.tail_vertex > other.tail_vertex", "def __gt__(self, other):\n self.numerator = abs(self.numerator)\n self.denominator = abs(self.denominator)\n other.numerator = abs(other.numerator)\n other.denominator = abs(other.denominator)\n\n num1 = (self.numerator/other.numerator)\n num2 = (self.denominator/other.denominator)\n\n if num1 > num2:\n return True\n else:\n return False", "def __gt__(self, other):\n return self._metric_value > other.metric_value()", "def is_better_than(self, other):\n return better_candidate(self, other) is self", "def __gt__(self, vs) -> bool:\n return vs <= self", "def __ge__(self, other) -> bool:\n if isinstance(other, int) or isinstance(other, float):\n return self.balance >= other\n else:\n raise TypeError", "def __gt__(self, other: Card) -> bool:\n return not self.__le__(other)", "def real_result(self, other):\r\n self_in_game_skill = np.random.normal(self.skill,self.var)\r\n other_in_game_skill = np.random.normal(other.skill,other.var)\r\n if self_in_game_skill > other_in_game_skill:\r\n return 1\r\n else:\r\n return 0", "def __gt__(self, Other):\n return not self <= Other", "def __gt__(self, other):\n return greater(self, other)", "def __le__(self, other):\n if self.head_vertex > other.head_vertex:\n return False\n elif self.tail_vertex > other.tail_vertex:\n return False\n elif self.weight > other.weight:\n return False\n return True", "def __le__(self, other):\n if self.head_vertex > other.head_vertex:\n return False\n elif self.tail_vertex > other.tail_vertex:\n return False\n elif self.weight > other.weight:\n return False\n return True", "def __gt__(self, other):\n result = False\n if isinstance(other, Shape):\n result = not self.__le__(other)\n return result", "def __ge__(self, other):\n return type(other) == Pred or (isinstance(other, RealPred) and other <= self)", "def __ge__(self, other):\n return greater_equal(self, other)", "def __lt__(self, other):\n if bool(random.getrandbits(1)):\n return self.get_f_score() < other.get_f_score()\n else:\n return self.get_f_score() <= other.get_f_score()", "def __gt__(self: _TT, other: _TT) -> bool:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return self.value > other.value", "def __lt__(self, other: 'MaxNode') -> bool:\n if self.priority == other.priority:\n return self.value > other.value\n return self.priority > other.priority", "def __lt__(self, 
other):\n return self.score < other.score", "def __gt__(self, other):\n self_list = self.date.split(\"/\")\n other_list = other.date.split(\"/\")\n if self_list[2] > other_list[2]:\n return True\n else:\n if self_list[2] == other_list[2]:\n if self_list[1] > other_list[1]:\n return True\n elif self_list[1] == other_list[1]:\n if self_list[0] > other_list[0]:\n return True\n return False", "def __ge__(self, other):\n return other <= self", "def __gt__(self, other):\n return self._ordinals > other.ordinal()", "def check_higher(value_1,value_2, *args, **kwargs):\n if max(value_1, value_2, *args, **kwargs) == value_1:\n return True\n else:\n return False", "def __ge__(self, other):\n\t\treturn self.__gt__(other) or self.__eq__(other)", "def __gt__(self, other):\n return self.greaterThan(other)", "def __le__(self, other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator<=other.numerator):\n return True\n else:\n return False", "def __ge__(self, other):\n return type(other) == Pred or (isinstance(other, GPred) and other <= self)", "def __gt__(self, other):\n return self.estimated_cost > other.estimated_cost", "def find_lone(self, board):\n res = False\n if (self.player):\n if (self.board.p2vec[0] > board.p2vec[0]):\n res = True\n else:\n if (self.board.p1vec[0] > board.p1vec[0]):\n res = True\n return res", "def __gt__(self, other):\n if type(self) is not type(other):\n return NotImplemented\n \n # Month has priority over day.\n self_month = self.month\n other_month = other.month\n \n if self_month > other_month:\n return True\n \n if self_month < other_month:\n return False\n \n self_day = self.day\n other_day = other.day\n \n if self_day > other_day:\n return True\n \n if self_day < other_day:\n return False\n \n # And lastly the name\n self_name = self.name\n other_name = other.name\n \n if self_name > other_name:\n return True\n \n if self_name < other_name:\n return False\n \n if self.color_code > other.color_code:\n return True\n \n return False", "def __gt__(self, other):\n return self._key > other._key", "def __cmp__(self, _other):\n return cmp(self.fitness(), _other.fitness())", "def __lt__(self, other_node):\n return self.split_info.gain > other_node.split_info.gain", "def __gt__(self, other):\n if self.date > other.date:\n return True\n else:\n return False", "def __gt__ (self, other) :\n return other.__lt__(self)", "def __gt__(self, other):\n return other < self._cmpkey()", "def __gt__(self, other):\n\n if self.count == other.count:\n return self.word < other.count\n return self.count > other.count", "def __ge__(self, other):\n return True if self._compare(other) >= 0 else False", "def __ge__(self, other):\n return self.x ** 2 + self.y ** 2 >= other.x ** 2 + other.y ** 2", "def __ge__(self, other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator>=other.numerator):\n return True\n else:\n return False", "def __ge__(self, other):\n if self.head_vertex < other.head_vertex:\n return False\n elif self.tail_vertex < other.tail_vertex:\n return False\n elif self.weight < other.weight:\n return False\n return True", "def __ge__(self, other):\n if self.head_vertex < other.head_vertex:\n return False\n elif self.tail_vertex < other.tail_vertex:\n return False\n elif self.weight < other.weight:\n return False\n return True", "def __gt__(self, other: Any) -> bool:\n return not self.__lt__(other)", "def __ge__(self, other):\n return self.element() >= 
other.element()", "def __ge__(self, other):\n # self >= other\n return self.runtime.greater_than_equal(self, other)", "def __gt__(self,other):\n if isinstance(other, RegularPoly):\n return(self.vert_count > other.vert_count)\n else:\n raise NotImplementedError('Incorrect data type')", "def __gt__(self, other: Union[Range, 'RangeSet']) -> bool:\n if isinstance(other, RangeSet):\n # return the first difference between this range and the next range\n for my_val, their_val in zip(self._ranges, other._ranges):\n if my_val != their_val:\n return my_val > their_val\n return len(self._ranges) > len(other._ranges)\n elif isinstance(other, Range):\n # return based on the first range in this RangeSet\n return len(self._ranges) >= 1 and self._ranges[0] > other\n else:\n return False", "def __ge__(self, other):\n return self.abs2phy.__ge__(other)", "def __ge__(self, other):\n return self.abs2phy.__ge__(other)", "def __ge__(self, other: Any) -> bool:\n return self.__gt__(other) or self.__eq__(other)" ]
[ "0.8275582", "0.79964995", "0.7796771", "0.72897935", "0.716286", "0.714543", "0.7067061", "0.7035331", "0.7034307", "0.70110303", "0.69910157", "0.69268256", "0.6925097", "0.69192606", "0.6889218", "0.68517315", "0.68517315", "0.6849785", "0.6836221", "0.6831901", "0.6809451", "0.6807295", "0.6807295", "0.6744248", "0.6738268", "0.66884166", "0.66818535", "0.6677014", "0.6677014", "0.66747445", "0.66579133", "0.6642781", "0.66119176", "0.65938795", "0.65778726", "0.65516055", "0.65512556", "0.65416676", "0.65369076", "0.6535139", "0.6516867", "0.6514797", "0.6486226", "0.6480783", "0.64774966", "0.6477196", "0.64703643", "0.64654255", "0.6452667", "0.64507145", "0.6449388", "0.6446896", "0.6426875", "0.6398908", "0.63979733", "0.6393329", "0.63758785", "0.6367356", "0.63606113", "0.63587177", "0.63556993", "0.63556993", "0.63512135", "0.6349977", "0.63448495", "0.6344596", "0.6342977", "0.63197327", "0.6305933", "0.6274632", "0.6266883", "0.6260282", "0.62442654", "0.6240498", "0.6231306", "0.6223073", "0.620513", "0.619349", "0.6181513", "0.6178194", "0.6170888", "0.6165273", "0.61630523", "0.61584747", "0.61407906", "0.6135951", "0.61353534", "0.6133654", "0.61290634", "0.6124549", "0.61232686", "0.61232686", "0.611856", "0.61153716", "0.61099154", "0.6108031", "0.6107249", "0.6106145", "0.6106145", "0.61009675" ]
0.69531363
11
Returns the point value for the card based on Blackjack scoring rules
def points(self): if self.rank() in self.point_sysm: return self.point_sysm[self.rank()] else: return (self.rank() + 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_points(card):\n for value in scores.keys():\n if value == card.value:\n card_score = scores[card.value]\n return card_score", "def hand_points(hand):\n points = [[]]\n branch = 1\n for card in hand:\n if not card[\"is_hidden\"]:\n if card[\"value\"].isnumeric():\n for possibility in range(branch):\n points[possibility].append(int(card[\"value\"]))\n elif card[\"value\"] == \"A\":\n for possibility in range(branch):\n # Ace is 1 or 11. Creating the two possibility\n points.append(points[possibility] + [11]) \n points[possibility].append(1)\n branch += 1\n else:\n # Left are the face value of 10\n for possibility in range(branch):\n points[possibility].append(10)\n\n score = list(zip([sum(branch) for branch in points], points))\n score.sort(key=lambda x: x[0], reverse=True)\n\n for total, points in score:\n if total == 21 and len(hand) == 2:\n return total, \"BlackJack!\"\n if total <= 21:\n if 1 in points and 11 in points:\n return total, None\n if 1 in points: \n return total, \"Soft\"\n if 11 in points:\n return total, \"Hard\"\n else:\n return total, None\n\n # If you get there, you have lost or you had empty hand \n # or all card in hand was hiddien\n if score:\n return score[-1][0], None\n else:\n return 0, None", "def get_points(self, guess, current_card, next_card):\n\n # (AH)\n if guess.lower() == \"h\" and next_card > current_card:\n points = 100\n elif guess.lower() == \"l\" and next_card < current_card:\n points = 100\n else:\n points = -75\n\n # (AH) points will be added to the total score in Director class.\n return points", "def get_points(self):\n return self.card_points", "def calculate_points(hand): \r\n hand_value = 0\r\n ace_count = 0 \r\n \r\n #Finds value of non-Ace cards, and counts number of Aces.\r\n for card in hand:\r\n if card[0] == 'Ace':\r\n ace_count += 1\r\n else:\r\n # Calls card_value function to evaluate the card.\r\n hand_value += card_value(card) \r\n \r\n #Ace card present\r\n if ace_count > 0:\r\n return ace_hand_value(ace_count, hand_value)\r\n \r\n #Implied \"if ace_count == 0:\"\r\n return hand_value", "def get_card_value(self, card):\n if card >= 10:\n return 10\n if card == 1:\n return 11\n return card", "def get_value(self):\n #Finds all of the values in the cards\n score_list=[Card.get_value(card) for card in self.cards]\n #Sums the scores\n if self.num_cards() > 0:\n total_score=reduce((lambda x,y: x+y),score_list)\n return total_score\n else:\n return 0", "def get_points(self, guess, drawn_card, prev_card):\n points_owed = 0\n\n if guess.lower() == \"higher\":\n if drawn_card >= prev_card:\n points_owed = 100\n else:\n points_owed = -75\n\n elif guess.lower() == \"lower\":\n if drawn_card <= prev_card:\n points_owed = 100\n else:\n points_owed = -75\n\n return points_owed", "def updates(self, next_card):\n # calls the get_points function from Dealer_card class, and store the points into a value called score\n points = self.next_card.get_points(next_card, self.current_card)\n final_score = self.score + points\n \n return final_score", "def get_value(self):\n \n value = 0\n ace = False\n\n for card in self.hand:\n value += VALUES[card.get_rank()]\n \n if (card.get_rank() == 'A'):\n ace = True\n \n if not ace:\n return value\n else:\n if (value + 10) <= 21:\n return (value + 10)\n else:\n return value", "def scoreGame(self):\n # create valueLs[card1,card2,...], pass it to sumHandReturnPoints(valueLs) or twoCardReturnPoints(valueLs)\n scoreLs = []\n ### Score of row\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n 
points = self.sumHandReturnPoints(valueLs)\n scoreLs.append(points)\n\n ### Score of 4-card column\n for offset in range(0,3): # 0,1,2\n tmpLs = []\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n if len(valueLs) == 5:\n iterStart = 1\n else:\n iterStart = 0\n card = valueLs[iterStart+offset]\n tmpLs.append(card)\n points = self.sumHandReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Score of 2-card column\n #(1) 1st column\n valueLs1 = self.table['row1']\n valueLs2 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs1[0].get_rank())\n tmpLs.append(valueLs2[0].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points)\n #(2) 5th column\n valueLs3 = self.table['row1']\n valueLs4 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs3[-1].get_rank())\n tmpLs.append(valueLs4[-1].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Add up scoreLs\n sumPoints = 0\n for points in scoreLs:\n sumPoints += points\n return sumPoints", "def value(self, card):\n return self.valores[self.deck.index(card)]", "def score_on_hands(cards_on_hand):\r\n score = 0\r\n straightCount = 0\r\n max_card = 0\r\n suite_dict = {}\r\n face_dict = {}\r\n transfer_dict = {'A':1,'J':11,'Q':12,'K':13}\r\n card_face = []\r\n '''Circulate the player's hand, build a list of points and a suit dict'''\r\n for index in range(len(cards_on_hand)):\r\n if str(cards_on_hand[index])[1] in transfer_dict:\r\n card_face.append(transfer_dict.get(str(cards_on_hand[index])[1]))\r\n elif str(cards_on_hand[index])[1] == '1':\r\n card_face.append(10)\r\n else:\r\n card_face.append(int(str(cards_on_hand[index])[1]))\r\n suite_dict[str(cards_on_hand[index])[0]] = 1\r\n '''Because 1 can be treated as 1 or 14, so if 1 exists, add 14 to the end of the list to calculate flush'''\r\n if 1 in card_face:\r\n card_face.append(14)\r\n\r\n '''Check straight, if it is straight, straight should be 4'''\r\n for face in range(len(card_face)-1):\r\n if card_face[face] +1 == card_face[face+1] :\r\n straightCount +=1\r\n\r\n '''Detect the number of cards of the same number'''\r\n for face in card_face:\r\n\r\n if face not in face_dict:\r\n face_dict[face] = 1\r\n else:\r\n face_dict[face] += 1\r\n\r\n '''Store the maximum number of points'''\r\n max_card = card_face[len(card_face)-1]\r\n\r\n '''Calculate player score'''\r\n if straightCount == 4:\r\n score+= 8\r\n\r\n if len(suite_dict) == 1:\r\n score+= 9\r\n\r\n for values in face_dict.values():\r\n if values == 2:\r\n score += 3\r\n elif values == 3:\r\n score += 7\r\n elif values == 4:\r\n score += 11\r\n\r\n return (score, max_card)", "def twoCardReturnPoints(self, valueLs):\n colSum = 0\n colSum += valueLs[0]\n colSum += valueLs[1]\n if valueLs[0] == 1 and valueLs[1] == 1: # two Aces\n colSum = 12\n elif (valueLs[0] == 1 or valueLs[1] == 1): # one Ace\n colSum += 10\n # count actual points\n if colSum == 21: # black jack \n points = 10\n else:\n points = self.countPoints(colSum)\n return points", "def getPoints(self):\n count = 0\n for card in self.cards:\n if card.rank > 9:\n count += 10\n elif card.rank == 1:\n count += 11\n else:\n count += card.rank\n # Deduct 10 if Ace is available and needed as 1\n for card in self.cards:\n if count <= 21:\n break\n elif card.rank == 1:\n count -= 10\n return count", "def get_value(self):\r\n value, aces = 0, 0\r\n for card in self.hand:\r\n value += VALUES[card.get_rank()]\r\n # Keep track of the aces in Hand\r\n if card.get_rank() == \"A\":\r\n aces += 1\r\n if aces >= 1 and value + 10 
<= 21:\r\n value += 10\r\n return value", "def card_value (card):\r\n value = card[0]\r\n if value in ['Jack','Queen','King']:\r\n return 10\r\n if value in [2,3,4,5,6,7,8,9,10]:\r\n return value\r\n else:\r\n raise 'CardValueError'", "def BJValue(self):\r\n #if the face value of a card is greater or equals to 10\r\n if self.rank >= 10:\r\n #count the value as 10\r\n return 10\r\n #if the face value of a card is less than 10\r\n else:\r\n #return the face value of the card\r\n return self.rank", "def calculate_points(this_hand):\n # Check to see if hand got dealt an Ace and whether 11 points or 1 point\n total_points = 0\n int_ace_count = 0\n\n # For each card, add together all the points\n for each_card in this_hand:\n total_points += each_card.get_points()\n\n # Check for Aces, get the name of the card\n this_card_name = each_card.get_name()\n\n if this_card_name == \"A\":\n int_ace_count += 1\n\n # How to determine if Aces are worth 1 or 11\n # A - 1 or 11\n # AA - 2 or 12\n # AAA - 3 or 13\n # AAAA - 4 or 14\n\n if int_ace_count > 0:\n # Add 10 points to the total if it doesn't bust the hand\n if (total_points + 10) <= 21:\n total_points += 10\n\n return total_points", "def get_value(self):\n bj_rankings = {'Ace': 11, 'King': 10, 'Queen': 10, 'Jack': 10,\n 10: 10, 9: 9, 8: 8, 7: 7, 6: 6, 5: 5, 4: 4, 3: 3, 2: 2}\n value = 0\n for card in self.cards:\n value += bj_rankings[card.rank]\n\n if value > 21:\n bj_rankings['Ace'] = 1\n value = 0\n for card in self.cards:\n value += bj_rankings[card.rank]\n return value", "def get_value(self):\n global VALUES\n hand_value = 0\n has_ace = False\n\n for card in self.hand:\n v = VALUES[card.get_rank()]\n hand_value += v\n if card.get_rank() is 'A':\n has_ace = True\n\n if not has_ace:\n return hand_value\n else:\n if hand_value + 10 <= 21:\n return hand_value + 10\n else:\n return hand_value", "def score(self) -> int:\n card_values = {\n '0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n '10': 10,\n 'JACK': 10,\n 'QUEEN': 10,\n 'KING': 10,\n 'ACE': 11}\n hand_value = []\n for i in self.cards:\n hand_value.append(card_values[i.value])\n while sum(hand_value) > 21 and 11 in hand_value:\n for i, j in enumerate(hand_value):\n if j == 11:\n hand_value[i] = 1\n break\n else:\n pass\n return sum(hand_value)", "def blackjackValue(self):\n NUMBERRANKS = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\n FACECARDS = [\"jack\", \"queen\", \"king\"]\n ACE = [\"ace\"]\n if self.rank in NUMBERRANKS:\n return int(self.rank)\n elif self.rank in FACECARDS:\n return 10\n elif self.rank in ACE:\n return 11", "def score(self, hand, position):\n\n try:\n assert self.grid[position][1] == \"---\"\n except AssertionError:\n print self\n print position\n raise FilledInError\n except KeyError:\n print \"\\nCheck your code. 
This is not a valid position:\", position, \"\\n\"\n raise\n\n if position.startswith(\"n\"): # Return sum of relevant number\n n = int(position[1])\n return sum(d for d in hand.dice if d == n)\n\n elif position in [\"k3\", \"k4\", \"ch\"]: # Return total sum\n if position == \"k3\" and hand.max_tally()[0] < 3:\n return 0 # The is not a three of a kind\n elif position == \"k4\" and hand.max_tally()[0] < 4:\n return 0 # The is not a four of a kind\n return sum(hand.dice)\n\n elif position in [\"fh\", \"ss\", \"ls\", \"yz\", \"yb\"]: # Return fixed score\n if position == \"fh\":\n tallies = hand.get_dicedict().values()\n if 1 in tallies:\n return 0 # This is not a full house\n\n elif position in [\"ss\", \"ls\"]:\n ds = \"\".join(str(x) for x in hand.sort_by_value())\n if position == [\"ss\"]:\n if \"1234\" not in ds and \"2345\" not in ds and \"3456\" not in ds:\n return 0\n else:\n if \"12345\" not in ds and \"23456\" not in ds:\n return 0\n\n else:\n if hand.max_tally()[0] < 5:\n return 0 # This is not a yahtzee\n if position == \"yb\" and self.grid[\"yz\"] == \"---\":\n return 0 # YB only scores points if there already is a YZ\n\n return fixed_scores[position]\n\n else:\n raise InvalidPositionError", "def sumHandReturnPoints(self, valueLs): # card is the element in valueLs\n #Ace is dealt with here, assume Ace to be 11 initially, decreasing by 10 per Ace if sum > 21\n rowSum = 0\n AceCount = 0\n for ele in valueLs:\n rank = ele.get_rank()\n if rank == 1:\n rank = 11\n AceCount += 1 # serve as flag\n rowSum += rank\n while(AceCount!=0):\n if rowSum > 21:\n rowSum -= 10\n AceCount -= 1\n points = self.countPoints(rowSum)\n return points", "def get_score(self, card_index: int = 0) -> int:\n return self.get_score_list[card_index]", "def calculate_score(player_cards):\n score = sum(player_cards)\n return score", "def score_card_response(card, response):\n\n # If not a choice card, raise Exception(\"No method implemented.\")\n\n for opt in card['options']:\n if response == opt['value']:\n if opt['correct']:\n return 1.0, opt['feedback']\n else:\n return 0.0, opt['feedback']\n\n return 0.0, 'Default error ajflsdvco'", "def calculate_value(self, hand):\n global FACE_CARDS\n #could refactor the 2 hand possiblities into methods of a Dealer and Player Class\n if hand == \"player\":\n if self.player_hand[-1].value in FACE_CARDS:\n self.player_value += 10\n elif self.player_hand[-1].value == \"A\":\n self.player_value += 11\n self.player_ace_count += 1\n else:\n self.player_value += int(self.player_hand[-1].value)\n\n if self.player_value > 21:\n if self.player_ace_count > self.player_almost_bust:\n #To prevent a Bust, your Ace became a one\n self.player_value -= 10\n self.player_almost_bust += 1\n else:\n self.player_lose()\n elif self.player_value == 21:\n self.blackjack = True\n self.endgame()\n\n elif hand == \"dealer\":\n if len(self.dealer_hand) > 1:\n if self.dealer_hand[-1].value in FACE_CARDS:\n self.dealer_value += 10\n elif self.dealer_hand[-1].value == \"A\":\n self.dealer_value += 11\n self.dealer_ace_count += 1\n else:\n self.dealer_value += int(self.dealer_hand[-1].value)\n\n if self.dealer_value > 21:\n if self.dealer_ace_count > self.dealer_almost_bust:\n #To prevent a Bust, the Dealer's Ace became a one\n self.dealer_value -= 10\n self.dealer_almost_bust += 1\n else:\n self.player_win()\n elif self.dealer_value == 21:\n self.player_lose()", "def get_value(self) -> float:\n return self.points[0, 0]", "def get_score(self):\r\n return self.lcp.get_score()", "def aces_high(card):\n if 
isinstance(card, Value):\n if card == Value.Ace:\n return 14\n return card.value\n\n if card.joker:\n return 15\n if card.value == Value.Ace:\n return 14\n return card.value.value", "def _score_hand(hand):\n\n score = 0\n ace = False\n\n for next_card in hand:\n\n # get the value of the card\n card_value = next_card[0]\n\n # if it is an ace and we do not hold one, the value is 11 instead of 1\n if card_value == 1 and not ace:\n ace = True\n card_value = 11\n\n # add up the value to the score\n score += card_value\n\n # if we would bust, check if there is an ace and substract\n # 10 from the value (11 - 1). Also, set the ace variable to False.\n if score > 21 and ace:\n score -= 10\n ace = False\n\n return score", "def getScore(data):\n return score", "def sum_(hand: list):\n vals = [card.rank for card in hand]\n intvals = []\n while len(vals) > 0:\n value = vals.pop()\n try:\n intvals.append(int(value))\n except ValueError:\n if value in ['K', 'Q', 'J']:\n intvals.append(10)\n elif value == 'A':\n intvals.append(1) # Keep it simple for the sake of example\n if intvals == [1, 10] or intvals == [10, 1]:\n print(\" Blackjack!\")\n return(21)\n else:\n points = sum(intvals)\n print(\" Current score: {}\".format(str(points)))\n return(points)", "def sum_points(self) -> int:\n return sum([card.rank_value for card in self.deck.cards])", "def getScore(self,board):\n return board.getScore()[self.tile]", "def _calculate_score_with_threshold(self):\n\n clue_number = 0\n positive_score, negative_score = 0, 0\n negative_number = 0\n total_score = 0\n\n # find largest negative score\n largest_negative_score = -1.\n for ix, (card, score) in enumerate(self.sorted_card_score_pairs):\n # find maximum score of negative word\n if card.color not in [self.team, \"DOUBLE\"]:\n largest_negative_score = score\n break\n\n # add scores higher than threshold + largest negative score to positive_score\n for card, score in self.sorted_card_score_pairs:\n if (score > (self.delta+largest_negative_score)\n and card.color in [self.team, \"DOUBLE\"]):\n clue_number += 1\n positive_score += score\n elif card.color not in [self.team, \"DOUBLE\"]:\n negative_score += score\n negative_number += 1\n else:\n continue\n\n if not self.penalize_negative:\n self.logger.info(\"negative score set to 0.\")\n negative_score = 0\n\n # if threshold(delta) is large, there will be no clues.\n # try to give at least one clue\n # select the positive card with score larger than largest_negative_score.\n if clue_number == 0:\n self.logger.debug(\"clue number: 0.\")\n for card, score in self.sorted_card_score_pairs:\n if card.color in [self.team, \"DOUBLE\"]:\n positive_score = score\n clue_number += 1\n self.cropped_threshold = score - largest_negative_score\n else:\n positive_score = 0\n break\n\n if self.normalize_negative:\n total_score = (1-self.alpha) * positive_score - self.alpha * negative_score / negative_number\n else:\n total_score = (1-self.alpha) * positive_score - self.alpha * negative_score\n self.logger.debug(\"word: {}, positive_score: {}, negative_score: {}, total_score: {}\".format(self.clue, positive_score, negative_score, total_score))\n return total_score, clue_number", "def policy_value_fn(board):\n # return uniform probabilities and 0 score for pure MCTS", "def calculate_score(hand,hand_value):\n first,second,third,fourth,fifth,*_=[rank for rank,suit in hand]\n if fifth==12:\n fifth=-1\n return calculate_score_pairs(hand_value,first,second,third,fourth,fifth)", "def _update_value(self) -> int:\n\n value_list = [card.value 
if card.value <= 10 else 10 for card in self]\n hand_value = sum(value_list)\n\n # Checks to see if any Aces can be worth 11 points instead of 1 point\n while value_list.count(1) > 0 and (21 - hand_value) >= 10:\n value_list[value_list.index(1)] = 11\n hand_value = sum(value_list)\n\n self._value = hand_value", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def score(self, hand, top_card):\n powerset = GameState.powerset(list(hand).append(top_card))\n score = 0\n for meld in powerset:\n score += self.score_meld(meld)\n return score", "def get_score(self):\n return self.score", "def _get_reward(self, five_cards):\n \n return 1-self.evaluator.get_five_card_rank_percentage(self.evaluator._five(five_cards))", "def ace_hand_value(ace_count, hand_value):\r\n #case1, the case where the Ace in question is worth 11 points,\r\n # doesn't reduce 11 to 10 in order to be more clear about where these\r\n # values are coming from. ace_count is reduced by 1 to offset 11 being\r\n # counted separately. \r\n case1 = hand_value + 11 + (ace_count - 1)\r\n if case1 <= 21:\r\n return case1\r\n \r\n #Implied \"if case1 > 21:\"\r\n #case2 is the case where the Ace in question is worth 1 point.\r\n case2 = hand_value + ace_count\r\n return case2", "def calculate_score(self):\n\n correct_award = 150\n turns_total = self.turns.count()\n turns_correct = self.turns.filter(is_match=True).count()\n seconds_left = (60.0 - (self.turns.last().created - self.turns.first().created).total_seconds()) or 0\n maxpoints = turns_correct * correct_award\n deduction_for_errors = correct_award * 0.11123\n\n maxpoints -= ((turns_total - turns_correct) * 2 * deduction_for_errors)\n maxpoints += seconds_left * 5.123214\n\n return Decimal(maxpoints)", "def take_card(self, card_color=None):\r\n Card = self.deck.take_card(card_color)\r\n return Card.value if Card.color == Color.BLACK else Card.value * -1", "def score(dart_pos):\n \n # polar coordinates\n r = np.linalg.norm(dart_pos)\n theta = (360/(2*np.pi)*atan2(dart_pos[0], dart_pos[1]))%360\n \n # number section\n segment_values = [20, 1, 18, 4, 13, 6, 10, 15, 2, 17, 3, 19, 7, 16, 8, 11, 14, 9, 12, 5, 20]\n number = segment_values[int((theta + 9)//18)]\n \n # bonus\n if r <= 6.35:\n score = 50\n elif 6.35 < r <= 15.9:\n score = 25\n elif r > 170:\n score = 0\n elif 162 < r <= 170:\n score = 2*number\n elif 99 < r <= 107:\n score = 3*number\n else:\n score = number\n \n return score", "def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def get_score_pos(pos, board):\n 
if piece_on_pos(pos, board):\n return board[pos[0]][pos[1]].points\n else:\n return 0", "def score(self):", "async def get_card_value(card):\n return ex.first_result(await ex.conn.fetchrow(\"SELECT value FROM blackjack.cards WHERE id = $1\", card))", "def get_balance(card):\n data = {\n \"Card.Number\": card[0],\n \"Card.Pin\": card[1],\n }\n\n response = requests.post(BALANCE_URL, data=data, headers=HEADERS)\n if response.status_code == 200:\n match = BALANCE_RE.search(response.text)\n if match:\n return float(match.group(1))", "def raw_score(self,X,Y):\n return self.rf.score(X,Y)", "def hit(self, card):\n self.append(card)\n values=[]\n values.append(card.value())\n if values[0] < 2:\n values.append(values[0]+ 10)\n new_sums =set([v+s for v in values for s in self.possible_sums if v+s <=21])\n new_sums =sorted(new_sums)\n if len(new_sums) ==0:\n self.hand=-1\n else:\n self.hand = new_sums[-1]\n self.possible_sums = new_sums", "def score_hand(hand):\n print(hand)\n score = 0\n ace = False\n for card in hand:\n if card == 1 and not ace:\n ace = True\n score += 11\n if score > 21 and ace:\n score -= 10\n else:\n score += card\n return score", "def get_score(self):\r\n return None", "def get_score(self, game_state):\n if self.red:\n return game_state.get_score()\n else:\n return game_state.get_score() * -1", "def policy_value_fn(board):\n # return uniform probabilities and 0 score for pure MCTS\n action_probs = np.ones(len(board.availables))/len(board.availables)\n return zip(board.availables, action_probs), 0", "def calculate_player_position_score(marbles: list):\n prime = Evaluator.prime_positions\n good = Evaluator.good_positions\n position_score = 0\n for marble in marbles:\n if marble in prime:\n position_score += 10\n elif marble in good:\n position_score += 5\n else:\n position_score -= 1\n return position_score", "def score(self):\n result = 0\n\n idx = self.cups.index(1)\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n # ok, keep adding things until we get back to 1\n while 1 != self.cups[idx]:\n # add this value..\n result *= 10\n result += self.cups[idx]\n # and on to the next one..\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n\n return result", "def blackjack_result(cards):\n sum = 0\n a_cards = 0\n dictionary = {\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n 'T': 10,\n 'J': 10,\n 'Q': 10,\n 'K': 10,\n }\n for card in cards.split():\n if card in dictionary:\n sum = sum + dictionary[card]\n elif card == 'A':\n a_cards = a_cards + 1\n\n if a_cards > 0:\n for i in range(a_cards):\n if a_cards > 1:\n sum = sum + 1\n a_cards = a_cards - 1\n else:\n if sum + 11 < 22:\n sum = sum + 11\n else:\n sum = sum + 1\n\n return sum", "def calculate_score(card_list):\n if sum(card_list) == 21 and len(card_list) == 2:\n return 0\n if sum(card_list) > 21 and 11 in card_list:\n card_list.remove(11)\n card_list.append(1)\n return sum(card_list)", "def scoring(self):\n pass", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = own_moves / opp_moves\n\n completeness = completeness_of_game(game)\n centerness_score = 0\n\n if completeness < 0.5:\n own_centerness = centerness(game, player)\n opp_centerness = centerness(game, game.get_opponent(player))\n 
centerness_ratio = own_centerness / opp_centerness + 0.1\n\n center_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def policy_value_fn(board):\n # return uniform probabilities and 0 score for pure MCTS\n moves, true_moves = board.get_avaiable_moves()\n action_probs = np.ones(len(moves)) / len(moves)\n return zip(true_moves, action_probs), 0", "def custom_score(game, player):\n # return penalize_corners_heuristic(game, player)\n # return favor_run_away_heuristic(game, player)\n return look_ahead_heuristic(game, player)", "def max_score(self):\n return self.points", "def sumDeck(aDeck,cardType):\r\n points =0\r\n \r\n for x in range(len(aDeck)):\r\n card=aDeck[x]\r\n if (card[\"Type\"]==cardType):\r\n points+= card[\"Value\"]\r\n return points", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def calculate_score(cards):\n if sum(cards) == 21 and len(cards) == 2:\n return 0\n \n if 11 in cards and sum(cards) > 21:\n cards.remove 11\n cards.append 1\n return sum(cards)", "def score(self, X, y):\n ...", "def scoring(self):\n return -100 if self.loss_condition() else 0", "def _score_to_decision(self, score):", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.move_count < 15:\n return center_modified_score(game, player)\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def get_score(score_map, test_result):\n if test_result < score_map[20]:\n return int((test_result / score_map[20]) * 20)\n elif test_result < score_map[40]:\n return int(20 + (test_result - score_map[20]) / (score_map[40] - score_map[20]) * 20)\n elif test_result < score_map[60]:\n return int(40 + (test_result - score_map[40]) / (score_map[60] - score_map[40]) * 20)\n elif test_result < score_map[85]:\n return int(60 + (test_result - score_map[60]) / (score_map[85] - score_map[60]) * 20)\n elif test_result < score_map[100]:\n return int(85 + (test_result - score_map[85]) / (score_map[100] - score_map[85]) * 20)\n else:\n return 100", "def get_card_val(card):\n\n if card == '1':\n return 1\n if card == '2':\n return 2\n if card == '3':\n return 3\n else:\n return 4", "def balance(self, player):\n print 'hand of %s: %s'%(player.name,player.cards.hand)\n print 'hand of %s: %s'%(self.name,self.cards.hand)\n if player.cards.hand == self.cards.hand:\n return 0\n elif player.cards.hand > self.cards.hand:\n return player.bet_amount*2\n else:\n return -player.bet_amount", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return float(self._score)", "def get_points(self, challenger_id):\n if challenger_id in self.scores.keys():\n return self.scores[challenger_id]\n else:\n for i, c in enumerate(self.challengers):\n if i == challenger_id:\n return self.scores[c]\n return 0", "def calculate_performance(scorecard):\n \n scorecard_array = numpy.asarray(scorecard)\n performance = scorecard_array.sum() / scorecard_array.size\n\n return performance", "def basic_evaluate(board):\n if board.is_game_over():\n # If the game has been won, we know that it must have been\n # won or ended by the previous move.\n # The previous move was made by our opponent.\n # 
Therefore, we can't have won, so return -1000.\n # (note that this causes a tie to be treated like a loss)\n score = -1000\n else:\n score = board.longest_chain(board.get_current_player_id()) * 10\n # Prefer having your pieces in the center of the board.\n for row in range(6):\n for col in range(7):\n if board.get_cell(row, col) == board.get_current_player_id():\n score -= abs(3-col)\n elif board.get_cell(row, col) == board.get_other_player_id():\n score += abs(3-col)\n\n return score", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def score(cards):\n \n values = sorted(map(lambda x: x[0], cards))\n\n if same_suit(cards) and values[0] == 10 and values[4] == 14: # royal flush\n return (10, 14, 0) \n\n if same_suit(cards) and values[4] - values[0] == 4 and len(set(values)) == 5: # straigh flush\n return (9, values[4], 0)\n\n if len(set(values)) == 2 and values[1] == values[3]: # four of a kind\n if values[0] != values[1]:\n high_card = values[0]\n else: high_card = values[4]\n return (8, values[2], high_card)\n\n if len(set(values)) == 2 and values[1] != values[3]: # full house\n return (7, values[2], 0)\n\n if same_suit(cards): # flush\n return (6, values[4], 0)\n\n if values[4] - values[0] == 4 and len(set(values)) == 5: # straight\n return (5, values[4], 0)\n\n if len(set(values)) == 3: # three of a kind or two pair\n # three of a kind\n if values[0] == values[2]:\n return (4, values[0], max(values[3:5]))\n if values[1] == values[3]:\n return (4, values[1], max(values[0], values[4]))\n if values[2] == values[4]: \n return (4, values[2], max(values[0:2]))\n else: # two pair\n return (3, max(values[1], values[3]), dict((values.count(i), i) for i in values)[1])\n\n if len(set(values)) == 4: # one pair\n high_value_card = dict((values.count(i), i) for i in values)[2]\n s = set(values)\n s.remove(high_value_card)\n return (2, high_value_card, max(s))\n\n return (1, values[4], 0)", "def get_score(self):\n return self.__score", "def score(self):\n return None", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def calculate_score(list_of_cards):\n if sum(list_of_cards) == 21 and len(list_of_cards) == 2:\n return 0\n if 11 in list_of_cards and sum(list_of_cards) > 21:\n list_of_cards.remove(11)\n list_of_cards.append(1)\n return sum(list_of_cards)", "def disp_score():", "def get_r_score(self):\n return self.r_score", "def comp10001go_score_group(cards):\n \n # Put int a dictionary for each card which is scored based on its value\n # For example, J is 11, Q is 12 and K is 13, Ace is 20\n \n values = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, \n '0': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 20}\n \n # Spades and Clubs are black, Hearts and Diamonds are red\n suits = {'S': True, 'C': True, 'H': False, 'D': False}\n \n # First, find if the group is a valid N-of-a-kind (i.e there are 2 or more\n # cards of the same non_Ace value), the score is that value multiplied \n # by N facorial\n \n # Check if the group is valid for N-of-a-kind\n if validate_n_of_kind(cards, values) is True:\n # Calculate the score for the group with valid N-of-a-kind\n n = len(cards)\n card_value = values[cards[0][0]]\n score = card_value * factorial(n)\n return score\n \n # Check if the group is a valid run\n 
valid_run = validate_run(cards, values, suits)\n if valid_run[0] is True:\n sort_card = valid_run[1]\n score = 0\n for card in sort_card:\n score += card[0]\n return score\n \n # If the group is a singleton card or doesn't form a valid N-of-a-kind or \n # run, it should be scored as the negative sum of the scores of the \n # individual cards (scoring Aces as 20)\n else:\n if len(cards) == 1:\n return 1\n else: \n sort_card = []\n for card_num in range(len(cards)):\n value_card = values[cards[card_num][0]]\n suit_card = suits[cards[card_num][1]]\n sort_card.append((value_card, suit_card))\n \n score = 0\n for card in sort_card:\n score += (-card[0])\n return score", "def score(self):\n result = 1\n one_node = self.cups.locate_node(1)\n a = one_node.next()\n b = a.next()\n\n result = a.value * b.value\n\n return result", "def evaluate_pile(self,pile):\n cards_new = deepcopy(self.cards)\n for color in pile:\n cards_new[color] += 1\n return (ChromakinGame.score(cards_new, self.game_state['scoring']) \n - ChromakinGame.score(self.cards, self.game_state['scoring']))", "def score(self) -> int:\n return self.function(self.x, self.y)", "def getScorecard(self, **kwargs):\n lstPlayers = []\n for n,sc in enumerate(self._players):\n dct = {\n 'player': sc.doc,\n 'in': sc.dct_net['in'],\n 'out': sc.dct_net['out'],\n 'total': sc.dct_net['total'],\n 'holes': sc.dct_net['holes'],\n 'bumps': sc._bumps,\n }\n line = '{:<3} {:>2}'.format(sc.getInitials(), sc.result.course_handicap)\n for net,bump in zip(sc.dct_net['holes'][:9], sc._bumps[:9]):\n nets = '{}{}'.format(bump*'*', net if net is not None else '')\n line += ' {:>3}'.format(nets)\n line += ' {:>4}'.format(dct['out'])\n for net,bump in zip(sc.dct_net['holes'][9:], sc._bumps[9:]):\n nets = '{}{}'.format(bump*'*', net if net is not None else '')\n line += ' {:>3}'.format(nets)\n line += ' {:>4} {:>4}'.format(dct['in'], dct['total'])\n dct['line'] = line\n lstPlayers.append(dct)\n self.dctScorecard['players'] = lstPlayers\n return self.dctScorecard" ]
[ "0.811599", "0.7140242", "0.71156716", "0.7035238", "0.6973796", "0.6955599", "0.6876456", "0.6845327", "0.6747479", "0.6636815", "0.6617078", "0.66109943", "0.6589633", "0.6580763", "0.65789396", "0.65678483", "0.654564", "0.65092874", "0.65078074", "0.6452383", "0.64124787", "0.64066875", "0.64042985", "0.6373427", "0.6346619", "0.63368267", "0.6328852", "0.6207419", "0.61720943", "0.61526626", "0.6112692", "0.6094956", "0.6078382", "0.6070677", "0.6058523", "0.60496575", "0.60127145", "0.59643877", "0.595779", "0.59517336", "0.59283686", "0.5908376", "0.5908376", "0.5908376", "0.5905326", "0.5893936", "0.58922386", "0.5880275", "0.58526236", "0.584116", "0.5826184", "0.5825219", "0.58173764", "0.581569", "0.57926077", "0.5783365", "0.5782662", "0.57795566", "0.57772535", "0.5766961", "0.5753972", "0.5727733", "0.57212913", "0.5718943", "0.571405", "0.5703574", "0.56979305", "0.5697622", "0.5693798", "0.5681625", "0.56796384", "0.56792414", "0.5677649", "0.5674774", "0.5671167", "0.566945", "0.5666337", "0.5663489", "0.5661231", "0.5659049", "0.5657636", "0.5649202", "0.5649202", "0.5649202", "0.5644858", "0.5644278", "0.5644129", "0.56427294", "0.56385624", "0.5628913", "0.5619518", "0.56173205", "0.5613932", "0.5608553", "0.56059355", "0.56024426", "0.5599334", "0.5597725", "0.55975735", "0.55939955", "0.55869627" ]
0.0
-1
Finds the total points that the hand h has and returns that value
def total(h): return sum(i.points() for i in h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_points(self):\n points = 0\n for power in self.stats['powers']:\n points += self.stats['powers'][power]\n return points", "def calculate_points(hand): \r\n hand_value = 0\r\n ace_count = 0 \r\n \r\n #Finds value of non-Ace cards, and counts number of Aces.\r\n for card in hand:\r\n if card[0] == 'Ace':\r\n ace_count += 1\r\n else:\r\n # Calls card_value function to evaluate the card.\r\n hand_value += card_value(card) \r\n \r\n #Ace card present\r\n if ace_count > 0:\r\n return ace_hand_value(ace_count, hand_value)\r\n \r\n #Implied \"if ace_count == 0:\"\r\n return hand_value", "def get_points(self, guess, current_card, next_card):\n\n # (AH)\n if guess.lower() == \"h\" and next_card > current_card:\n points = 100\n elif guess.lower() == \"l\" and next_card < current_card:\n points = 100\n else:\n points = -75\n\n # (AH) points will be added to the total score in Director class.\n return points", "def calculateHandlen(hand):\n # TO DO... <-- Remove this comment when you code this function\n sum = 0\n for value in hand.values():\n sum += value\n return sum", "def hand_points(hand):\n points = [[]]\n branch = 1\n for card in hand:\n if not card[\"is_hidden\"]:\n if card[\"value\"].isnumeric():\n for possibility in range(branch):\n points[possibility].append(int(card[\"value\"]))\n elif card[\"value\"] == \"A\":\n for possibility in range(branch):\n # Ace is 1 or 11. Creating the two possibility\n points.append(points[possibility] + [11]) \n points[possibility].append(1)\n branch += 1\n else:\n # Left are the face value of 10\n for possibility in range(branch):\n points[possibility].append(10)\n\n score = list(zip([sum(branch) for branch in points], points))\n score.sort(key=lambda x: x[0], reverse=True)\n\n for total, points in score:\n if total == 21 and len(hand) == 2:\n return total, \"BlackJack!\"\n if total <= 21:\n if 1 in points and 11 in points:\n return total, None\n if 1 in points: \n return total, \"Soft\"\n if 11 in points:\n return total, \"Hard\"\n else:\n return total, None\n\n # If you get there, you have lost or you had empty hand \n # or all card in hand was hiddien\n if score:\n return score[-1][0], None\n else:\n return 0, None", "def calculateHandlen(hand):\n\n ans = 0\n\n lista = hand.values()\n for index in lista:\n \tans += index\n\n return ans", "def calculateHandlen(hand):\n # TO DO... 
<-- Remove this comment when you code this function\n l=0\n for v in hand.values():\n l+=v\n return l", "def hand_total(self):\n\n total = 0\n for card in self.__hand:\n total += card.blackjack_value()\n return total", "def sumHandReturnPoints(self, valueLs): # card is the element in valueLs\n #Ace is dealt with here, assume Ace to be 11 initially, decreasing by 10 per Ace if sum > 21\n rowSum = 0\n AceCount = 0\n for ele in valueLs:\n rank = ele.get_rank()\n if rank == 1:\n rank = 11\n AceCount += 1 # serve as flag\n rowSum += rank\n while(AceCount!=0):\n if rowSum > 21:\n rowSum -= 10\n AceCount -= 1\n points = self.countPoints(rowSum)\n return points", "def calculateHandlen(hand):\n return sum(hand.itervalues())", "def calculate_points(this_hand):\n # Check to see if hand got dealt an Ace and whether 11 points or 1 point\n total_points = 0\n int_ace_count = 0\n\n # For each card, add together all the points\n for each_card in this_hand:\n total_points += each_card.get_points()\n\n # Check for Aces, get the name of the card\n this_card_name = each_card.get_name()\n\n if this_card_name == \"A\":\n int_ace_count += 1\n\n # How to determine if Aces are worth 1 or 11\n # A - 1 or 11\n # AA - 2 or 12\n # AAA - 3 or 13\n # AAAA - 4 or 14\n\n if int_ace_count > 0:\n # Add 10 points to the total if it doesn't bust the hand\n if (total_points + 10) <= 21:\n total_points += 10\n\n return total_points", "def get_points(self):\n self.round_points = 0\n for die in self.dice:\n if die == 1:\n self.round_points += 100\n elif die == 5:\n self.round_points += 50\n return self.round_points", "def calc_points_harbor(self):\n points = 0\n if self.cnt_1 + self.cnt_2 + self.cnt_3 + self.cnt_4 + self.cnt_5 >= 2:\n hor = 0\n for i in range(4):\n j = 0\n while j < 5 and ord(self.b[i * 5 + j]) >= 54:\n j += 1\n if j < 4:\n start = j\n j += 1\n while j < 5 and ord(self.b[i * 5 + j]) < 54:\n j += 1\n length = j - start\n if length > hor:\n hor = length\n vptab_harbor = (0, 0, 3, 7, 12, 18)\n points += vptab_harbor[hor]\n ver = 0\n for j in range(5):\n i = 0\n while i < 4 and ord(self.b[i * 5 + j]) >= 54:\n i += 1\n if i < 3:\n start = i\n i += 1\n while i < 4 and ord(self.b[i * 5 + j]) < 54:\n i += 1\n length = i - start\n if length > ver:\n ver = length\n points += vptab_harbor[ver]\n if 'cust' in args.exp:\n if ver == 4 or hor == 5:\n points += 5\n points += 2 * self.cnt_2 + 3 * self.cnt_3\n return points", "def get_points(self, guess, drawn_card, prev_card):\n points_owed = 0\n\n if guess.lower() == \"higher\":\n if drawn_card >= prev_card:\n points_owed = 100\n else:\n points_owed = -75\n\n elif guess.lower() == \"lower\":\n if drawn_card <= prev_card:\n points_owed = 100\n else:\n points_owed = -75\n\n return points_owed", "def calc_points_tower(self):\n points = 0\n cnt_tower = 0\n vptab_tower = (0, 1, 3, 6, 10, 15)\n for i in range(20):\n if self.b[i] == 'T':\n points += vptab_tower[self.f[i]]\n cnt_tower += 1\n if 'poli' in args.exp:\n points += max(self.f)\n if 'scho' in args.exp:\n points += cnt_tower\n return points", "def score_on_hands(cards_on_hand):\r\n score = 0\r\n straightCount = 0\r\n max_card = 0\r\n suite_dict = {}\r\n face_dict = {}\r\n transfer_dict = {'A':1,'J':11,'Q':12,'K':13}\r\n card_face = []\r\n '''Circulate the player's hand, build a list of points and a suit dict'''\r\n for index in range(len(cards_on_hand)):\r\n if str(cards_on_hand[index])[1] in transfer_dict:\r\n card_face.append(transfer_dict.get(str(cards_on_hand[index])[1]))\r\n elif str(cards_on_hand[index])[1] == '1':\r\n 
card_face.append(10)\r\n else:\r\n card_face.append(int(str(cards_on_hand[index])[1]))\r\n suite_dict[str(cards_on_hand[index])[0]] = 1\r\n '''Because 1 can be treated as 1 or 14, so if 1 exists, add 14 to the end of the list to calculate flush'''\r\n if 1 in card_face:\r\n card_face.append(14)\r\n\r\n '''Check straight, if it is straight, straight should be 4'''\r\n for face in range(len(card_face)-1):\r\n if card_face[face] +1 == card_face[face+1] :\r\n straightCount +=1\r\n\r\n '''Detect the number of cards of the same number'''\r\n for face in card_face:\r\n\r\n if face not in face_dict:\r\n face_dict[face] = 1\r\n else:\r\n face_dict[face] += 1\r\n\r\n '''Store the maximum number of points'''\r\n max_card = card_face[len(card_face)-1]\r\n\r\n '''Calculate player score'''\r\n if straightCount == 4:\r\n score+= 8\r\n\r\n if len(suite_dict) == 1:\r\n score+= 9\r\n\r\n for values in face_dict.values():\r\n if values == 2:\r\n score += 3\r\n elif values == 3:\r\n score += 7\r\n elif values == 4:\r\n score += 11\r\n\r\n return (score, max_card)", "def update_points(self):\n #Calculate Upper Section\n total = 0\n for box in self._upper_section:\n total += box.points\n self._upperSum = total\n\n if total >= 63:\n self._bonus = 35\n total += 35\n self._upperTotal = total\n\n # Calculate Lower Section\n total = 0\n for box in self._lower_section:\n total += box.points\n\n if self.get_box(\"Yahtzee\").points > 0:\n total = total + (self._yahtzee_count - 1) * 100 # Yahtzee Bonus\n\n self._lowerTotal = total\n\n #Total Points\n self._grandTotal = self._upperTotal + self._lowerTotal", "def total_points(self, **kwargs):\n points = 0.0\n for key, value in self.stat_data.items():\n points = points + STATS[key][1](value)\n return round(points, self.__class__.default_round)", "def getPoints(self):\n count = 0\n for card in self.cards:\n if card.rank > 9:\n count += 10\n elif card.rank == 1:\n count += 11\n else:\n count += card.rank\n # Deduct 10 if Ace is available and needed as 1\n for card in self.cards:\n if count <= 21:\n break\n elif card.rank == 1:\n count -= 10\n return count", "def calculateHandlen(hand):\r\n sum=0\r\n dict=hand.copy()\r\n for key in dict:\r\n sum+=dict[key]\r\n return sum", "def calculateHandlen(hand):\n result = 0\n for k in hand:\n \tresult = result + hand.get(k,0)\n return result", "def sum_points(self) -> int:\n return sum([card.rank_value for card in self.deck.cards])", "def points(self):\r\n\t\tif self.rank() >= 9:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn 0", "def h(self):\n h = 0\n for y, row in enumerate(self._tiles):\n for x, tile in enumerate(row):\n h += math.fabs(x - self.goals[tile][0]) + \\\n math.fabs(y - self.goals[tile][1])\n return h", "def hand_value(hand):\n val = 0 \n for card in hand:\n val += card.value\n\n return val", "def score(self, hand, position):\n\n try:\n assert self.grid[position][1] == \"---\"\n except AssertionError:\n print self\n print position\n raise FilledInError\n except KeyError:\n print \"\\nCheck your code. 
This is not a valid position:\", position, \"\\n\"\n raise\n\n if position.startswith(\"n\"): # Return sum of relevant number\n n = int(position[1])\n return sum(d for d in hand.dice if d == n)\n\n elif position in [\"k3\", \"k4\", \"ch\"]: # Return total sum\n if position == \"k3\" and hand.max_tally()[0] < 3:\n return 0 # The is not a three of a kind\n elif position == \"k4\" and hand.max_tally()[0] < 4:\n return 0 # The is not a four of a kind\n return sum(hand.dice)\n\n elif position in [\"fh\", \"ss\", \"ls\", \"yz\", \"yb\"]: # Return fixed score\n if position == \"fh\":\n tallies = hand.get_dicedict().values()\n if 1 in tallies:\n return 0 # This is not a full house\n\n elif position in [\"ss\", \"ls\"]:\n ds = \"\".join(str(x) for x in hand.sort_by_value())\n if position == [\"ss\"]:\n if \"1234\" not in ds and \"2345\" not in ds and \"3456\" not in ds:\n return 0\n else:\n if \"12345\" not in ds and \"23456\" not in ds:\n return 0\n\n else:\n if hand.max_tally()[0] < 5:\n return 0 # This is not a yahtzee\n if position == \"yb\" and self.grid[\"yz\"] == \"---\":\n return 0 # YB only scores points if there already is a YZ\n\n return fixed_scores[position]\n\n else:\n raise InvalidPositionError", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def value(self, hand):\n return sum(self.accelerations[hand]) / (sum(self.velocities[hand])+.1)", "def getNumberPoints(self, move):\r\n (current_point_white, current_point_black) = self._board.get_nb_pieces()\r\n self._board.push(move)\r\n (new_point_white, new_point_black) = self._board.get_nb_pieces()\r\n self._board.pop()\r\n \r\n if(self._mycolor == 1): #black\r\n return (new_point_black-current_point_black) \r\n else:\r\n return (new_point_white-current_point_white)", "def my_hitpoints(state):\n return state['gladiators'][state['current_player']]['cur_hp']", "def get_points(self):\n return self.card_points", "def full_house_points(dice_list):\n if check_full_house(dice_list) or check_yahtzee(dice_list):\n return 25\n else:\n return 0", "def sum_(hand: list):\n vals = [card.rank for card in hand]\n intvals = []\n while len(vals) > 0:\n value = vals.pop()\n try:\n intvals.append(int(value))\n except ValueError:\n if value in ['K', 'Q', 'J']:\n intvals.append(10)\n elif value == 'A':\n intvals.append(1) # Keep it simple for the sake of example\n if intvals == [1, 10] or intvals == [10, 1]:\n print(\" Blackjack!\")\n return(21)\n else:\n points = sum(intvals)\n print(\" Current score: {}\".format(str(points)))\n return(points)", "def calc_points_shop(self):\n rem_pop = self.popula - self.popula_used\n points = min(self.cnt_shop, rem_pop // 5) * 11\n rem_shop = self.cnt_shop - rem_pop // 5\n vptab_shop = (0, 1, 2, 4, 7)\n if rem_shop > 0:\n points += vptab_shop[rem_pop % 5]\n penalty_popula = max(rem_pop - self.cnt_shop * 5, 0)\n points -= penalty_popula\n return points", "def num_quadrature_points(self) -> int:", "def get_value(self):\n global VALUES\n hand_value = 0\n has_ace = False\n\n for card in self.hand:\n v = VALUES[card.get_rank()]\n hand_value += v\n if card.get_rank() is 'A':\n has_ace = True\n\n if not has_ace:\n return hand_value\n else:\n if hand_value + 10 <= 21:\n return hand_value + 10\n else:\n return hand_value", "def countPoints(self,sumation):\n if sumation == 21:\n points = 7\n elif sumation == 20:\n points = 5\n elif sumation == 19:\n points = 4\n elif sumation == 18:\n points = 3\n elif 
sumation == 17:\n points = 2\n elif sumation <=16:\n points = 1\n else:\n points = 0\n return points", "def _calculate_points(tree):\n leafs = []\n points = 0\n\n def order(tree):\n if tree is not None:\n if tree.get_left_child() is not None:\n order(tree.get_left_child())\n elif tree.get_right_child() is not None:\n order(tree.get_right_child())\n else:\n leafs.append(tree.key)\n\n order(tree)\n\n for board in leafs:\n if board.is_winner()[0]:\n if board.is_winner()[1] == 5:\n points -= 1\n else:\n points += 1\n\n return points", "def best_hand(hands):\r\n best_val = 0\r\n sum = 0\r\n hand = None\r\n for h in hands:\r\n for t in h:\r\n sum = sum + t[1]\r\n if sum > best_val:\r\n best_val = sum\r\n hand = h\r\n\r\n return hand", "def score(self, hand, top_card):\n powerset = GameState.powerset(list(hand).append(top_card))\n score = 0\n for meld in powerset:\n score += self.score_meld(meld)\n return score", "def calculate_points(card):\n for value in scores.keys():\n if value == card.value:\n card_score = scores[card.value]\n return card_score", "def score(self):\n hand = sorted(self.hand)\n score = -self.chips\n index = 0\n while index < len(hand):\n if index == 0 or hand[index-1] != hand[index]-1:\n score += hand[index]\n index += 1\n return score", "def get_value(self):\r\n value, aces = 0, 0\r\n for card in self.hand:\r\n value += VALUES[card.get_rank()]\r\n # Keep track of the aces in Hand\r\n if card.get_rank() == \"A\":\r\n aces += 1\r\n if aces >= 1 and value + 10 <= 21:\r\n value += 10\r\n return value", "def piece_evaluate(self):\r\n evaluation = 0\r\n for player in range(2):\r\n player_sign = player * 2 - 1\r\n evaluation += player_sign * 4 * count_bits(self.bitboard_king[player])\r\n evaluation += player_sign * 2 * count_bits(self.bitboard_pawns[player])\r\n\r\n return evaluation", "def total_points(self):\n total_points = 0.0\n for ingredient in self.ingredients:\n if (ingredient.has_property('ppg')):\n # Use given value if specified\n total_points += ingredient.property('ppg').to('ppg') * ingredient.quantity.to('lb')\n else:\n total_points += EXTRACTS[ingredient.type] * ingredient.quantity.to('lb')\n return(Quantity(total_points, 'points'))", "def getTotalDistance(self,points):\n return sum([self.getDistance(points[i],points[i+1]) for i in range(len(points)-1)])", "def get_h_score(start, end):\n #uses a heuristic function\n #return 0 #used if you want Djikstras algorithm\n return (abs(end[0]-start[0])+abs(end[1]-start[1])) * 10", "def numberOfPoints(self):\n return 20000", "def getHP(self):\n return len(self.deck)", "def _getDamagePoints(self, weapon, hitloc):\n try:\n points = self.damage[weapon][int(hitloc) - 1]\n self.debug(\"_getDamagePoints(%s, %s) -> %d\" % (weapon, hitloc, points))\n return points\n except (KeyError, IndexError), err:\n self.warning(\"_getDamagePoints(%s, %s) cannot find value : %s\" % (weapon, hitloc, err))\n return 15", "def calculate_score(hand,hand_value):\n first,second,third,fourth,fifth,*_=[rank for rank,suit in hand]\n if fifth==12:\n fifth=-1\n return calculate_score_pairs(hand_value,first,second,third,fourth,fifth)", "def firstPointAfter_h(self, points, h):\n if h > points.max():\n raise ValueError(\"h outside section\\n h > max\")\n elif h < points.min():\n raise ValueError(\"h outside section\\n h < min\")\n else:\n #print 'points:', points, h\n for i, p in enumerate(points):\n #print i, p\n if p > h:\n return i", "def heuristic(self):\n game_score = (self.get_game_score(), 0.85)\n road_score = (self.get_longest_road_score(), 0.05)\n 
steps_score = (self.get_steps_available_score(), 0.05)\n reachable_nodes_score = (self.get_reachable_nodes_score(), 0.05)\n heuristics = [game_score, road_score, steps_score, reachable_nodes_score]\n result = 0\n for score, weight in heuristics:\n result += score * weight\n if DEBUG_PRINT:\n print(f\"Heuristic value for location {self.loc} is {result}\")\n print(f\"\\treachable score: {reachable_nodes_score[0] * reachable_nodes_score[1]}\")\n print(f\"\\tsteps score: {steps_score[0] * steps_score[1]}\")\n print(f\"\\tlongest road score: {road_score[0] * road_score[1]}\")\n print(f\"\\tgame score: {game_score[0] * game_score[1]}\")\n return result", "def get_value(self):\n #Finds all of the values in the cards\n score_list=[Card.get_value(card) for card in self.cards]\n #Sums the scores\n if self.num_cards() > 0:\n total_score=reduce((lambda x,y: x+y),score_list)\n return total_score\n else:\n return 0", "def threes_points(dice_list):\n return dice_list.count(3) * 3", "def count_points(p1,p2):\n\tif p1 > p2:\n\t\tdrawWinner(1)\n\t\treturn 1\n\telif p2 > p1:\n\t\tdrawWinner(2)\n\t\treturn 2\n\telse:\n\t\tdrawWinner(3)\n\t\treturn 3", "def scoreGame(self):\n # create valueLs[card1,card2,...], pass it to sumHandReturnPoints(valueLs) or twoCardReturnPoints(valueLs)\n scoreLs = []\n ### Score of row\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n points = self.sumHandReturnPoints(valueLs)\n scoreLs.append(points)\n\n ### Score of 4-card column\n for offset in range(0,3): # 0,1,2\n tmpLs = []\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n if len(valueLs) == 5:\n iterStart = 1\n else:\n iterStart = 0\n card = valueLs[iterStart+offset]\n tmpLs.append(card)\n points = self.sumHandReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Score of 2-card column\n #(1) 1st column\n valueLs1 = self.table['row1']\n valueLs2 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs1[0].get_rank())\n tmpLs.append(valueLs2[0].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points)\n #(2) 5th column\n valueLs3 = self.table['row1']\n valueLs4 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs3[-1].get_rank())\n tmpLs.append(valueLs4[-1].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Add up scoreLs\n sumPoints = 0\n for points in scoreLs:\n sumPoints += points\n return sumPoints", "def _calculate_score(lsh, minhash, total_num_events):\n neighbours = lsh.query(minhash)\n return float(len(neighbours)) / float(total_num_events)", "def points(self):\r\n\t\tif self.rank() in self.point_sysm:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn (self.rank() + 2)", "def current_points(self):\n return self.points_on_level_exit * self.has_exited()", "def _get_halluc_points(_, halluc_pts):\n if len(halluc_pts) > 0:\n return halluc_pts\n else:\n return halluc_pts", "def get_pvalue_thd(self):\n terminals_values = []\n for terminal in self.feature_tree.get_terminals():\n temp = self.get_mannwitneyu_pvalue(terminal)\n terminals_values.append(temp)\n if temp == 1:\n print('non siginificant')\n while 0 in terminals_values:\n terminals_values.remove(0)\n self.pvalue_thd = min(self.pvalue_thd,np.mean(terminals_values))\n #print('pvalue_thd',self.pvalue_thd)", "def heuristicValueOfPosition(currPositions):\n hVal = 0;\n\n for y in range(1, n+1): #1,2,3\n for x in range(1, n+1):\n val = currPositions[y][x];\n if ((val == 0) or (goalPositions[val] == (y,x))): #val 0 means blank\n continue;\n else:\n hVal += 
abs(y-goalPositions[val][0]) + abs(x-goalPositions[val][1])\n\n return hVal;", "def calc_points_park(self):\n be = ['_'] * 8\n be += self.b[ 0: 5]\n be += ['_'] * 2\n be += self.b[ 5:10]\n be += ['_'] * 2\n be += self.b[10:15]\n be += ['_'] * 2\n be += self.b[15:20]\n be += ['_'] * 8\n cnt_PG = 0\n cnt_P = 0\n points = 0\n vptab_park = (0, 2, 4, 7, 11)\n for i in range(8, 34):\n if be[i] == 'P' or be[i] == 'G':\n cnt_PG += 1\n if be[i] == 'P':\n cnt_P += 1\n neigh_tower_office = 0\n if be[i - 1] == 'T' or be[i - 1] == 'O':\n neigh_tower_office += 1\n if be[i + 1] == 'T' or be[i + 1] == 'O':\n neigh_tower_office += 1\n if be[i - 7] == 'T' or be[i - 7] == 'O':\n neigh_tower_office += 1\n if be[i + 7] == 'T' or be[i + 7] == 'O':\n neigh_tower_office += 1\n points += vptab_park[neigh_tower_office]\n if 'park' in args.exp:\n points += cnt_PG\n if 'repr' in args.exp:\n recycle_energy = max(self.energy - self.energy_used, 0)\n points += recycle_energy\n else:\n penalty_energy = max(self.energy - self.energy_used - cnt_P, 0)\n points -= penalty_energy\n return points", "def score(hand):\r\n \r\n if not hand:\r\n return 0\r\n \r\n max_score = 0\r\n \r\n for dice in hand:\r\n temp = list(hand).count(dice) * dice\r\n if temp > max_score:\r\n max_score = temp\r\n \r\n return max_score", "def get_health_points(self):\n return self.__health_points", "def chance_points(dice_list):\n return sum(dice_list)", "def score(hand):\n # print \"hand:\", hand\n score = 0\n quants = get_quantities(hand)\n idx = -1\n matches = False\n\n for quant in quants:\n if quant > 1:\n matches = True\n\n # print \"matches:\", matches\n\n if matches:\n for quant in quants:\n idx += 1\n if quant > 1:\n score += (idx + 1) * quants[idx]\n # print \"idx1:\", idx\n return score\n\n elif not matches:\n for quant in quants:\n idx += 1\n score += (idx + 1) * quants[idx]\n # print \"idx2:\", idx\n return score", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def score(self):\n xg, yg = self.goal\n xe, ye = self.empty_node()\n score = len(self.history) + 4*(xg + yg)\n if xg == 1:\n score -= 3\n if ye > 1:\n score += ye - 1\n dx = abs(xe - xg + 1)\n if xg and dx:\n score += dx\n return score", "def get_total_health(self,obs):\n total_health = 0\n for unit in obs.observation.raw_units:\n if(unit.alliance == PlayerRelative.SELF):\n total_health += unit[FeatureUnit.health]\n return total_health", "def get_max_score(self):\r\n return sum(self.maxpoints.values())", "def get_stats(self):\n\n win_points = 0\n lose_points = 0\n\n for username in self.bets:\n bet_for_win, points = self.bets[username]\n if bet_for_win:\n win_points += points\n else:\n lose_points += points\n\n return win_points, lose_points", "def yahtzee_points(dice_list):\n if of_a_kind_size(dice_list) >= 5:\n return 50\n else:\n return 0", "def get_value(self):\n \n value = 0\n ace = False\n\n for card in self.hand:\n value += VALUES[card.get_rank()]\n \n if (card.get_rank() == 'A'):\n ace = True\n \n if not ace:\n return value\n else:\n if (value + 10) <= 21:\n return (value + 10)\n else:\n return value", "def punch(self, a_fighter):\n points = int(uniform(0.7,1.0)*10*self.get_strength()/a_fighter.get_agility())\n a_fighter.__health_points = a_fighter.get_health_points() - points\n return a_fighter.__health_points", "def houses(self):\n num = 0\n points = 0\n # TODO: add pattern matching\n if \"s\" in self.__as_str:\n num += 1\n if \"f\" in self.__as_str:\n num += 1\n if \"1\" in self.__as_str or \"2\" in self.__as_str or 
\"3\" in self.__as_str or \"4\" in self.__as_str:\n num += 1\n if \"o\" in self.__as_str:\n num += 1\n if \"p\" in self.__as_str:\n num += 1\n for i in range(4):\n for j in range(4):\n if self.as_list[i][j] == 'h':\n if 'f' in self.neighbours(i, j):\n points += 1\n else:\n points += num\n return points", "def calculate_score(player_cards):\n score = sum(player_cards)\n return score", "def fours_points(dice_list):\n return dice_list.count(4) * 4", "def score(hand):\r\n \r\n max_score = []\r\n \r\n for dice in hand:\r\n max_score.append(hand.count(dice) * dice)\r\n \r\n return max(max_score)", "def strategy(hand, num_die_sides):\n result = (0.0, ())\n current_value = float('-inf')\n \n for item in gen_all_holds(hand):\n value = expected_value(item, num_die_sides, len(hand) - len(item))\n if value > current_value:\n current_value = value\n result = (current_value, item)\n \n return result", "def get_value(self) -> float:\n return self.points[0, 0]", "def calc_points_expansion(self):\n tot_points = 0\n if 'capi' in args.exp:\n be = ['_'] * 8\n be += self.b[ 0: 5]\n be += ['_'] * 2\n be += self.b[ 5:10]\n be += ['_'] * 2\n be += self.b[10:15]\n be += ['_'] * 2\n be += self.b[15:20]\n be += ['_'] * 8\n max_points = 0\n for i in range(8, 34):\n if be[i] == 'U':\n points = 0\n if be[i - 1] == 'P' or be[i - 1] == 'G':\n points += 5\n elif be[i - 1] == 'S':\n points += 3\n elif be[i - 1] == 'U':\n points += 2\n elif be[i - 1] == 'A' or be[i - 1] == 'F' or ord(be[i - 1]) < 54:\n points -= 5\n if be[i + 1] == 'P' or be[i + 1] == 'G':\n points += 5\n elif be[i + 1] == 'S':\n points += 3\n elif be[i + 1] == 'U':\n points += 2\n elif be[i + 1] == 'A' or be[i + 1] == 'F' or ord(be[i + 1]) < 54:\n points -= 5\n if be[i - 7] == 'P' or be[i - 7] == 'G':\n points += 5\n elif be[i - 7] == 'S':\n points += 3\n elif be[i - 7] == 'U':\n points += 2\n elif be[i - 7] == 'A' or be[i - 7] == 'F' or ord(be[i - 7]) < 54:\n points -= 5\n if be[i + 7] == 'P' or be[i + 7] == 'G':\n points += 5\n elif be[i + 7] == 'S':\n points += 3\n elif be[i + 7] == 'U':\n points += 2\n elif be[i + 7] == 'A' or be[i + 7] == 'F' or ord(be[i + 7]) < 54:\n points -= 5\n if points > max_points:\n max_points = points\n tot_points += max_points\n if 'plan' in args.exp:\n nb_b_in_district = [0, 0, 0, 0, 0]\n i_to_district = (0, 0, 1, 2, 2, 0, 0, 1, 2, 2, 3, 3, 1, 4, 4, 3, 3, 1, 4, 4)\n for i in range(20):\n if self.b[i] != '_':\n nb_b_in_district[i_to_district[i]] += 1\n points = len([1 for x in nb_b_in_district if x == 4])\n if points == 5:\n points = 6\n tot_points += points\n if 'fire' in args.exp:\n be = ['_'] * 8\n be += self.b[ 0: 5]\n be += ['_'] * 2\n be += self.b[ 5:10]\n be += ['_'] * 2\n be += self.b[10:15]\n be += ['_'] * 2\n be += self.b[15:20]\n be += ['_'] * 8\n max_points = 0\n for i in range(8, 34):\n if be[i] == 'U':\n points = 0\n if be[i - 1] == 'A' or be[i - 1] == 'F':\n points += 3\n if be[i + 1] == 'A' or be[i + 1] == 'F':\n points += 3\n if be[i - 7] == 'A' or be[i - 7] == 'F':\n points += 3\n if be[i + 7] == 'A' or be[i + 7] == 'F':\n points += 3\n if points > max_points:\n max_points = points\n tot_points += max_points\n return tot_points", "def calc_points_all_expansions(self):\n points = self.calc_points_expansion()\n if 'cust' in args.exp and self.cnt_1 + self.cnt_2 + self.cnt_3 + self.cnt_4 + self.cnt_5 >= 2:\n hor = 0\n for i in range(4):\n j = 0\n while j < 5 and ord(self.b[i * 5 + j]) >= 54:\n j += 1\n if j < 4:\n start = j\n j += 1\n while j < 5 and ord(self.b[i * 5 + j]) < 54:\n j += 1\n length = j - start\n 
if length > hor:\n hor = length\n ver = 0\n for j in range(5):\n i = 0\n while i < 4 and ord(self.b[i * 5 + j]) >= 54:\n i += 1\n if i < 3:\n start = i\n i += 1\n while i < 4 and ord(self.b[i * 5 + j]) < 54:\n i += 1\n length = i - start\n if length > ver:\n ver = length\n if ver == 4 or hor == 5:\n points += 5\n if 'hall' in args.exp:\n points += self.cnt_public\n if 'park' in args.exp:\n cnt_PG = 0\n for i in range(20):\n if self.b[i] == 'P' or self.b[i] == 'G':\n cnt_PG += 1\n points += cnt_PG\n if 'poli' in args.exp:\n points += max(self.f)\n if 'repr' in args.exp:\n recycle_energy = max(self.energy - self.energy_used, 0)\n points += recycle_energy\n if 'scho' in args.exp:\n cnt_tower = 0\n for i in range(20):\n if self.b[i] == 'T':\n cnt_tower += 1\n points += cnt_tower\n return points", "def nr_points(self):\n return len(self.x)", "def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore", "def get_total_supply() -> int:\n return total_supply", "def GOAL_TOTAL() -> int:\n return 21", "def get_npoints(self, answer_id):\r\n npoints = self.get_property(answer_id, 'npoints')\r\n if npoints is not None:\r\n return npoints\r\n elif self.is_correct(answer_id):\r\n return 1\r\n # if not correct and no points have been assigned, return 0\r\n return 0", "def getPairStats(): \r\n\r\n #calculcate remainder of equations\r\n s_xx = x_sum_square - (1/n)*(x_sum**2)\r\n s_yy = y_sum_square - (1/n)*(y_sum**2)\r\n s_xy = xy_sum - (1/n)*x_sum*y_sum\r\n \r\n return s_xx, s_yy, s_xy", "def get_pH(self):\n rawline = self.f.readline()\n while rawline:\n rematch = self.solvphre.match(rawline)\n if rematch:\n return float(rematch.groups()[0])\n rawline = self.f.readline()", "def get_h_score(self):\n if self._h_score is None:\n self._h_score = self._heuristic.compute(self)\n return self._h_score", "def total_rewards(self) -> float:\n return self.__total_rewards", "def total_present_value_rule(_m):\r\n\r\n return sum(m.DELTA[y] * (m.INV[y] + m.FOM[y] + m.OP[y]) for y in m.Y) + m.EOH", "def get_n_and_p(dices, asked, tabs=False):\n\tp = 0\n\tfor value in dices:\n\t\tif value in asked.keys() and asked[value] > 0:\n\t\t\tasked[value] -= 1\n\t\telse:\n\t\t\tp += 1\n\tif not tabs:\n\t\tn = sum(asked.values())\n\telse:\n\t\tn = asked\n\treturn n, p", "def _calc_hp(self, average=False):\n dice = self.hd + self.constitution\n if average:\n return round((dice * self.level).average)\n\n return max(sum((dice * self.level).roll()), 1)", "def get_hcost(self):\n hvcost = self.get_hvcost()\n dcost = self.get_dcost()\n hcost = hvcost + dcost\n return hcost", "def harmony(self):\n back = 0\n for i in range(self.stems.shape[0]):\n tmp = N.sum(self.stems[i][1:, 1])\n if tmp == 0 or tmp == 3:\n back += 1\n ### FOR PROPORTIONAL HARMONY ###\n #return float(back)/self.stems.shape[0]\n return back", "def calc_match_points(self, match):\n if match.winner == match.TIE:\n match.home.tournament_score += 1\n match.away.tournament_score += 1\n else:\n match.winner.tournament_score += 3\n match.loser.tournament_score += 0", "def points_earned(self):\n delta_counts = self.alive_counts - self.initial_counts\n points = self.points_table * delta_counts\n points = points.reshape(-1,72) # unravel the points for easier sum\n return np.sum(points, axis=1) + super().current_points()" ]
[ "0.69434226", "0.69146436", "0.6852298", "0.6826466", "0.6719616", "0.67074096", "0.66723466", "0.6659441", "0.65662277", "0.6562473", "0.6524262", "0.6511739", "0.64851105", "0.64222765", "0.6378741", "0.63757426", "0.636187", "0.63509446", "0.6337169", "0.6326228", "0.63157463", "0.6315296", "0.6313138", "0.62992895", "0.62611544", "0.6241056", "0.62362987", "0.6235288", "0.61928433", "0.61910546", "0.616156", "0.6125156", "0.6104505", "0.6092677", "0.6045393", "0.60422117", "0.60355544", "0.6032553", "0.6021203", "0.6018707", "0.6013984", "0.6005544", "0.6001087", "0.59821516", "0.59229815", "0.5915133", "0.59128004", "0.5904911", "0.5900737", "0.5889754", "0.5885689", "0.5881843", "0.58804446", "0.58747435", "0.58702236", "0.58602643", "0.5850612", "0.58456475", "0.58450514", "0.5843413", "0.5833671", "0.5819848", "0.57983816", "0.5796702", "0.5796102", "0.5780804", "0.57747", "0.57726055", "0.57657045", "0.57479113", "0.5747597", "0.57443804", "0.5723645", "0.57187635", "0.57153875", "0.571206", "0.57036495", "0.5694375", "0.56830645", "0.56819445", "0.568166", "0.5675625", "0.56736505", "0.56720936", "0.5668039", "0.56678385", "0.5657124", "0.56568563", "0.565172", "0.56449205", "0.56376", "0.56281763", "0.5626867", "0.5623669", "0.5615284", "0.56126946", "0.5611731", "0.5610863", "0.5609402", "0.5608" ]
0.76675487
0
Returns a deck full of cards that are defined by the cla class
def new_deck(cla): return [cla(i) for i in range(52)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_deck(self):\n\t\tsuits = [\"hearts\", \"spades\",\"diamonds\",\"clubs\"]\n\t\tcards = []\n\n\t\tfor suit in suits:\n\t\t\tif self.ace_as_eleven:\n\t\t\t\tace = Card(\"Ace\", 11, suit)\n\t\t\telse:\n\t\t\t\tace = Card(\"Ace\", 1, suit)\n\t\t\tcards.append(ace)\n\n\t\t\ttwo = Card(\"Two\", 2, suit)\n\t\t\tcards.append(two)\n\t\t\t\n\t\t\tthree = Card(\"Three\", 3, suit)\n\t\t\tcards.append(three)\n\n\t\t\tfour = Card(\"Four\", 4, suit)\n\t\t\tcards.append(four)\n\n\t\t\tfive = Card(\"Five\", 5, suit)\n\t\t\tcards.append(five)\n\n\t\t\tsix = Card(\"Six\", 6, suit)\n\t\t\tcards.append(six)\n\n\t\t\tseven = Card(\"Seven\", 7, suit)\n\t\t\tcards.append(seven)\n\n\t\t\teight = Card(\"Eight\", 8, suit)\n\t\t\tcards.append(eight)\n\n\t\t\tnine = Card(\"Nine\", 9, suit)\n\t\t\tcards.append(nine)\n\n\t\t\tten = Card(\"Ten\", 10, suit)\n\t\t\tcards.append(ten)\n\n\t\t\tjack = Card(\"Jack\", 10, suit)\n\t\t\tcards.append(jack)\n\n\t\t\tqueen = Card(\"Queen\", 10, suit)\n\t\t\tcards.append(queen)\n\n\t\t\tking = Card(\"King\", 10, suit)\n\t\t\tcards.append(king)\n\n\t\treturn cards", "def __init__(self):\n self.deck = []\n for n in range(1, 14):\n card1 = Card(n, \"diamond\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"spade\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"heart\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"club\")\n self.deck.append(card1)", "def get_deck():\n deck = []\n for suit in Suit:\n for rank in Rank:\n deck.append(Card(suit, rank))\n return deck", "def cards(self):\r\n return Cards(self)", "def __init__(self):\n \n self.deck = [Card(suit,rank) for suit in SUITS for rank in RANKS]", "def create_deck(self):\n\n deck = []\n\n # Suits and face values\n suits = ['Clubs', 'Diamonds', 'Hearts', 'Spades']\n face_values = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n\n # Creating deck\n for suit in suits:\n for value in face_values:\n deck.append(Card(suit[0], value))\n\n # Adding jokers\n if self.jokers:\n deck.append(Card('Jk', 0))\n deck.append(Card('Jk', 0))\n\n return deck", "def get_cards(self):\n return [Flashcard.from_word(word) for word in self.get_words()]", "def print_deck(self):\n\n ls = []\n for card in self.deck:\n ls.append(card.get_card())\n print(ls)", "def __init__(self):\n self.deck = []\n\n for i in SUITS:\n for j in RANKS:\n self.deck.append(Card(i, j))", "def __init__(self):\r\n \r\n self.deck_of_cards= deque([(y,x) for x in range(1,14) for y in Cards.shades])", "def __repr__(self):\n return f\"Deck({self.cards})\"", "def fill_standard_deck(self):\n for name in [\"ace\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\", \"ten\", \"jack\",\n \"queen\", \"king\"]:\n for suit in [\"hearts\", \"diamonds\", \"spades\", \"clubs\"]:\n self.cards.append(card.Card(name, suit, self.card_values[name]))", "def get_card_list(self):\n return self.cards", "def __init__(self):\n self.deck_of_cards = [Card(value[j], suit[i])\\\n for i in range(len(suit))\\\n for j in range(len(value))]", "def create_deck():\r\n deck = []\r\n faces = [2,3,4,5,6,7,8,9,10,\r\n 'Jack','Queen','King','Ace']\r\n suits = ['Spades', 'Diamonds', 'Clubs', 'Hearts']\r\n for face in faces:\r\n for suit in suits:\r\n # Creates a card-tuple and adds it to the deck.\r\n deck.append((face, suit))\r\n \r\n return deck", "def __init__(self):\r\n self.__suit_dict = [{\"Diamonds\": 1}, {\"Spades\": 2}, {\"Harts\": 3}, {\"Clubs\": 4}]\r\n self.cards_list = []\r\n 
for suit in self.__suit_dict:\r\n for value in range(1, 14):\r\n self.cards_list.append(Card(suit, value))\r\n self.Shuffle()", "def __init__(self):\n self.deckcards = []\n for suit_by_number in range(4):\n for rank_by_number in range(1, 14):\n card = card_create.Createcard(suit_by_number, rank_by_number)\n self.deckcards.append(card)", "def get_game_cards(gameId):\n pass", "def get_cards(self):\n return [card.view_model() for card in self._deck.loc]", "def cards(self):\n return self._cards", "def generate_deck(deck_number: int) -> List[Card]:\n all_cards = list(product([i for i in range(2, 15)], CARD_SUITS)) * deck_number\n shuffle(all_cards)\n return [Card(*card) for card in all_cards]", "def getAllCards(self):\n return self._cards", "def get_deck_list(deckid):\n # Need to know if we're looking at a deckid or deckid tuple\n # TODO: Clean this up a bit (shouldn't need to support deckids or deck)\n # tuples now that I'm using Deck objects.)\n if isinstance(deckid, tuple):\n # The deckid is in deck[0]\n # Format is (deckid, deck_class)\n deckid = deckid[0]\n # http://www.hearthpwn.com/decks/listing/ + /neutral or /class\n url = 'http://www.hearthpwn.com/decks/listing/'\n css = '#cards > tbody > tr > td.col-name'\n\n cards = []\n\n # Class Cards\n pagetree = get_pagetree(url + str(deckid) + '/class')\n elements = get_elements_from_page(pagetree, css)\n for element in elements:\n card = html.tostring(element, method='text', encoding='UTF-8')\n cards.append(card)\n\n # Neutral Cards\n pagetree = get_pagetree(url + str(deckid) + '/neutral')\n elements = get_elements_from_page(pagetree, css)\n for element in elements:\n card = html.tostring(element, method='text', encoding='UTF-8')\n cards.append(card)\n\n regex = re.compile(b'^\\r\\n(.+)\\r\\n\\r\\n\\xc3\\x97 (\\d+)')\n deck = []\n for card in cards:\n match = re.search(regex, card)\n if match:\n cardname = match.group(1).decode('UTF-8')\n amount = int(match.group(2))\n deck.append(Card(cardname, amount))\n\n return deck", "def create_deck(number = 1):\n deck = []\n for suit, face in itertools.product(suit_names, face_names):\n if face == \"Ace\":\n value = 11\n elif face in ['Jack', 'Queen', 'King']:\n value = 10\n else:\n value = int(face)\n img = Image(img_path+suit+\"_\"+face + \".png\")\n state = True\n card = Card(suit, face, value, img, state)\n deck.append(card)\n random.shuffle(deck)\n return deck", "def __init__(self):\n self._cards = []\n #Add a single card for each suit and rank\n for suit in Card.SUITS:\n for rank in Card.RANKS:\n c = Card(rank, suit)\n self._cards.append(c)", "def __init__(self, cards):\n self.cards = cards", "def main():\n\t\n\tDeck = []\n\tfor suite in range(suites):\n for typecard in range(1, typecard+1):\n cards.append(typecard)", "def test_deck_contains_all_cards(self):\n\n # I'm using collections.Counter so that the order is ignored (as in a\n # set) but that multiples are accounted for.\n expected = collections.Counter([\n ('r', 'i'), ('r', 'i'), ('r', 'i'),\n ('r', 2), ('r', 3), ('r', 4), ('r', 5), ('r', 6), \n ('r', 7), ('r', 8), ('r', 9), ('r', 10),\n\n ('g', 'i'), ('g', 'i'), ('g', 'i'),\n ('g', 2), ('g', 3), ('g', 4), ('g', 5), ('g', 6),\n ('g', 7), ('g', 8), ('g', 9), ('g', 10),\n\n ('b', 'i'), ('b', 'i'), ('b', 'i'),\n ('b', 2), ('b', 3), ('b', 4), ('b', 5), ('b', 6),\n ('b', 7), ('b', 8), ('b', 9), ('b', 10),\n\n ('y', 'i'), ('y', 'i'), ('y', 'i'),\n ('y', 2), ('y', 3), ('y', 4), ('y', 5), ('y', 6),\n ('y', 7), ('y', 8), ('y', 9), ('y', 10),\n\n ('w', 'i'), ('w', 'i'), ('w', 'i'),\n ('w', 2), ('w', 
3), ('w', 4), ('w', 5), ('w', 6),\n ('w', 7), ('w', 8), ('w', 9), ('w', 10), ])\n\n self.assertEqual(expected, collections.Counter(deck.deck_gen()))", "def card_factory(rank,suit):\n pass", "def generate_card_deck() -> [Card]:\n\n card_deck = []\n\n for card_color in CardColor:\n for card_value in CardValue:\n card_deck.append(Card(card_color, card_value))\n\n return Shuffle.__shuffle_card_deck(card_deck * Shuffle._CARD_DECK_MULTIPLIER)", "def __init__(self):\n suits = [\"hearts\", \"spade\", \"diamond\", \"clubs\"]\n values = ['A','2','3','4','5','6','7','8','9','10','J','Q','K']\n self.cards = []\n for suit in suits:\n for value in values:\n self.cards.append((value, suit))", "def get_deck(self):\n deck = Deck(self.get_cards())\n return deck.as_string()", "def get_cards(self):\n card = self._starting_card\n return card", "def cards(self) -> Union[\n List[List[Tuple[int, str, str]]],\n List[Any]\n ]:\n return self._cards", "def get_card(self, suit, face):\n for card in self.deck:\n if card.suit == suit and card.value == face:\n return card", "def card(self):\r\n return Card(self)", "def card(self):\r\n return Card(self)", "def __init__ ( self ):\n \n self.__deck = []\n \n for i in range(0,7):\n self.__deck.append('1')\n \n for i in range(0,10):\n self.__deck.append('2')\n \n for i in range(0,3):\n self.__deck.append('3')\n \n #appends the event cards using the first 3 letters of the card in all caps\n self.__deck.append('SEA')\n self.__deck.append('HER')\n self.__deck.append('VIC')\n self.__deck.append('PIL')\n self.__deck.append('TRU')", "def __init__(self, cards = []):\n self.cards=cards", "def shuffle():\n deck = []\n # By Baccarat rules, there are 4 aces worth 1 point, 16 face cards and tens\n # worth 0 point, and 32 other cards worth their numerical value.\n # 8 decks are suffled together to create a shoe.\n for n in range(8):\n for i in range (32):\n deck.append((i % 8) + 2)\n \n for i in range (16):\n deck.append(0)\n \n for i in range (4):\n deck.append(1)\n \n random.shuffle(deck)\n\n return deck", "def make_deck():\r\n deck = []\r\n for i in range(1,5):\r\n for j in range(1,14):\r\n card = (i,j)\r\n deck.append(card)\r\n return deck", "def get_card_sets(self, name: str) -> List:", "def get_cards(self):\n return deepcopy(self._cards)", "def __init__(self):\n self.cards = [Card(face=card[0], value=card[1], suit=suit)\n for card in CARD_VALUES().items() for suit in CARD_SUITS()]", "def revealAll(aDeck):\r\n cardNames=''\r\n for x in range(len(aDeck)):\r\n card= aDeck[x]\r\n if (card[\"Name\"] == \"Joker\"):\r\n cardNames += card[\"Suite\"] + \" \" + card[\"Name\"]\r\n else:\r\n cardNames += card[\"Name\"] + \" of \" +card[\"Suite\"]\r\n cardNames += \"\\n\" \r\n return (cardNames)", "def prepare_deck(cards, nb_copies = 1):\n deck = []\n for _i in range(nb_copies):\n deck.extend(cards)\n shuffle(deck)\n return deck", "def get_cards(self, name):\n cards = []\n\n for card in self.cards:\n if card.name == name:\n cards.append(card)\n\n return cards", "def deck(self) -> Iterable[CardIdentifier]:\n # for some reason cards are treated quite different by NS api currently\n # so we cant simply make a shards call. 
for now we make a direct call\n # to the requester shards_xml method, since it does not insert the\n # `nation=name` parameter\n # this request returns a <CARDS><DECK><CARD/>...</DECK><CARDS> structure,\n # so we immedietly retrieve the DECK node (which contains multiple CARD nodes)\n # with [0]\n deck = as_xml(\n self.requester.shard_request(\n shards=[\"cards\", \"deck\"], nationname=self.nationname\n ).text\n )[0]\n return [CardIdentifier.from_xml(node) for node in deck]", "def buildDeck(self, resources):\n for key,value in resources.deckData.items():\n self.deck.append(resources.cards[value[self.playerClass]])\n random.shuffle(self.deck)\n self.HP = self.getHP() # Set HP", "def __init__(self):\r\n self.cards = []", "def __init__(self, deck_type=\"standard\"):\n if deck_type == \"standard\":\n self.card_values = {\n \"ace\": 1,\n \"two\": 2,\n \"three\": 3,\n \"four\": 4,\n \"five\": 5,\n \"six\": 6,\n \"seven\": 7,\n \"eight\": 8,\n \"nine\": 9,\n \"ten\": 10,\n \"jack\": 10,\n \"queen\": 10,\n \"king\": 10\n }\n self.cards = []\n self.fill_standard_deck()\n else:\n raise Exception(\"Only standard deck type is supported right now.\")", "def get_card(self, card):\n\n\t\tself.add_card_to_grps(card)\n\n\t\tself.grps = sorted(self.grps, key = lambda x: -len(x))\n\n\n\t\t# check if # of cards forming sets is more than 5; if yes, then break the set to allow computer to form runs\n\t\tnum_set_cards = 0\n\t\tpos = -1\n\t\tfor i in range(len(self.grps)):\n\t\t\tif len(self.grps[i]) > 1 and self.grps[i][0] == self.grps[i][1]:\n\t\t\t\tnum_set_cards += len(self.grps[i])\n\t\t\t\tpos = i\n\n\t\tif num_set_cards > 5:\n\t\t\tcard = self.grps[pos][-1]\n\t\t\tself.grps[pos].remove(card)\n\t\t\tlogger.info(f\"In computer.py/get_card: computer returned {card} to break too many set, computer = {self}\")\n\t\t\treturn card\n\n\n\t\t# if # of sets is fine, then remove a card from the group with least size\n\t\tcard = self.grps[-1][-1]\n\n\t\t\n\t\tif len(self.grps[-1]) == 1:\n\t\t\tself.grps.remove(self.grps[-1])\n\t\telse:\n\t\t\tself.grps[-1].remove(self.grps[-1][-1])\n\n\t\tlogger.info(f\"In computer.py/get_card: computer returned {card}, computer = {self}\")\n\n\t\treturn card", "def show(self):\r\n for card in self.cards_list:\r\n print(card)", "def __init__(self, numberOfPairs):\n self._cards = []\n self._suitList=['s','h','d','c']\n for rank in range(1, numberOfPairs + 1): #start by 1 (not 0) as first rank\n suit=random.choice(self._suitList)\n c1 = Card(rank,suit)\n self._cards.append(c1)\n c2 = Card(rank,suit)\n self._cards.append(c2)", "def burn_card():\n\tglobal my_deck\n\tburn=my_deck.draw()\n\tmy_deck.discard(burn)\n\treturn my_deck", "def get_cards(query_param):\n return _query_scryfall(query_param)", "def __str__(self):\n return f\"This deck contains the following cards: {self.cards}\"", "def create_deck():\n suit_list = [\"\\u2665\", #\n \"\\u2666\", #\n \"\\u2663\", #\n \"\\u2660\"] #\n name_points_dict = {\"A\": 1, \"2\": 2, \"3\": 3, \"4\": 4, \"5\": 5, \"6\": 6, \"7\": 7,\n \"8\": 8, \"9\": 9, \"10\": 10, \"J\": 10, \"Q\": 10, \"K\": 10}\n\n # Use a double ended queue structured list for the deck\n deck_list = deque([])\n\n # For each suit, create a card with each of the name and point entries\n for each_suit in suit_list:\n for each_entry in name_points_dict.keys():\n new_card = Card(each_entry,\n name_points_dict[each_entry],\n each_suit)\n deck_list.append(new_card)\n\n return deck_list", "def generateCards(filename):\n infile = open(filename)\n\n header = pf.Header()\n\n # Loop 
through each line, converting to a pyfits card\n for line in infile.readlines():\n line = line.rstrip('\\n')\n line = line.strip()\n if(line == 'END'):\n break\n else:\n c = pf.Card().fromstring(line)\n c.verify() # This will attempt to fix issuesx[1]\n header.append(c)\n \n return header.cards", "def test_get_deck(self):\n my_deck = get_deck(deck_name='Test Deck Name')\n self.assertEqual(genanki.deck.Deck , type(my_deck))", "def __repr__(self):\n return f\"Card({self.face}, {self.value}, {self.suit})\"", "def get_hand(deck):\n random.shuffle(deck)\n return deck[0:5]", "def shuffle_deck(self):\n deck = [i for i in range(0, 52)]\n shuffle(deck)\n self.deck = [cards[c*2:c*2+2] for c in deck]", "def test_cards_get_list(self):\n pass", "def __init__(self):\n self._cards = []", "def get_decks(filtering=None, sorting=None, count=None,\n patch=None, classid=None):\n decks_metainfo = get_deck_metainfo(filtering, sorting, count,\n patch, classid)\n decks = [Deck(deck[0], deck[1], get_deck_list(deck[0]))\n for deck in decks_metainfo]\n return decks", "def test_cards_get(self):\n pass", "def generate_deck(suits=4, type_cards=13):\n cards = []\n for suite in range(suits):\n for type_card in range(1, type_cards+1):\n # Setting the key-value pair for every card\n if (type_card == 1):\n cards.append({'A':type_cards+1})\n elif (type_card == 11):\n cards.append({'J': type_card})\n elif (type_card == 12):\n cards.append({'Q': type_card})\n elif (type_card == 13):\n cards.append({'K': type_card})\n else:\n cards.append({type_card:type_card})\n # Randomize the set of cards in the deck\n random.shuffle(cards)\n return cards", "def yatzy_card(players):\n return [[0 for x in range(0, 14)] for x in range(players)]", "def get_cards():\n with open(\"mashape_key.txt\", \"r\") as mashape_key:\n api_key = mashape_key.read()\n print(api_key)\n url = \"https://omgvamp-hearthstone-v1.p.mashape.com/cards?collectible=1\"\n headers = {\"X-Mashape-Key\": api_key}\n response = requests.get(url, headers=headers)\n cards = json.loads(response.text)\n return cards", "def print_cards(self, all_cards=True):\n # print(\"Cards:\")\n result = \"\"\n cards = self.cards\n if all_cards:\n cards += self.cards_on_table\n for c in cards:\n result += str(c)\n return result", "def get_hand(self):\n return self.cards", "def drawCards(deck):\n\n\tcard1 = deck[random.randrange(0, len(deck))]\n\tcard2 = deck[random.randrange(0, len(deck))]\n\n\treturn card1, card2", "def shuffle(self):\n\n if self.deck:\n self.deck = deque()\n\n max_decks = self.deck_count + 1 # +1 for range function\n\n for deck in range(1, max_decks):\n for suit in self.suits:\n for num, name in enumerate(self.names, start=1):\n card = PlayingCard()\n card.set_attributes(name, suit, num)\n self.deck.append(card)\n\n for deck_shuffle in range(self.shuffle_count):\n random.shuffle(self.deck)", "def draw_a_card(cards):\n import random\n card_drawn = random.choices(card_deck)\n cards.append(card_drawn[0])\n return", "async def get_all_cards():\n card_tuple = await ex.conn.fetch(\"SELECT id FROM blackjack.cards\")\n all_cards = []\n for card in card_tuple:\n all_cards.append(card[0])\n return all_cards", "def create_deck(table):\n # Make a standard poker deck (14 represents an ace)\n\n for value in range(2, 15):\n\n if value > 10:\n if value == 11:\n name = 'Jack'\n elif value == 12:\n name = 'Queen'\n elif value == 13:\n name = 'King'\n elif value == 14:\n name = 'Ace'\n else:\n name = str(value)\n\n table.deck.append(Card(name + \"_Diamonds\", value, \"d\"))\n 
table.deck.append(Card(name + \"_Hearts\", value, \"h\"))\n table.deck.append(Card(name + \"_Spades\", value, \"s\"))\n table.deck.append(Card(name + \"_Clubs\", value, \"c\"))\n\n random.shuffle(table.deck)", "def new_deck(shoe_size=1, is_hidden=True) -> list:\n result = []\n for deck in range(shoe_size):\n result += [\n {\"value\": value, \"base\": base, \"is_hidden\": is_hidden}\n for base in card_base\n for value in card_value\n ]\n return result", "def getCards(self):\n return list(map(lambda x:x&63, self.cards))", "def factory(cls, game, player, play): # RIP n decks ;_;\n if len(play) == 1:\n return SingleCardRound(game, player, play)\n\n if len(play) > 1:\n play.sort()\n\n \"\"\"if number of cards > 1, checks to see if all cards are the same\"\"\"\n for i in range(len(play) - 1):\n if play[i].__eq__(play[i + 1]):\n allSame = True\n else:\n allSame = False\n if (allSame):\n return TupleCardsRound(game, player, play)\n\n \"\"\"if number of cards > 1 && cards not the same, check if play is all consecutive tuples\"\"\"\n tupleTracker = defaultdict(int)\n for j in range(len(play)):\n tupleTracker[play[j]] += 1\n cardValueList = tupleTracker.keys()\n numTuples = cardValueList[0]\n constainsTuples = True\n for cardValue in cardValueList:\n numCopies = cardValueList[tuple]\n if numTuples != numCopies:\n containsTuples = False\n if (containsTuples): # if hand is all tuples, see if they're consecutive\n isConsecutiveTupleCardsRound = True\n numConsecutiveTuples = 0\n for (index, key) in enumerate(cardValueList.sort()):\n if key.suit != keys[index + 1].suit or key.number != keys[index + 1].number - 1:\n isConsecutiveTupleCardsRound = False\n else:\n numConsecutiveTuples += 1\n if (isConsecutiveTupleCardsRound):\n return ConsecutiveTupleCardsRound(game, player, play)\n\n \"\"\"\n else check that the cards are the highest cards in that suit:\n look at the card \"groups\" that are in play and then check to see that there's no group in someone's hand that's higher than the current group\n is there a tuple? how many? if there is, mark this as a tuple with n tuples is there a consectuple? if there is, mark it as a consectuple\n \"\"\"\n \"\"\"making hash table for remainder of deck\"\"\"\n remainder = []\n for player in games.players:\n hand = player.hand\n for card in hand:\n remainder.append(card)\n\n remainderHash = defaultdict(int)\n for k in range(len(remainder)):\n remainderHash[remainder[k]] += 1\n\n \"\"\"below, check cardgroup against cardgroups in the remainderdeck to see if there's any higher cardgroups there\"\"\"\n isTopCardsRound = True\n numTuples = defaultdict(int)\n for card in cardValueList:\n\n \"\"\"this shit is not immediately useful. 
it just counts the number of tuples\"\"\"\n if tupleTracker[card] > 1:\n numTuples[tupleTracker[card]] += 1 #numTuples is actually hash table for init play\n\n for remainderCard in remainder:\n if card.suit != remainderCard.suit:\n continue\n if card.number < remainderCard.number:\n if tupleTracker[card] <= remainderHash[remainderCard]:\n isTopCardsRound = False # if any cardgroup is higher, then topGame is false\n\n \"\"\"by now this is definitely a topcardround, now to see if there's any tuples and how many tuples up in here\"\"\"\n if isTopCardsRound:\n if numTuples:\n isTopConsecutiveTuplesRound = True\n for (index, card) in enumerate(cardValueList.sort()):\n if card.suit != cardValueList[index + 1].suit or card.number != cardValueList[index + 1].number - 1: # problem\n isTopConsecutiveTupleCardsRound = False\n if (isTopConsecutiveTupleCardsRound):\n return TopConsecutiveTupleCardsRound(game, player, play)\n else:\n return TopCardsRound(game, player, play)\n\n \"\"\"if gets to here without returning something, this is a failed play\"\"\"\n raise ValueError('Cannot play illegal play to round')", "def get_card(self):\n return self.deck.pop()", "def __init__(self, numberOfPairs):\n self._cards = []\n for rank in range(1, numberOfPairs + 1): #start by 1 (not 0) as first rank\n c1 = Card(rank)\n self._cards.append(c1)\n c2 = Card(rank)\n self._cards.append(c2)", "def hand(self, id):\n return self.players[id].cards", "def testCard(self):\n # test1\n cardObj1 = Card('A','d')\n self.assertEquals(1,cardObj1.get_rank())\n self.assertEquals('d',cardObj1.get_suit())\n # test2\n cardObj2 = Card('J','d')\n self.assertEquals(10,cardObj2.get_rank())\n # test3\n cardObj3 = Card(5,'d')\n self.assertEquals(5,cardObj3.get_rank())", "def get_card(self):\n if self.card_suit in self.RED_SUITS:\n color = 'red'\n else:\n color = 'blue'\n\n return colored(self.card_name, 'yellow') + colored(self.card_suit,\n color)", "def shuffle(self):\r\n random.shuffle(self.deck_of_cards)\r\n return self.deck_of_cards", "def create_deck():\n vals = ['2', '3', '4', '5', '6', '7', '8',\n '9', '10', 'jack', 'queen', 'king', 'ace']\n suits = ['spades', 'clubs', 'hearts', 'diamonds']\n deck = []\n for val in vals:\n for suit in suits:\n tup = (val, suit)\n deck.append(tup)\n return tuple(deck)", "def create_deck(self):\n\n id_already_use, deck, hand = [], [], []\n\n for _ in range(self.number_domino - self.hand_size):\n\n # We generate a domino and keep its id in id_alread_use\n # then we make sure to ony keep new id\n\n id = (randint(0, 6), randint(0, 6))\n while id in id_already_use:\n id = (randint(0, 6), randint(0, 6))\n deck.append(Domino(id[0], id[1]))\n id_already_use.append(id)\n\n for _ in range(self.hand_size):\n id = (randint(0, 6), randint(0, 6))\n while id in id_already_use:\n id = (randint(0, 6), randint(0, 6))\n hand.append(Domino(id[0], id[1]))\n id_already_use.append(id)\n\n return deck, hand", "def get_card(name, reda):\n connection = pymongo.MongoClient(MONGO_URL)\n db = connection[DB]\n\n dbcard = db.cards.find_one({'name': name, 'redaction': reda})\n return tocard(dbcard) if dbcard is not None else None", "def __repr__(self):\n return f\"{self.deck}\"", "def get_card(self):\n\n card = random.randint(1,13)\n return card", "def make_deck():\n deck = []\n for i in range(13):\n for j in range(13):\n if j >= i:\n deck.append([i, j])\n else:\n pass\n return deck", "def deal(self):\n deck = range(3, 36)\n self.cards = sample(deck, 24)\n self.card = self.cards.pop()", "def get_card (self, card):\n\t\treturn 
self._card", "def show_hand(self):\n\n print(f\"{self.name.title()}'s cards are:\")\n for card in self.hand:\n print(card.get_card_details())", "def deal_cards(self):\n self.card = random.randint(1, 13)\n return self.card", "def card(self, card_id):\r\n return Card(self, card_id)", "def mock_card():\n return Card(Suit.SPADE, 1)", "def get_playable_cards(self, first):\n playable_cards = []\n first_colors = []\n if len(self.hand) == 0:\n print(\"ERROR: Handy Empty\")\n if first is None:\n return self.hand\n for card in self.hand:\n # White cards can ALWAYS be played.\n if card.color == \"White\":\n playable_cards.append(card)\n # First card color can ALWAYS be played.\n elif card.color == first.color:\n first_colors.append(card)\n # Other colors can only be played if there\n # no cards of the first color in the hand.\n if len(first_colors) > 0:\n return playable_cards + first_colors\n else:\n # Cannot follow suit, use ANY card.\n return self.hand", "def test_consumed_cards(self):\n game = TestGames.replay(9, [3, 1, 0, 0])\n consumed_cards = game.consumed_cards()\n self.assertEqual(len(consumed_cards), 8)\n\n self.assertListEqual(list(consumed_cards),\n [2 / 5, # guards\n 0 / 2, # priest\n 1 / 2, # baron\n 0 / 2, # handmaid\n 1 / 2, # prince\n 0 / 1, # king\n 0 / 1, # countess\n 0 / 1]) # princess" ]
[ "0.7541672", "0.71924794", "0.7061139", "0.69914514", "0.6714803", "0.666234", "0.664922", "0.6637694", "0.66035664", "0.657957", "0.6567809", "0.6554334", "0.6484326", "0.6472026", "0.6435984", "0.64274347", "0.6413029", "0.6395127", "0.63739616", "0.63564146", "0.635381", "0.63424927", "0.6329025", "0.63171214", "0.6314101", "0.63047564", "0.6299174", "0.62974", "0.62890536", "0.6281078", "0.627222", "0.627078", "0.626123", "0.6229466", "0.6194257", "0.6189266", "0.6189266", "0.618642", "0.61823815", "0.61662126", "0.61534745", "0.6116302", "0.6071773", "0.6069865", "0.60512304", "0.60470897", "0.60426974", "0.6039434", "0.6034083", "0.60289025", "0.60019815", "0.5997656", "0.59573585", "0.59567636", "0.5947855", "0.592216", "0.59207785", "0.5919356", "0.5887119", "0.5873565", "0.58666664", "0.58626735", "0.58609545", "0.5854989", "0.5844806", "0.58434546", "0.5835184", "0.58350265", "0.58286786", "0.58284515", "0.5818152", "0.5818033", "0.58128834", "0.58024377", "0.58002806", "0.57959706", "0.57955384", "0.5792701", "0.5789686", "0.57727414", "0.57712936", "0.57646006", "0.5758363", "0.57544166", "0.5747849", "0.5747395", "0.5745115", "0.57416457", "0.5740202", "0.573878", "0.57335", "0.57328093", "0.57308143", "0.5722589", "0.5712179", "0.5705036", "0.5699017", "0.5694043", "0.5689122", "0.5688937" ]
0.76038283
0
Pushes records to the server
def _push(self, server): defns = [self.get_id(ident) for ident in list(self.ids)] #for ident in list(self.ids): # defn = self.get_id(ident) if len(defns) == 0: return self.app.logger.info(f"Updating {server} with {len(defns)} records") url = f"{server}/add_record" try: resp = requests.post(url, json=defns) except Exception as e: self.app.logger.error(str(e)) return if not resp.ok: self.app.logger.error(f"{resp.reason} {resp.content}") return self._server_updated[server] = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _es_push_results(self, query_name, records):\n logger.debug(f\"Pushing {query_name}: {records}\")\n for c in self.es_clients:\n c.send_to_es(query_name, records)", "def _push_to_server(self) -> None:\n timestamp = int(arrow.get().float_timestamp * 1000)\n\n datapoints: List[Dict[str, Union[str, List[Tuple[float, float]]]]] = []\n\n for metric in REGISTRY.collect():\n if type(metric) == Metric and metric.type in [\"gauge\", \"counter\"]:\n if len(metric.samples) == 0:\n continue\n\n external_id = self.external_id_prefix + metric.name\n datapoints.append({\"externalId\": external_id, \"datapoints\": [(timestamp, metric.samples[0].value)]})\n\n self.cdf_client.datapoints.insert_multiple(datapoints)\n self.logger.debug(\"Pushed metrics to CDF tenant '%s'\", self._cdf_project)", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def add_records(self, data: dict, execution_context: dict):", "def enqueue(self, record):\r\n self.queue.put_nowait(record)", "def record_and_push(self, data):\n return self.record(data).push()", "def push(self):\n\n self.start = time.time()\n self.log.info('Uploading {} files to database...'\n ''.format(len(self.filenames)))\n i = 0\n\n # Loop over a portion of files and upload them\n if self.n_files != -1:\n files = self.filenames[0:self.n_files]\n else:\n files = self.filenames\n\n for i, f in enumerate(files):\n\n # If were not debugging script allow exceptions and report them\n # later\n if not self.debug:\n try:\n self._push_one(f, **self.meta)\n\n except Exception as e:\n self.log.error('Error with {}'.format(f))\n self.log.error(e)\n self.errors.append((f, e))\n\n else:\n self._push_one(f, **self.meta)\n\n self.session.close()\n\n # Log the ending errors\n self.report(i + 1)", "def _push_to_server(self) -> None:\n pass", "def __call__(self):\r\n AddNewRecords()", "def put_record(self, tag, json_str):\n a = 0\n while a < 2000:\n if a % 100 == 0 and a != 0:\n logger.info(\"A batch of 100 simple json records have been sent\")\n self.firehose_client.put_record(DeliveryStreamName=self.get_stream_name(tag),\n Record={\n 'Data': json_str\n }\n )\n a = a + 1\n logger.info(\"Records were placed successfully!!\")", "def enqueue(self, record: List[LogRecord]):\n self.queue.put_nowait(record)", "def emit(self, record):\n if self.list is not None:\n try:\n self.r.lpush(self.list, json.dumps(self.format(record)))\n except Exception:\n self.handleError(record)", "def push(self, *args, **kwargs):\n pass", "def write(self, record):\n # Make Splunk ready payload data and append it to self._buffers list.\n self._buffer.append({\n 'index': self._index,\n 'sourcetype': 'json',\n 'event': record\n })\n\n # If the records count in self._buffer is more than allowed by\n # self._buffer_size, send those records to Splunk.\n if len(self._buffer) >= self._buffer_size:\n self._flush()", "def records(self, records):\n\n self._records = records", "def records(self, records):\n\n self._records = records", "def _batch_write(self):\n if self.to_put:\n db.put(self.to_put)\n self.to_put = []\n if self.to_delete:\n db.delete(self.to_delete)\n self.to_delete = []", "def put_records(self, records, timeout=1.0):\n octets = b''.join(ndef.message_encoder(records))\n return self.put_octets(octets, timeout)", "def emit(self, record):\n log_entry = self.format(record)\n try: \n requests.post(self.host+self.url, log_entry,headers={\"Content-type\": 
\"application/json\"}).content\n except Exception as e:\n if self.debug:\n print(e)", "def batch_push(self, payloads):\n body = json.dumps(payloads)\n\n status, response = self._request('POST', body, BATCH_PUSH_URL,\n 'application/json')\n if not status == 200:\n raise AirshipFailure(status, response)", "def record(self):\n # TODO: record the data", "def push_to_object_store():\n list_my = openshift_object.get_all()\n return jsonify(list_my)", "def add_elasticsearch_records(self, data_list):\n actions = [self.create_data_record(data_dict) for data_dict in data_list]\n self.actions_buffer.extend(actions)", "def publish_event(self, event_records):\n if self.service_enabled is False:\n return\n\n running_threads = []\n result_queue = Queue.Queue()\n for client_URI_endpoint in self.client_URI_endpoints.keys():\n event = self.client_URI_endpoints[client_URI_endpoint]\n event.set_event_records(event_records)\n\n thr = threading.Thread(\n target=self.post_to_client,\n args=(\n client_URI_endpoint,\n event.attrs,\n result_queue\n )\n )\n thr.start()\n running_threads.append(thr)\n\n for running_thread in running_threads:\n running_thread.join()\n\n while not result_queue.empty():\n failed_client_url = result_queue.get()\n self.remove_subscription(failed_client_url)", "def _send_batch(self):\n batch = RPLogBatch(self._batch)\n http_request = HttpRequest(\n self.session.post, self._log_endpoint, files=batch.payload,\n verify_ssl=self.verify_ssl)\n batch.http_request = http_request\n self._worker.send(batch)\n self._batch = []\n self._payload_size = helpers.TYPICAL_MULTIPART_FOOTER_LENGTH", "def postponed_send(self):\n\n for event in self._event_list:\n self._http_post([event], postpone=True)\n\n # clear event_list for future use\n self._event_list = []", "def push_bq_records(client, dataset, table, records, sleep = 300, max_batch = 100, print_failed_records = True, retry_on_fail = True):\n if len(records) == 0:\n return\n if len(records) > max_batch:\n split = len(records) // 2\n push_bq_records(client, dataset, table, records[0:split], sleep, max_batch)\n push_bq_records(client, dataset, table, records[split:], sleep, max_batch)\n else:\n try:\n succ = client.push_rows(dataset, table, records)\n if not succ:\n if retry_on_fail:\n print(\"Push to BigQuery table was unsuccessful. Waiting %s seconds and trying one more time.\" % sleep)\n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch, print_failed_records, False)\n else:\n if print_failed_records:\n print(\"\\nRecord 0:\")\n print(records[0])\n if len(records) > 1:\n print(\"\\nRecord %s:\" % (len(records) - 1))\n print(records[len(records)-1])\n raise RuntimeError('Push to BigQuery table was unsuccessful. See above for sample record(s) if requested.')\n except BrokenPipeError:\n print(\"BrokenPipeError while pushing %s records. 
Waiting %s seconds and trying again.\" % (len(records), sleep)) \n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch)", "def test_append_new_record_to_queue_method(small_app):\n sample_hep_record = _IdDict({\n '$schema': 'http://localhost:5000/schemas/records/hep.json',\n 'authors': [{\n 'affiliations': [{'value': 'Copenhagen U.'}],\n 'curated_relation': False,\n 'full_name': 'Glashow, S.L.',\n 'signature_block': 'GLASs',\n 'uuid': '5ece3c81-0a50-481d-8bee-5f78576e9504'\n }],\n 'collections': [\n {'primary': 'CORE'},\n {'primary': 'HEP'}\n ],\n 'control_number': '4328',\n 'self': {'$ref': 'http://localhost:5000/api/literature/4328'},\n 'titles': [{'title': 'Partial Symmetries of Weak Interactions'}]\n })\n\n append_new_record_to_queue(sample_hep_record)\n\n assert str(sample_hep_record.id) == \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def _(event):\n\n N = len(self.view_model.results)\n coll = self.shared_state[\"active_collection\"]\n self.view_model.status_textcontrol.text = (\n f\"adding {N} records to {coll.name}...\"\n )\n count = 0\n for record in self.view_model.results:\n try:\n coll.add_document(record_id=record[\"record_id\"])\n count += 1\n except Exception:\n pass\n self.view_model.status_textcontrol.text = (\n f\"added {count} records to {coll.name}.\"\n )", "def sync(self):\n self._start_slow_sync()\n self._ask_for_all_records()\n self._process_events()\n self._process_reminders()\n self._process_recurrences()\n #self._write_events()", "def on_incoming_records(self, connection: ConnectionInterface) -> None:\n self.generate_metadata()\n\n df = connection.record_containers[0].build_dataframe()\n df[\"optional_value\"] = self.workflow_config[\"Value\"]\n\n self.output_anchor.push_records(\n generate_records_from_df(df, self.output_anchor.record_info)\n )\n\n connection.clear_records()", "def push(host):\n dispatcher = Dispatch(host)\n\n post(host)\n\n context = zmq.Context()\n zmq_socket = context.socket(zmq.PUSH)\n zmq_socket.bind('tcp://127.0.0.1:5560')\n\n for record in dispatcher:\n zmq_socket.send_pyobj((int(time.time()),record.raw))", "def emit(self, message):\n # handle vaping data that arrives in a list\n if isinstance(message.get(\"data\"), list):\n for row in message.get(\"data\"):\n\n\n # format filename from data\n filename = self.format_filename(message, row)\n\n # create database file if it does not exist yet\n if not os.path.exists(filename):\n self.create(filename)\n\n # update database\n self.log.debug(\"storing time:%d, %s:%s in %s\" % (\n message.get(\"ts\"), self.field, row.get(self.field, \"-\"), filename))\n self.update(filename, message.get(\"ts\"), row.get(self.field))", "def add_to_queue(self, items):\n\n for i in items:\n self.r.rpush(self.joblist, i)", "def push(self, request):\n serializer = PushTestSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({'success': True}, status=status.HTTP_201_CREATED)", "def _send_data(self):\n \n # Do not send more than 100 datasets each time (totally arbitrary)\n MAX_DATA_SETS_PER_POST = 100\n data_to_send = self._data_buffer[0:MAX_DATA_SETS_PER_POST]\n data_to_keep = self._data_buffer[MAX_DATA_SETS_PER_POST:]\n\n # Prepare data string with the values in data buffer\n now = time.time()\n data_string = '[' \n for (timestamp, data) in data_to_send:\n data_string += '['\n data_string += str(round(timestamp-now,2))\n for sample in data:\n data_string += ','\n 
data_string += str(sample)\n data_string += '],'\n # Remove trailing comma and close bracket\n data_string = data_string[0:-1]+']'\n\n self._log.debug(\"Data string: \" + data_string)\n \n # Prepare URL string of the form\n # 'http://domain.tld/emoncms/input/bulk.json?apikey=\n # 12345&data=[[-10,10,1806],[-5,10,1806],[0,10,1806]]'\n url_string = self._settings['protocol'] + self._settings['domain'] + \\\n self._settings['path'] + \"/input/bulk_json?apikey=\" + \\\n self._settings['apikey'] + \"&data=\" + data_string\n self._log.debug(\"URL string: \" + url_string)\n\n # Send data to server\n self._log.info(\"Sending to \" + \n self._settings['domain'] + self._settings['path'])\n try:\n result = urllib2.urlopen(url_string, timeout=60)\n except urllib2.HTTPError as e:\n self._log.warning(\"Couldn't send to server, HTTPError: \" + \n str(e.code))\n except urllib2.URLError as e:\n self._log.warning(\"Couldn't send to server, URLError: \" + \n str(e.reason))\n except httplib.HTTPException:\n self._log.warning(\"Couldn't send to server, HTTPException\")\n except Exception:\n import traceback\n self._log.warning(\"Couldn't send to server, Exception: \" + \n traceback.format_exc())\n else:\n if (result.readline() == 'ok'):\n self._log.debug(\"Send ok\")\n # Send ok -> empty buffer\n self._data_buffer = data_to_keep\n return True\n else:\n self._log.warning(\"Send failure\")", "def emit(self, record):\n self.buffer.append(record)\n while len(self.buffer) != 0:\n nextRecord = self.buffer.popleft()\n\n super().emit(nextRecord)\n\n if self.sock is None: # If we failed to send the record\n self.buffer.appendleft(nextRecord)\n break", "def submit_to_queue(queue_df, conn, table_name):\n queue_df.to_sql(con=conn, name=table_name, if_exists='replace', index=False)\n print 'Inserted ' + str(len(queue_df)) + ' records to the task_queue'", "def record(records: list,\n method=\"\",\n method_uuid=\"\",\n indicator=\"\",\n indicator_uuid=\"\",\n indicator_unit=\"\",\n flow=\"\",\n flow_uuid=\"\",\n flow_category=\"\",\n flow_unit=\"\",\n cas_number=\"\",\n location=\"\",\n location_uuid=\"\",\n factor=0.0) -> list:\n records.append([\n method,\n method_uuid,\n indicator,\n indicator_uuid,\n indicator_unit,\n flow,\n flow_uuid,\n flow_category,\n flow_unit,\n cas_number,\n location,\n location_uuid,\n factor])\n return records", "def add_record(self, record):\n pass", "def run(self):\n\t\tlogger.info(\"Uploading data... 
@ %f, PID: %d\" % (time.time(), os.getpid()))\n\n\t\tself.dump_db()", "def push_write(self, s):\n ...", "def ingest():\n db.delete_dataset_records(DATASET_ID)\n\n db.insert_dataset({\n 'dataset_id': DATASET_ID,\n 'title': 'North American Breeding Bird Survey (BBS)',\n 'version': '2016.0',\n 'url': 'https://www.pwrc.usgs.gov/bbs/'})\n\n to_taxon_id = insert_taxa()\n to_place_id = insert_places()\n to_event_id = insert_events(to_place_id)\n insert_counts(to_event_id, to_taxon_id)", "def records(self):\r\n raise NotImplementedError()", "def record_all(self):\n for i in self.recorders:\n t = i[0]\n r = i[1]\n self.add_row(t, r())", "def commit(self):\r\n # print(\"Connection to Mongo...\")\r\n client = MongoClient(DatabaseConfig.host, DatabaseConfig.port)\r\n # print(\"mongo-client: {}\".format(client))\r\n db = client[DatabaseConfig.database]\r\n records = db[self.collection]\r\n # print(kmodels)\r\n records.save(self.to_dict())\r\n client.close()", "def send_to_db(ck_transactions):\n db = DDDB()\n\n db.add_orders(ck_transactions)", "def insert_data(self) -> None:\n if self.min_insert_size > self.insert_count:\n LOG.debug(\"Not enough data for insert....\")\n return\n LOG.debug(f'Inserting {self.insert_count} records...')\n self.insert.write(self.copy_trailer)\n self.insert.seek(0)\n conn = pg.connect(self.dsn)\n with conn.cursor() as cur:\n cur.copy_expert(self.cmd, self.insert)\n conn.commit()\n conn.close()\n self.insert.close()\n self.create_byte_buffer()", "def push(self):\n\n self.start = time.time()\n\n i = 0\n\n if self.smp_log_f is not None:\n self.smp_log = SMPMeasurementLog(self.smp_log_f)\n else:\n self.smp_log = None\n\n # Keep track of whether we are using a site details file for each profile\n smp_file = False\n\n # Read the data and organize it, remap the names\n if not isinstance(self.smp_log, type(None)):\n self.log.info(\n 'Processing SMP profiles with SMP measurement log...')\n smp_file = True\n self.meta['header_sep'] = ':'\n\n # Loop over all the ssa files and upload them\n if self.n_files != -1:\n self.filenames[0:self.n_files]\n\n for i, f in enumerate(self.filenames):\n meta = self.meta.copy()\n\n if smp_file:\n extras = self.smp_log.get_metadata(f)\n meta.update(extras)\n\n # If were not debugging script allow exceptions and report them\n # later\n if not self.debug:\n try:\n self._push_one(f, **meta)\n\n except Exception as e:\n self.log.error('Error with {}'.format(f))\n self.log.error(e)\n self.errors.append((f, e))\n\n else:\n self._push_one(f, **meta)\n\n self.report(i + 1)", "async def post(self):\n data = json.loads(self.request.body)\n\n # validate first\n req_fields = {\n 'host': str,\n 'queues': dict, # dict of {name: type}\n 'version': str, # iceprod version\n }\n for k in req_fields:\n if k not in data:\n raise tornado.web.HTTPError(400, reason='missing key: '+k)\n if not isinstance(data[k], req_fields[k]):\n r = 'key {} should be of type {}'.format(k, req_fields[k])\n raise tornado.web.HTTPError(400, reason=r)\n\n # set some fields\n data['grid_id'] = uuid.uuid1().hex\n data['start_date'] = nowstr()\n data['last_update'] = data['start_date']\n data['username'] = self.auth_data['username']\n if 'debug' not in data:\n data['debug'] = False\n\n ret = await self.db.grids.insert_one(data)\n self.set_status(201)\n self.write({'result': data['grid_id']})\n self.finish()", "def SetRecords(self, records):\n # Number of bytes less than 1MB for ndb.BlobProperty.\n chunk_size = 1000000\n serialized = pickle.dumps(records, 2)\n length = len(serialized)\n if length / 
chunk_size > _MAX_NUM_PARTS:\n logging.error('Data too large to save.')\n return None\n\n log_parts = []\n for i in xrange(0, length, chunk_size):\n # +1 to start entity key at 1.\n part_id = i // chunk_size + 1\n part_value = serialized[i:i + chunk_size]\n log_part = QuickLogPart(id=part_id, parent=self.key, value=part_value)\n log_parts.append(log_part)\n\n self.size = len(log_parts)\n ndb.put_multi(log_parts + [self])", "def addRecord(self):\n\n ## Saving recorded entries to the CRM and Mailings Database\n print(\"Saving entries to the CRM and Mailings database...\")\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.crm_company_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.address.title() + \"', '\" + self.city.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.county.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.state_code.upper() + \"', '\" + str(self.zip_code) + \"', '\" + self.phone_number + \"', '\" + self.phone_number_2 + \"' , '\" + self.email_address + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \" \" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.company_name.replace(\"\\'\", \"\\'\\'\").title() + \"','\" + self.address + \" \" + self.city.title() + \" \" + self.county.title() + \" \" + self.state_code.upper() + \" \" + str(self.zip_code) + \"'); COMMIT\")", "def _push_to_server(self) -> None:\n if not self.url or not self.job_name:\n return\n\n try:\n pushadd_to_gateway(self.url, job=self.job_name, registry=REGISTRY, handler=self._auth_handler)\n\n except OSError as exp:\n self.logger.warning(\"Failed to push metrics to %s: %s\", self.url, str(exp))\n except:\n self.logger.exception(\"Failed to push metrics to %s\", self.url)\n\n self.logger.debug(\"Pushed metrics to %s\", self.url)", "def upload_data(self):\n labeled_ids = self.get_labeled_ids()\n\n users = []\n users_ids = []\n\n activities = []\n last_activities = []\n\n trackpoints = []\n\n for root, dirs, files in os.walk(DATASET_PATH, topdown=True):\n path_parts = root.split(\"/\")\n if len(path_parts) < 4: # check if inside user folder\n continue\n user_id = path_parts[3]\n\n if user_id not in labeled_ids:\n continue\n\n if user_id not in users_ids:\n users_ids.append(user_id)\n users.append({\"id\": user_id, \"has_labels\": user_id in labeled_ids})\n\n if 'labels.txt' in files:\n last_activities = self.get_activities(user_id, root + \"/labels.txt\")\n activities.extend(last_activities)\n\n if 'Trajectory' in root:\n files.sort()\n for file_path in files:\n trackpoints.extend(self.get_trackpoints(root + \"/\" + file_path, last_activities))\n print(len(trackpoints))\n\n\n print(\"Uploading data\")\n self.insert_data_bulk(\"User\", users)\n print(\" > Users done\")\n self.insert_data_bulk(\"Activity\", activities)\n print(\" > Activities done\")\n self.insert_data_bulk(\"TrackPoint\", trackpoints)\n print(\" > TrackPoints done\")\n self.cursor.close()", "def push(keys: List[str]):\n api = API()\n api.push(*keys)", "def Save(self):\n if not self._records:\n return\n records = list(self._records)\n stored_records = self._log.GetRecords()\n self._MergeRecords(records, stored_records)\n 
self._log.SetRecords(records[0:_MAX_NUM_RECORD])\n self._records.clear()", "def test_append_updated_record_to_queue_new_record(small_app):\n sample_hep_record = _IdDict({\n '$schema': 'http://localhost:5000/schemas/records/hep.json',\n 'authors': [{\n 'affiliations': [{'value': 'Copenhagen U.'}],\n 'curated_relation': False,\n 'full_name': 'Glashow, S.L.',\n 'signature_block': 'GLASs',\n 'uuid': '5ece3c81-0a50-481d-8bee-5f78576e9504'\n }],\n 'collections': [\n {'primary': 'CORE'},\n {'primary': 'HEP'}\n ],\n 'control_number': '4328',\n 'self': {'$ref': 'http://localhost:5000/api/literature/4328'},\n 'titles': [{'title': 'Partial Symmetries of Weak Interactions'}]\n })\n\n result = append_updated_record_to_queue(None, sample_hep_record,\n sample_hep_record, \"records-hep\",\n \"hep\")\n\n assert result is None\n assert str(sample_hep_record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def test_append_updated_record_to_queue_same_data(small_app):\n pid = PersistentIdentifier.get(\"literature\", 11883)\n publication_id = str(pid.object_uuid)\n record = Record.get_record(publication_id)\n\n append_updated_record_to_queue(None, record, record, \"records-hep\", \"hep\")\n\n assert str(record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "async def _async_send_records_loop(self):\n next_cleanup = 0\n while True:\n logging.info('CachedDataWriter trying to connect to '\n + self.data_server)\n try:\n async with websockets.connect('ws://' + self.data_server) as ws:\n while True:\n try:\n record = self.send_queue.get_nowait()\n logging.debug('sending record: %s', record)\n record = {'type':'publish', 'data':record}\n await ws.send(json.dumps(record))\n response = await ws.recv()\n logging.debug('received response: %s', response)\n except asyncio.QueueEmpty:\n await asyncio.sleep(.2)\n\n except websockets.exceptions.ConnectionClosed:\n logging.warning('CachedDataWriter lost websocket connection to '\n 'data server; trying to reconnect.')\n await asyncio.sleep(0.2)\n\n # If the websocket connection failed\n except OSError as e:\n logging.warning('CachedDataWriter websocket connection to %s '\n 'failed; sleeping before trying again: %s',\n self.data_server, str(e))\n await asyncio.sleep(5)", "def createRecord(self):\n self.dto.getRecord().append(self.controller.createNewObj())\n print(\"Record added.\")", "def emit(self, record):\r\n try:\r\n self.enqueue(self.prepare(record))\r\n except Exception:\r\n self.handleError(record)", "def _records_to_redis_pipe(self, records: List[Any]) -> bool:\n redis_client: Redis = self.redis_client\n\n queue_type: str = self._config[\"graph_queue_type\"]\n queue_key: str = self._config[\"graph_queue_key\"]\n\n try:\n with redis_client.pipeline() as pipe:\n\n pipe.multi()\n\n redis_action = getattr(\n pipe, self._redis_methods_map[queue_type].lower()\n )\n\n for r in records:\n gevent.sleep()\n redis_action(queue_key, json_dumps(r))\n\n pipe.execute()\n\n except RedisError as e:\n self._logger.exception(\"Redis Exception: %s\", str(e)) # noqa: G200\n result = False\n\n else:\n result = True\n\n return result", "def on_data(self, data):\n self.tweets.append(data)\n if len(self.tweets) >= self.batch_size:\n self.write_to_pubsub(self.tweets)\n self.tweets = []\n self.count += 1\n\n if (self.count % 1000) == 0:\n print('count is: {} at {}'.format(\n self.count, datetime.datetime.now())\n )\n return True", "def task_redis_push_single(\n self, records: List[Any], push_mode: str = \"pipe\"\n ) -> bool:\n 
result: bool = False\n\n push_method = getattr(self, \"_records_to_redis_{}\".format(push_mode))\n\n result = push_method(records=records)\n\n return result", "def flush(self):\n self.acquire()\n try:\n if self.buffer:\n self.client.insertall(self.mapLogRecord(k) for k in self.buffer)\n self.buffer = []\n except Exception as e:\n pass\n finally:\n self.release()", "def send_record(args):\n topic = args.topic.rstrip()\n\n schema_registry_config = {\n 'url': args.schema_registry }\n schema_registry_client = SchemaRegistryClient(schema_registry_config)\n\n avro_serializer = AvroSerializer(\n schema_registry_client,\n DATA_SCHEMA,\n data_to_dict)\n\n producer_config = {\n \"bootstrap.servers\": args.bootstrap_servers,\n \"key.serializer\": StringSerializer('utf_8'),\n \"value.serializer\": avro_serializer\n }\n producer = SerializingProducer(producer_config)\n\n split_incoming_data = args.record_value.split(',')\n if not len(split_incoming_data) == 7: # Data Format Check\n print('** Error: Insufficient Incoming Data: ', split_incoming_data)\n raise Exception\n try: # Data Format Check\n incoming_data = {\n 'envId': int(split_incoming_data[0]),\n 'whenCollected': str(split_incoming_data[1]),\n 'timeLightOnMins': int(split_incoming_data[2]),\n 'humidity': int(split_incoming_data[3]),\n 'soilMoisture': int(split_incoming_data[4]),\n 'temperature': int(split_incoming_data[5]),\n 'waterConsumption': int(split_incoming_data[6]) }\n except Exception as error:\n print('** Error Creating Dict of Data: ', error)\n\n print(f'Producing data records to topic {topic}. ^C to exit.')\n producer.poll(1)\n try:\n key = args.record_key if args.record_key else str(uuid4())\n data_object = Data(incoming_data)\n print('\\t-Producing Avro record. . .')\n producer.produce(topic = topic,\n key = key,\n value = data_object,\n on_delivery = delivery_report)\n except ValueError:\n print('\\t-Invalid input, discarding record. . .')\n print('\\nFlushing records. . .')\n producer.flush()", "def push(self, points, database):\n params = urllib.urlencode(\n {'db': database, 'u': self.user, 'p': self.password, 'precision': 's'}\n )\n\n stamp = int(time.time())\n for point in points:\n if not point.time:\n point.time = stamp\n\n while points:\n body = '\\n'.join(p.serialize() for p in points[:100])\n points = points[100:]\n for attempt in range(5):\n if attempt:\n time.sleep(2 ** (attempt - 1))\n\n try:\n conn = httplib.HTTPConnection(self.host_port)\n conn.request('POST', '%s/write?%s' % (self.path, params), body)\n resp = conn.getresponse()\n except httplib.HTTPException:\n print >>sys.stderr, (\n 'Exception POSTing influx points to: %s\\n%s'\n % (self.host_port, traceback.format_exc())\n )\n continue\n if resp.status >= 500:\n continue\n if resp.status >= 400:\n raise Error(\n 'Error writing InfluxDB points (attempt #%d, status code %d): %s'\n % (attempt, resp.status, resp.read())\n )\n break\n else:\n raise Error(\n 'Failed to write InfluxDB points with %d attempts. 
(status code %d): %s'\n % (attempt, resp.status, resp.read())\n )", "def _push(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def test_append_updated_record_to_queue(small_app):\n pid = PersistentIdentifier.get(\"literature\", 4328)\n publication_id = str(pid.object_uuid)\n record = Record.get_record(publication_id)\n\n record_to_update = deepcopy(record)\n record_to_update['authors'][0]['full_name'] = \"John Smith\"\n\n append_updated_record_to_queue(None, record_to_update, record_to_update,\n \"records-hep\", \"hep\")\n\n assert str(record_to_update.id) == \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def create_records(data: List[str]) -> List[dict]:\n records = []\n for d in data:\n records.append(create_record(d))\n\n logger.debug(f\"Formed Kinesis Records batch for PutRecords API: {records}\")\n return records", "def push(self, obj):\r\n request = http.Request('POST', self.get_push_url(), obj)\r\n return request, parsers.parse_json", "def push(self, data):\n self._list.append(data)", "def _send(self) -> None:\n if not self.connected or now() < self.next_send:\n return\n self.next_send += self.poll_interval\n buff = []\n while self.outq:\n msg_id, tag, data = self.outq.popleft()\n buff.append(pickle.dumps((msg_id, tag, data)))\n if buff:\n stream = b\"\".join(buff)\n self.endpoint.sendall(stream)", "def upload(message, metrics):\n \n url = 'http://dev.air.eng.utah.edu/api/stations/data'\n headers = {'Content-Type': 'application/json'}\n\n print json.dumps(message)\n\n r = requests.post(url, data=json.dumps(message), headers=headers)\n print r.status_code # TODO: just printing for sanity check\n \n if r.status_code == 200:\n print 'OK! 
SUCCESS'\n for m in metrics:\n m.uploaded = True\n m.save()\n\n return r.status_code", "def make_push(db,product_name, date, product_number, product_price, url):\n if db.product_mstator.find({'url':url}).count()==0:\n push = {\n \"product_name\": product_name,\n \"date\": [date],\n 'product_number': [product_number],\n 'product_price': [product_price],\n 'url': url\n }\n db.product_mstator.insert_one(push)\n else:\n db.product_mstator.update_one({'url':url},{'$push': {\n 'date':date,\n 'product_number':product_number,\n 'product_price':product_price\n }})\n return None", "def save(self, data):\n activities = [json.loads(activity['Json']) for activity in data]\n\n for i in range(len(activities)):\n activities[i]['created_at'] = to_datetime(activities[i]['created_at'])\n\n with Elastic(index='wink', doc_type='activity') as elastic:\n elastic.upload(activities, 'created_at')\n\n Log.info(\"Successfully uploaded wink activity data into elasticsearch.\")", "def _flush(self):\n buffer_len = len(self._buffer)\n\n if buffer_len == 0:\n _log.info('No pending records to index; URI: %s; index: %s',\n self._uri, self._index)\n return\n\n _log.info('Indexing %d records; URI: %s; index: %s ...',\n buffer_len, self._uri, self._index)\n\n headers = {'Authorization': 'Splunk ' + self._token}\n\n try:\n response = self._session.post(self._uri,\n headers=headers,\n data=json.dumps(self._buffer),\n verify=self._ca_cert)\n\n log_data = ('URI: {}; index: {}; response status: {}; '\n 'response content: {}'\n .format(self._uri, self._index,\n response.status_code, response.text))\n\n if response.status_code != 200:\n _log.error('Failed to index %d records; HTTP status '\n 'code indicates error; %s',\n buffer_len, log_data)\n return\n\n try:\n j = response.json()\n except Exception as e:\n _log.error('Failed to get JSON from response; %s; '\n 'error: %s; %s', log_data, type(e).__name__, e)\n return\n\n if j['code'] != 0:\n _log.error('Failed to index %d records; Splunk status '\n 'code in JSON indicates error; %s',\n buffer_len, log_data)\n return\n\n _log.info('Indexed %d records; %s', buffer_len, log_data)\n del self._buffer[:]\n\n except requests.ConnectionError as e:\n _log.error('Failed to index %d records; connection error; '\n 'URI: %s; index: %s; error: %s: %s; ',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)\n\n except Exception as e:\n _log.error('Failed to index %d records; unexpected error; '\n 'URI: %s; index: %s; error: %s: %s',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)", "def pushSerializedAll(**namespace):", "def push_data(self, wave_data, finish_processing=False):\n self._parent_conn.send((wave_data, finish_processing))", "def push(self):\n self.stack.append(self.save())", "def new_archive_record(self, event):\n end_ts = event.record['dateTime']\n start_ts = end_ts - event.record['interval'] * 60\n\n for topic in self.subscriber.subscribed_topics: # topics might not be cached.. 
therefore use subscribed?\n self.logger.debug(\"Service record prior to update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.record['dateTime']),\n to_sorted_string(event.record)))\n target_data = self.subscriber.get_accumulated_data(topic, start_ts, end_ts, event.record['usUnits'])\n event.record.update(target_data)\n self.logger.debug(\"Service record after update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.record['dateTime']),\n to_sorted_string(event.record)))", "def _(event):\n\n record_id = self.view_model.results[self.view_model.index][\"record_id\"]\n coll = self.shared_state[\"active_collection\"]\n self.view_model.status_textcontrol.text = (\n f\"adding {record_id} records to {coll.name}...\"\n )\n coll.add_document(record_id=record_id)\n self.view_model.status_textcontrol.text = (\n f\"added {record_id} to {coll.name}\"\n )", "def add_record(self, data):\n if self.current_trip is None:\n print \"no trip to add data\"\n return\n self.current_trip.store_data(data)", "def refresh():\n DB.drop_all()\n DB.create_all()\n results = get_results()\n for i, result in enumerate(results):\n record = Record(id=i, datetime=result[0], value=result[1])\n DB.session.add(record)\n DB.session.commit()\n return redirect(\"/\")", "def insert_data(self):\n # Make a connexion with a mock database\n self.generate_data_collection()", "def push(self, obj):\n pass", "def store_documents(self, documents: list):\n results = app.Results()\n entries = [\n { \n 'Id': str(uuid1()),\n 'MessageBody': json.dumps(doc)\n }\n for doc in documents\n ]\n ids = [ e['Id'] for e in entries ]\n self.Logger.info(f'Store {ids} in sqs')\n self.Logger.debug(f'Saving {entries} in sqs {self.sqs_queue_url}')\n self.sqs_client.send_message_batch(\n QueueUrl=self.sqs_queue_url,\n Entries=entries\n )\n results.ActionStatus = 0\n results.Results = ids\n return results", "def save_multiple_records(self, records, collection_name):\n\n try:\n self.logger.info('in save_multiple_records()')\n collection = self.get_db()[collection_name]\n record_ids = collection.insert_many(records)\n self.logger.info('out save_multiple_records()')\n return record_ids\n except Exception as e:\n self.logger.error(f'Error occurred while saving multiple records {e}')", "def _add_record(self, datetime_, hash_):\n assert isinstance(datetime_, datetime)\n assert isinstance(hash_, str)\n record = {'datetime': datetime_, 'hash': hash_, 'artifacts': self.artifacts}\n self.logger.debug(f'Adding record: {record}')\n self.db_collection.update_one(self.query, {'$addToSet': {'records': record}})", "def put_record(self, obj):\r\n for output in self.outputs:\r\n output.put_record(obj)", "def emit(self, record):\n data = self.mapLogRecord(record)\n client = Client()\n if self.method == 'GET':\n response = client.get(self.url, data)\n else:\n response = client.post(self.url, data)\n self.testcase.assertEqual(response.status_code, 200)\n self.testcase.assertContains(response, 'message saved')", "def submit(self):\n self.keep_data = False\n ManagedJob.submit(self)", "def add(self, trace_records):\n if self.is_active():\n \n #Check if argument is a single TraceRecord or a list.\n if isinstance(trace_records, TraceRecord):\n records = [trace_records]\n elif isinstance(trace_records, list):\n records = trace_records\n \n #Save each TraceRecord object to the Trace object.\n for rec in records:\n rec[\"number\"] = self._number\n self._record_list.append(rec)\n self._number = self._number + 1\n \n #Write TraceRecord to the console.\n if 
self._config.console_trace:\n assert isinstance(rec, TraceRecord)\n self.sim.con.display_trace(rec)", "def add_records(self):\n\n self.setup_progressbar(\"Loading {} records from table {}...\"\n .format(self.record_count, self.lyr.name()),\n self.record_count)\n\n provider = self.lyr.dataProvider()\n for i, row in enumerate(self.cur):\n feature = QgsFeature()\n feature.setGeometry(QgsGeometry())\n feature.setAttributes([flds for flds in row])\n provider.addFeatures([feature])\n self.update_progressbar(i)\n\n iface.messageBar().clearWidgets()\n iface.messageBar().pushMessage(\"Ready\", \"{} records added to {}\".format(str(self.record_count), self.lyr.name())\n , level=QgsMessageBar.INFO)", "def bulk_index_records(records):\n indexer = RecordIndexer()\n\n click.echo('Bulk indexing {} records...'.format(len(records)))\n indexer.bulk_index([str(r.id) for r in records])\n indexer.process_bulk_queue()\n click.echo('Indexing completed!')", "def emit(self, record):\n self.buffer.append(record.__dict__)", "def emit(self, record):\n self.buffer.append(record.__dict__)", "def enqueue(self, server_id, url, title, duration, user):\n srv = self.get_server_dict(server_id)\n srv['queue'].append( (url, title, duration, user) )", "def save_data(self, record):\n self.dbm.addRecord(record)", "async def insert_many(self, models):\n\n pass" ]
[ "0.6992219", "0.6472381", "0.6440058", "0.6413171", "0.6410277", "0.64007604", "0.6349231", "0.6322149", "0.61902606", "0.61212903", "0.60770565", "0.60610574", "0.60589975", "0.599316", "0.5989526", "0.5989526", "0.5933634", "0.5901903", "0.5889706", "0.5877791", "0.58750814", "0.5860765", "0.58231306", "0.58051693", "0.57911825", "0.57806814", "0.57779974", "0.577091", "0.5752932", "0.5738225", "0.5733679", "0.57041734", "0.57040405", "0.5694443", "0.5692443", "0.5673697", "0.5652291", "0.56483734", "0.5646129", "0.56318724", "0.5590242", "0.5562287", "0.5555306", "0.5544084", "0.5541798", "0.5536014", "0.55358315", "0.5534376", "0.55283207", "0.55239284", "0.55176973", "0.551432", "0.5510757", "0.55085987", "0.5495481", "0.547695", "0.54721045", "0.54503226", "0.5449143", "0.54405785", "0.5438331", "0.5437649", "0.54330146", "0.54272515", "0.54269874", "0.54178643", "0.54169744", "0.5409208", "0.54017556", "0.5396441", "0.5382747", "0.5372759", "0.5347811", "0.5345923", "0.5342053", "0.53388655", "0.53335017", "0.5332492", "0.5331492", "0.53305215", "0.53240716", "0.5318128", "0.5312354", "0.5308714", "0.53043044", "0.5304143", "0.52992195", "0.5299087", "0.5294616", "0.5287203", "0.52848005", "0.52690697", "0.5267788", "0.52674156", "0.526507", "0.5262833", "0.5262833", "0.5262562", "0.52483815", "0.5245781" ]
0.7575993
0
Fetch a url and BeautifulSoupify the returned doc
def _url2soup(self, url, qsdata={}, postdata=None, headers={}):
    logger.info("Fetching: %s" % url)
    ua = 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.11) Gecko/20071204 Ubuntu/7.10 (gutsy) Firefox/2.0.0.11'
    headers.update({'User-Agent': ua})
    params = urlencode(qsdata)
    if params:
        if '?' in url:
            url = "%s&%s" % (url, params)
        else:
            url = "%s?%s" % (url, params)
    req = Request(url, postdata, headers)
    doc = urlopen(req)
    data = doc.read()
    soup = BeautifulSoup(data)
    return soup
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_url(url):\n try:\n soup = bs(urlopen(url).read(), 'html.parser')\n return soup\n except:\n print \"Couldnot download the content from the URL\", url\n return \"\"", "def get_document(url):\n req = requests.get(url)\n doc = BeautifulSoup(req.content, \"html.parser\")\n return doc", "def get_soup(url):\n return BeautifulSoup(requests.get(url).content, 'lxml')", "def getSoup(url):\n return BeautifulSoup(getHtml(url), 'lxml')", "def request(url):\n response=requests.get(url)\n soup=BeautifulSoup(response.content,\"lxml\")\n return soup", "def request(self, url):\r\n\r\n req = self.get(url)\r\n soup = BeautifulSoup(req.content, \"lxml\")\r\n return soup", "def get_url_soup(url):\n r = requests.get(url)\n if r.status_code != 200:\n raise Exception(\"Paper request failed '%s'\" % url)\n return get_soup(r.content)", "def make_soup(url):\r\n htmlFile = urllib.request.urlopen(url).read()\r\n soup = BeautifulSoup(htmlFile)\r\n return soup", "def scrape(self):\n try:\n self.result = urlfetch.fetch(self.url)\n except DownloadError:\n self.result = urlfetch.fetch(self.url) \n if ((self.result.status_code == 200) and\n (self.result.content_was_truncated == 0)):\n self.soup = BeautifulSoup(self.result.content)\n else:\n logging.critical(\"Bad Status Code: \", self.result.status_code, self.url)\n sys.exit(1)", "def get_soup(url):\n\tr = requests.get(url)\n\tdata = r.text\n\tsoup = BeautifulSoup(data, \"lxml\")\n\treturn soup", "def get_soup(url):\n\tresponse = urlopen(url)\n\thtml = response.read()\n\tsoup = BeautifulSoup(html, \"html.parser\")\n\tresponse.close()\n\treturn soup", "def get_soup(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n return soup", "def get_soup(url: str):\n response = requests.get(url)\n\n return BeautifulSoup(response.content, \"html.parser\")", "def _soup(self, url):\n r = self.session.get(url)\n r.raise_for_status()\n html = Soup(r.text, 'lxml') # lxml is fastert than html.parser\n r.close()\n return html", "def load_website(self):\n# r = urllib.request.urlopen(self.url).read()\n r = requests.get(self.url).content \n self.soup = bs(r, \"lxml\")", "def make_request(url):\r\n req = requests.get(url, headers)\r\n soup = BeautifulSoup (req.content, \"html5lib\")\r\n return soup", "def get_soup(url):\n opener = urllib2.build_opener()\n request = urllib2.Request(url);\n request.add_header('User-Agent','Mozilla/6.0 (Windows NT 6.2; WOW64; rv:16.0.1) Gecko/20121011 Firefox/16.0.1');\n data = opener.open(request).read(); \n return BeautifulSoup(data);", "def load_data(url: str):\n\n page = requests.get(url=url)\n soup = BeautifulSoup(page.content, 'html.parser')\n return soup", "def soup_given_url(given_url):\n url = given_url\n content = urllib.request.urlopen(url)\n soup = BeautifulSoup(content, \"html.parser\")\n return soup", "def get_article(url):\n \n r = requests.get(url) \n html_soup = BeautifulSoup(r.content, 'lxml')\n return html_soup", "def get_soup(url: str) -> BeautifulSoup:\n html = get_html(url)\n soup = BeautifulSoup(html, 'lxml')\n return soup", "def fetch_and_parse(url, filepath):\n print \"fetch %s\" % (url)\n page = urllib.urlopen(url).read()\n # @todo: add gzip support\n\n parsed_html = BeautifulSoup(page)\n res = parsed_html.head.find('title').text\n\n try:\n f = codecs.open(filepath, \"w\", \"utf-8\")\n f.write(res)\n f.close()\n except:\n logging.error(\"Something went wrong with writing %s\", filepath)\n raise\n\n return {\"url\": url, \"result\": res}", "def get_soup(url):\r\n page=requests.get(url)\r\n 
soup = BeautifulSoup(page.text.encode(\"utf-8\"), 'html.parser')\r\n return soup", "def _fetch(url, ssl_verify = True):\n req = Request(url)\n if ssl_verify:\n page = urlopen(req)\n else:\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n page = urlopen(req, context=ctx)\n content = page.read().decode('utf-8')\n page.close()\n return content", "def __download_web(self):\n page = requests.get(self.url)\n\n if page.status_code == 200:\n return BeautifulSoup(page.content, \"html.parser\")", "def _get_soup_object(url: str) -> bs4.BeautifulSoup:\n request_result=requests.get(url)\n soup = bs4.BeautifulSoup(request_result.text, \"html.parser\")\n return soup", "def getSoupFromURL(url, supressOutput=True):\n if not supressOutput:\n print url\n \n try:\n r = requests.get(url)\n except:\n return None\n \n return BeautifulSoup(r.text)", "async def get_one_page_soup_object(url):\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n return await response.text()", "def getHTML(url): \n return urlopen(url)", "def fetch(self, url):\r\n fname = os.path.join(self._cachedir, self._formatter(url))\r\n if not os.path.exists(fname):\r\n time.sleep(self._sleep)\r\n html = urllib.urlopen(url).read()\r\n with codecs.open(fname, 'w', 'utf-8') as f:\r\n soup = BeautifulSoup(html)\r\n f.write(unicode(soup))\r\n return fname", "def soup(url):\n handle = ''\n max_tries = 10\n for i in range(max_tries):\n try:\n handle = urlopen(url)\n handle = handle.read()\n break\n except:\n logging.exception('urlopen failed (attempt %d)', i + 1)\n if i == max_tries - 1:\n logging.error('the maximum urlopen attempts have been reached')\n raise\n time.sleep(1)\n\n s = BeautifulSoup(handle)\n return s", "def return_beautiful_soup_object(url: str) -> bs4.BeautifulSoup:\n html_filename, headers = urllib.request.urlretrieve(url)\n with open(html_filename) as file:\n soup = BeautifulSoup(file, 'html.parser')\n file.close()\n return soup", "def _html(url: str) -> BeautifulSoup:\n with urllib3.PoolManager() as manager:\n res = manager.request(\"GET\", url, headers={\"User-Agent\": ua.chrome})\n if res.status != 200:\n raise Exception(res.status)\n soup = BeautifulSoup(res.data, \"html.parser\")\n return soup", "def fetch_document(self, url: str) -> bytes:\n self.html_document = b''\n try:\n response = requests.get(url, headers=self.headers)\n response.raise_for_status()\n self.html_document = response.content\n logger.info('web page {0} fetched with status code: {1}'.format(url, response.status_code))\n return self.html_document\n except requests.exceptions.RequestException:\n logger.exception('Exception raised in Scraper.fetch_document()')\n raise", "def get_soup_obj(url):\n try:\n html = session.get(url, headers=headers).text\n return BeautifulSoup(html, \"html.parser\")\n except HTTPError:\n print(\"{} not reachable\".format(url))\n return None", "def getHtml(url):\n return urlopen(url)", "def _get_soup(self, url):\n\n # generate a random header \n headers = {'User-Agent': self._random_user_agent()}\n # send a request and get the soup\n response = requests.get(url, headers=headers)\n results = response.content\n if not response.status_code == 404:\n soup = BeautifulSoup(results, 'lxml')\n return soup", "async def parse_url(self, url: str, delay: int = 0) -> BeautifulSoup:\n if url != self.driver.current_url:\n self.driver.get(url)\n return BeautifulSoup(self.driver.page_source, 'lxml')", "def make_soup(self):\n req = urllib.request.Request(\n 
url,\n data=None,\n headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n }\n )\n f = urllib.request.urlopen(self.html)\n soupdata = BeautifulSoup(f, \"html.parser\")\n return soupdata", "def getHTML(url):\n\n time.sleep(2.00)\n html = urllib2.urlopen(url,timeout=10).read()\n urllib2.urlopen(url).close()\n\n soup = BeautifulSoup(html)\n\n return soup", "def page_soup(url):\n html = requests.get(url).text\n return bs(html, 'html.parser')", "def get_html_soup(url_target, getter=1):\n if getter == 1:\n response = requests.get(url_target) # getter == 1\n status_code = response.status_code\n markup = response.text\n else:\n response = urlopen(url_target)\n status_code = response.getcode()\n markup = response\n print(f\"status_code = [{status_code}] \\n\")\n return BeautifulSoup(markup=markup, features='html.parser')", "def get_soup(url):\n url_hash = get_url_hash(url)\n www_cache_file = os.path.join(www_cache_dir, url_hash)\n if os.path.exists(www_cache_file):\n with open(www_cache_file) as file:\n charset = 'utf8'\n data = file.read().encode(charset)\n else:\n print('Downloading %s...' % url, file=sys.stderr)\n with urlopen(url) as stream:\n charset = stream.info().get_param('charset')\n data = stream.read()\n with open(www_cache_file, 'w') as file:\n file.write(data.decode(charset))\n return bs4.BeautifulSoup(data, 'lxml', from_encoding=charset)", "def get_soup_from_url(url, parser='html.parser'):\n r = requests.get(url)\n r.raise_for_status()\n soup = bs4.BeautifulSoup(r.text, parser)\n return soup", "def get_document(url):\n r = requests.get(url, timeout=5)\n r.raise_for_status()\n return lxml.html.fromstring(r.content)", "def read_html(url: str) -> BeautifulSoup:\n try:\n response = requests.get(url, stream=True)\n status_code = response.status_code\n content_type = response.headers[\"Content-Type\"].lower()\n except requests.RequestException as e:\n raise RuntimeError(f\"Error during requests to {url} : {str(e)}\")\n else:\n if (\n status_code == 200\n and content_type is not None\n and content_type.find(\"html\") > -1\n ):\n return BeautifulSoup(response.content, \"html.parser\")", "def get_soup(url: str):\n\n page_response = get_page_response(url)\n if page_response is not None:\n try:\n soup = BeautifulSoup(page_response.content, 'lxml')\n except:\n print('Trouble parsing the soup for: {}'.format(url))\n return None\n else:\n return soup\n else:\n print(f'The response object was \"None\" so there is no point in trying to parse for url {url}')\n return None", "def beautify_page(url=\"https://www.transportation.gov/individuals/aviation-consumer-protection/air-travel-consumer-reports-2020\"):\n # page = requests.get(url)\n page = urllib.request.urlopen(url)\n if page.getcode() == 200:\n soup = BeautifulSoup(page.read(), 'html.parser')\n print('Connection Successful!')\n print(url)\n return soup\n else:\n print('Connection Failure!')\n print(f'Status Code: {page.status_code}')", "def get_html(url):\n return urllib.request.urlopen(url)", "def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html", "def get_data(URL):\n try:\n page = requests.get(URL)\n soup = BeautifulSoup(page.text, \"html.parser\")\n print(f\"Fetching data from '{URL}' was successful.\")\n return soup\n except Exception as e:\n print(f'Something went wrong while fetching the page: {e}')\n return", 
"def read_soup(url, base_url=None,debug=False):\n\n soup = None\n # read local file\n if debug == True:\n print(\"\\n***********************************\")\n print(\"Opening \", url)\n # read url or file\n if url[:4].lower() == \"http\":\n soup = read_soup_from_url(url, debug=debug)\n else:\n soup = read_soup_from_local_html(url, debug=debug)\n if debug is True:\n print(f\"read soup from {url} containing {len(str(soup))} characters\") \n if base_url is not None:\n soup = replace_relative_links(soup,base_url,debug=debug)\n return soup", "def fetch(url):\n content = requests.get(url).text\n if \"Error\" in content:\n raise ValueError(f\"Cannot read from: {url}\")\n return content", "def load_url_content(url):\n try:\n r = requests.get(url)\n if r.ok:\n return r.text\n else:\n return None\n except Exception:\n return None", "def download_simple(url): # url(str)\n html = urlopen(url).read().decode()\n return html", "def fetch(url,delay=(1,3)):\n time.sleep(random.randint(delay[0],delay[1])) # wait random seconds\n try:\n response = requests.get(url)\n except ValueError as e:\n print(str(e))\n return '', BeautifulSoup('', \"html.parser\")\n html = response.text\n soup = BeautifulSoup(html, \"html.parser\")\n return (html,soup)", "def scrape_url(url):\n html = requests.get(url).text\n return scrape_html(html)", "def hot_soup(url, payload={}):\r\n response = query(url, payload)\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n return soup", "def get_html_parser(url):\n response = requests.get(url)\n return BeautifulSoup(response.content, 'html.parser')", "def get_soup_for_page(url: str) -> BeautifulSoup:\n return BeautifulSoup(get_html(url), 'html.parser')", "def html_dl(url):\n return urllib2.urlopen(url).read()", "def get_soup(url, using_TOR = False):\n try:\n request = get_request(url, using_TOR = using_TOR)\n if request == None:\n logger.debug(\"Request is empty, don't create soup.\")\n return None\n soup = BeautifulSoup(request, 'html.parser')\n return soup\n except Exception as error:\n #logger.warn(traceback.format_exc())\n raise\n return None", "def load_page(self) -> bs4.BeautifulSoup:\n\n res = requests.get(self.url)\n\n res.raise_for_status()\n return bs4.BeautifulSoup(res.text, 'html.parser')", "def get_html_content(self, url):\n\n req = urllib2.Request(url, headers=self.HEADER)\n page = urllib2.urlopen(req)\n soup = BeautifulSoup(page)\n\n return soup", "def get_html(url):\n print('fetching', url)\n try:\n re = requests.get(url, timeout=1, stream=True)\n print('success!')\n # limit file size to 1mb\n html = re.raw.read(1000000+1, decode_content=True)\n if len(html) > 1000000:\n raise ValueError('response too large')\n return html\n except:\n raise TimeoutError('request timed out')", "def url_to_soup(data_url, **kwargs):\n try:\n data_page = get_cached_url(data_url, **kwargs)\n except requests.RequestException:\n _logger.warning(f'request failed: {data_url}')\n raise\n _logger.debug(f'request successful: {data_url}')\n\n # Create a Beautiful Soup object\n data_text = data_page.text\n data_soup = BeautifulSoup(data_text, 'html.parser')\n\n return data_soup", "def fetch(self, url):\n self.log.info(\"Fetching URL: \" + url)\n\n r = requests.get(url, verify=False)\n # raise an HTTPError on badness\n r.raise_for_status()\n\n # this decodes r.content using a guessed encoding\n return r.text", "def get_soup(self):\n page = get(self.url)\n if page.status_code == 200:\n soup = BeautifulSoup(page.text, 'lxml')\n return soup\n else:\n raise ConnectionError('The page is not 
disponible.')", "def get_webpage_content(url):\n request = urllib2.Request(url)\n page = urllib2.urlopen(request)\n soup = BeautifulSoup(page.read())\n return unicode(soup)", "def get_soup(self, url):\n if self.session is None:\n return BeautifulSoup(requests.get(url).content, features=\"xml\")\n else:\n return BeautifulSoup(self.session.get(url).content, features=\"xml\")", "def load_page(url):\n try:\n url = 'https://en.wikipedia.org'+url\n html = urlopen(url)\n bs = BeautifulSoup(html.read(),'html.parser')\n except:\n #if page not exists or page not found\n return None \n return bs", "def retrieve_html(url):\n req = urllib2.Request(url)\n req.add_header('User-Agent', 'Just-Crawling 0.1')\n request = None\n status = 0\n try:\n logger.info(\"Crawling %s\" % url)\n request = urllib2.urlopen(req)\n except urllib2.URLError as e:\n logger.error(\"Exception at url: %s\\n%s\" % (url, e))\n except urllib2.HTTPError as e:\n status = e.code\n except:\n return\n if status == 0:\n status = 200\n\n try:\n data = request.read()\n except:\n return\n\n return str(data)", "def fetch(self):\n # This method also sets self._results_filtered and\n # self._urltable.\n page = self._conn.fetch_page(self._ddg_url.relative())\n\n if logger.isEnabledFor(logging.DEBUG):\n import tempfile\n fd, tmpfile = tempfile.mkstemp(prefix='ddgr-response-')\n os.close(fd)\n with open(tmpfile, 'w', encoding='utf-8') as fp:\n fp.write(page)\n logger.debug(\"Response body written to '%s'.\", tmpfile)\n\n parser = DdgParser(news=self._ddg_url.news)\n parser.feed(page)\n\n self.results = parser.results\n self._results_filtered = parser.filtered\n self._urltable = {}\n for r in self.results:\n self._urltable.update(r.urltable())", "def make_soup(url, params=None):\n\n r = requests.get(url, params=params)\n if r.status_code != requests.codes.ok:\n raise Exception('Error: status code is %s for URL: %s' %\n (str(r.status_code), url))\n\n contents = r.content\n \n soup = BeautifulSoup(contents, parser, from_encoding='iso-8859-1')\n return soup", "async def _fetch(self, session, url, proxy=None, raw=False, which_site=False):\n print(url)\n result = None\n site = None\n if 'hare' in url: # {'Unknown': -1, 'Pixnet': 0, 'Hares': 1}\n site = self._websites['Hares']\n elif 'pixnet' in url:\n site = self._websites['Pixnet']\n else:\n site = self._websites['Unknown']\n\n count = 1\n while count <= 2:\n soup = ''\n status = 0\n try:\n async with session.get(url, proxy=proxy) as response:\n source_code = await response.text('utf-8')\n status = response.status\n soup = source_code if raw else BeautifulSoup(source_code, 'lxml')\n except Exception as e:\n print('Connection error: ' + str(e))\n soup = None\n finally:\n result = (url, soup, status, site) if which_site else (url, soup, status)\n if status != 0:\n return result\n if 'searcharticle' not in url:\n count += 1\n result = (url, soup, status, site) if which_site else (url, soup, status)\n return result", "def load_page(url):\n try:\n response = urllib2.urlopen(url)\n html = response.read()\n\n if response.code == 200:\n body_text = html\n return html\n return \"\"\n except Exception:\n return \"\"", "def create_soup(u):\n req = requests.get(u)\n html = req.text\n s = BeautifulSoup(html, \"html.parser\")\n return s", "def url_fetch(self, url):\n user_agent = random.choice(self.conf.user_agents)\n if self.isCompress == True:\n headers = {\n 'Uesr-Agent': user_agent,\n \"Accept-Encoding\": \"gzip,deflate\",\n \"Accept-Charset\" : \"UTF-8,*\"\n }\n else:\n headers = {\n 'Uesr-Agent': user_agent,\n 
\"Accept-Charset\" : \"UTF-8,*\"\n }\n raw_data = ''\n try:\n conn = httplib.HTTPConnection(self.proxy, timeout=3.0)\n conn.request('GET', url, None, headers)\n response = conn.getresponse()\n raw_data = response.read()\n except Exception as err:\n self.logger.error('connect error[%s]' % err)\n return '999', 'Request failed', ''\n finally:\n conn.close()\n \n content = ''\n if self.isCompress == True:\n if response.status == 200:\n try:\n stream = StringIO.StringIO(raw_data)\n decompressor = gzip.GzipFile(fileobj=stream)\n content = decompressor.read()\n except:\n self.logger.error('status[%s] len_raw_data[%d]' % (response.status, len(raw_data)))\n return '998', 'content err', ''\n else:\n if response.status == 200:\n content = raw_data \n\n return response.status, response.reason, content", "def get_soup_from_url(page_url):\n r = requests.get(page_url)\n if r.status_code != requests.codes.ok:\n raise requests.exceptions.HTTPError\n return BeautifulSoup(r.content, 'lxml')", "def get_html(url):\n req = urllib.request.Request(\n url,\n headers={\n 'User-Agent': 'Python Learning Program',\n 'From': '[email protected]'\n }\n )\n resp = urllib.request.urlopen(req)\n\n if resp.code == 200:\n return resp.read() # returns the html document\n else:\n return None", "def get_soup(session, url, user_agent):\n headers = cs.base_request_headers\n headers['User-Agent'] = user_agent\n\n page = custom_get(session=session, url=url, headers=headers)\n\n return BeautifulSoup(page.text, 'html.parser')", "def process(self, url, configuration={}, data=None):\n try:\n response = self.fetch(url, data, configuration.get(\"headers\", {}))\n response.mime_type = response.headers.get('Content-Type', ';').split(';')[0]\n response.body = response.read()\n if configuration.get(\"parse_result\", True) and response.mime_type.startswith('text'):\n if response.body.startswith(b\"<!DOC\") or response.body.startswith(b\"<!doc\"):\n response.soup = BeautifulSoup(\n response.body[response.body.find(b\">\")+1:], \"html.parser\")\n else:\n response.soup = BeautifulSoup(response.body, \"html.parser\")\n else:\n response.soup = BeautifulSoup('', \"html.parser\")\n return response\n except urllib.error.HTTPError as e:\n status = int(str(e).split()[2][0:3])\n if status in configuration.get('status', [200, 301, 302, 303]):\n # This is OK -- the status matches what we're expecting\n class response(object):\n status_code = status\n soup = BeautifulSoup('', \"html.parser\")\n body = ''\n def __init__(self, u):\n self.url = u\n return response(url)\n raise", "def soup_explore(url_or_file, session=None):\n soup = ph.get_soup(url_or_file, session)\n if not soup:\n ph.logger.error('No soup found for {}'.format(url_or_file))\n else:\n print('\\nExplore the \"soup\" object\\n\\n')\n embed()\n return soup", "def extract_link(url):\n\theaders = {\"Host\": \"www.zomato.com\",\n\t \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0\",\n\t \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n\t \"Accept-Language\": \"en-US,en;q=0.5\",\n\t \"Accept-Encoding\": \"gzip, deflate, br\",\n\t \"Referer\": \"https://www.zomato.com/\",\n\t \"Connection\": \"keep-alive\"}\n\n\tif url.startswith('file'):\n\t\twith open(url.replace('file:\\\\\\\\', ''), encoding='utf-8') as fp:\n\t \t\tpage_source = fp.read()\n\n\telse:\n\t\tr = requests.get(url, headers=headers)\n\t\tif r.status_code == 404:\n\t\t\treturn None\n\t\tpage_source = r.text\n\n\tpage_source = re.sub('<br>', '', 
page_source)\n\tpage_source = re.sub('<br />', '', page_source)\n\tpage_source = re.sub('<br/>', '', page_source)\n\tsoup = BeautifulSoup(page_source, 'html.parser')\n\n\treturn soup", "def fetch(url, user_agent=\"django-oembed/0.1\"):\r\n request = urllib2.Request(url)\r\n request.add_header('User-Agent', user_agent)\r\n request.add_header('Accept-Encoding', 'gzip')\r\n opener = urllib2.build_opener()\r\n f = opener.open(request)\r\n result = f.read()\r\n if f.headers.get('content-encoding', '') == 'gzip':\r\n result = gzip.GzipFile(fileobj=StringIO(result)).read()\r\n f.close()\r\n return result", "def webdl(url):\n print('Downloading...{}'.format(url))\n try:\n r = requests.get(url)\n r.raise_for_status()\n return r\n except:\n print('[Error webdl]: Download failed for {}'.format(url))\n return None", "def gather_current(url=URL):\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n return soup", "def fetch(self, url: furl) -> str:\n try:\n contents = self._download(url)\n except requests.ConnectionError as err:\n logger.exception(f\"Request failed with {err}\")\n click.secho(\n f\"The URL {url} could not be downloaded. Either your network is unreachable or the URL is broken.\"\n f\" Check the URL, fix your connection, or use \"\n f\" {OptionEnum.OFFLINE.as_flake8_flag()} / {OptionEnum.OFFLINE.as_envvar()}=1\",\n fg=\"red\",\n err=True,\n )\n return \"\"\n return contents", "def scrape(self):\n\n self.url = self.headline.url\n\n # Should raise exception...\n if not self.parsing_template:\n return None, None, None, None, None\n\n try:\n response = self.download()\n self.source = response.text\n except:\n return None, None, None, None, None\n\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n if soup:\n return self.parse(soup)\n else:\n return None, None, None, None, None", "def fetchUrl(self, url):\n self.driver.get(url)\n html = self.driver.page_source\n return html", "def get_soup():\n global soup\n html = urlopen(\"http://www.jrenshaw.com/works-in-progress/\")\n soup = BeautifulSoup(html, \"lxml\")\n return soup", "def get_soup_alternate(url):\n\theaders = {\"User-Agent\":\"Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11\"}\n\treq = Request(url, headers=headers)\n\tresponse = urlopen(req)\n\thtml = response.read()\n\tsoup = BeautifulSoup(html, \"html.parser\")\n\tresponse.close()\n\treturn soup", "def retrieve_content(self, url):\n page = requests.get(url)\n content = page.content\n return content", "def open_url(url):\n\tglobal books\n\tglobal count_books\n\tglobal titles\n\t#global word_count\n\ttry:\n\t\t#open url\n\t\tresponse = re.urlopen(url)\n\t\t#get data\n\t\tcontent = response.read().decode('utf8')\n\t\t#close connection\n\t\tresponse.close()\n\t\t\n\texcept(er.URLError):\n\t\t#if url is not functional\n\t\tcontent = \"\"\n\t\tprint(\"The URL is not functional : \",url)\n\t\treturn None\n\t\t# #remove the url from the books dictionary\n\t\t# for key,val in books.items():\n\t\t# \tif val == url:\n\t\t# \t\tdel books[key]\n\t\t# \t\t#pop the last\n\t\t# \t\ttitles.pop()\n\t\t# \t\tbreak\n\t\t# #update count for number of books\n\t\t# count_books = len(books)\n\t\t# return\n\treturn content", "def get_content(self):\n response = requests.get(self.url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n return soup", "def parse(url, parser='html5lib', **kwargs):\n return bs4.BeautifulSoup(SESSION.get(url).content, features=parser, **kwargs)", "def get_html_from_url(url):\n request = requests.get(url)\n data = 
request.text\n return data", "def read_soup_from_url(url, wait_time=10, debug=False):\n\n soup = None\n try:\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n print('Reading %s , Status %d , waiting %d seconds ...' %\n (url, r.status_code, wait_time))\n time.sleep(wait_time)\n except:\n print(traceback.format_exc())\n return soup", "def get_page(self, url):\n page = self.__open_page(url)\n soup = BeautifulSoup(page, 'html.parser')\n return soup", "def fetch(self, url, headers=DEFAULTHEADERS):\n logger = self.loggers['http']\n request = urllib2.Request(url, headers=headers)\n try:\n response = urllib2.urlopen(request)\n except urllib2.HTTPError:\n logger.error(\"failed to retrieve the resource at %s\" % url)\n raise\n urlgot = response.geturl()\n rawcontent = response.read()\n if urlgot != url:\n logger.info(\"successfully retrieved resource from %s, redirected from %s\" % (urlgot, url))\n self.http['redirect'] = True\n else:\n logger.info(\"successfully retrieved resource from %s\" % url)\n self.http['redirect'] = False\n rheaders = response.info()\n \n # store useful info on the object for later access\n self.http['request'] = {}\n self.http['request']['headers'] = headers\n self.http['urlsought'] = url\n self.http['urlgot'] = urlgot\n self.http['response'] = response\n self.http['response_headers'] = {}\n for k in sorted(rheaders.keys()): \n logger.debug(\"response header %s: '%s'\" % (k, rheaders[k]))\n self.http['response_headers'][k.strip().lower()] = rheaders[k].strip() \n self.documenturl = urlgot\n self.rawcontent = rawcontent" ]
[ "0.7806457", "0.7470853", "0.73826766", "0.7287493", "0.72475606", "0.71778685", "0.7014653", "0.69928056", "0.6990505", "0.69898975", "0.6983415", "0.6978702", "0.6976216", "0.69470924", "0.68931305", "0.6890334", "0.68858266", "0.6878703", "0.6874414", "0.68655133", "0.6827061", "0.6809511", "0.6798924", "0.67746943", "0.675328", "0.6751563", "0.6746057", "0.6735367", "0.672723", "0.66773", "0.6670313", "0.665384", "0.66231096", "0.6602891", "0.65783", "0.6578218", "0.65734035", "0.65554863", "0.6543721", "0.65361845", "0.6529656", "0.6527246", "0.65208995", "0.6490769", "0.6484565", "0.6471363", "0.6438142", "0.64321357", "0.64236164", "0.64220375", "0.6417184", "0.64012694", "0.63966876", "0.6380245", "0.63716656", "0.6354815", "0.63354135", "0.6329242", "0.63289076", "0.63274825", "0.63262284", "0.63127786", "0.6291601", "0.6258572", "0.62481165", "0.62444264", "0.62417257", "0.6233152", "0.620704", "0.62045443", "0.62009346", "0.6200755", "0.6194871", "0.6194366", "0.6180859", "0.61783737", "0.6177808", "0.6170214", "0.61598146", "0.61595774", "0.6153746", "0.6151019", "0.614768", "0.61321044", "0.61308557", "0.6120936", "0.6095939", "0.60948086", "0.60742986", "0.6071589", "0.6069189", "0.6065977", "0.60587764", "0.6046815", "0.6045759", "0.6041696", "0.6034829", "0.60308427", "0.6022736", "0.6019769" ]
0.6243189
66
Function to mixup data.
def mixup(batch: Tuple[torch.Tensor, torch.Tensor], alpha: float = 1.0) -> Tuple:
    data, targets = batch
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    indices = torch.randperm(data.shape[0])
    mixed_data = lam * data + (1 - lam) * data[indices, :]
    target_a, target_b = targets, targets[indices]
    targets = (target_a, target_b, lam)
    return mixed_data, targets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concatenate_data():", "def data_unification(self, data1, data2):\r\n data = data1 + data2\r\n return data", "def mixup_data(self, data_ratio_produce=2, alpha=0.2):\n real_samples_idx = np.argwhere(self.data['real']).ravel()\n n_training_samples = real_samples_idx.shape[0]\n # Make random mixup samples\n n_samples = int(n_training_samples * data_ratio_produce)\n data_new = dict()\n for key in self.data:\n data_new[key] = []\n for i in range(n_samples):\n # Mixup ratio\n lam = np.random.beta(alpha, alpha)\n # Should not happen, but just in case to detect bugs\n if lam < 0 or lam > 1:\n raise ValueError('Lam not between 0 and 1')\n # Images to choose for mixup, choose only from real samples\n idxs = np.random.choice(real_samples_idx, 2, replace=False)\n idx0 = idxs[0]\n idx1 = idxs[1]\n\n # Make mixup data\n data_new['greyscale'].append(\n self.data['greyscale'][idx0] * lam + self.data['greyscale'][idx1] * (1 - lam))\n data_new['sample'].append(\n '_'.join([str(self.data['sample'][idx0]), str(lam), str(str(self.data['sample'][idx1])), str(1 - lam)]))\n data_new['lifetime'].append(\n self.data['lifetime'][idx0] * lam + self.data['lifetime'][idx1] * (1 - lam))\n data_new['magnification'].append(\n self.data['magnification'][idx0] * lam + self.data['magnification'][idx1] * (1 - lam))\n data_new['uncertainty'].append(\n self.data['uncertainty'][idx0] * lam + self.data['uncertainty'][idx1] * (1 - lam))\n data_new['image_id'].append(\n '_'.join(\n [str(self.data['image_id'][idx0]), str(lam), str(self.data['image_id'][idx1]), str(1 - lam)]))\n data_new['real'].append(0)\n\n # Add mixup to data\n for key in self.data.keys():\n if len(data_new[key]) != n_samples:\n raise ValueError('Mixup data for %s not of corect length' % key)\n # Do not use np concat as it is slow - filling an array is quicker\n # data_temp = np.empty((self.data[key].shape[0] + len(data_new[key]), *self.data[key].shape[1:]),\n # dtype=self.data[key].dtype)\n # for i in range(self.data[key].shape[0]):\n # data_temp[i] = self.data[key][i]\n # # Add new data after old one (array positions starting after positions of original data)\n # for i in range(len(data_new[key])):\n # data_temp[i+self.data[key].shape[0]] = data_new[key][i]\n # self.data[key] = data_temp\n self.data[key] = np.concatenate([self.data[key], data_new[key]])", "def mixup_data(x, y, alpha=1.0, device=\"cpu\"):\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n index = torch.randperm(batch_size).to(device)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam", "def mixup_data(x, y, alpha, device):\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n index = torch.randperm(batch_size).to(device)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam", "def mix(a, b, c, d, e, f, g, h):\n a ^= (b << 11); d += a; b +=c\n b ^= c >> 2; e += b; c += d\n c ^= (d << 8); f += c; d += e\n d ^= e >> 16; g += d; e += f\n e ^= (f << 10); h += e; f += g\n f ^= g >> 4; a += f; g += h\n g ^= (h << 8); b += g; h += a\n h ^= a >> 9; c +=h; a += b\n return a, b, c, d, e, f, g, h", "def mixup_data(x, y, use_cuda=True, alpha=1.0):\r\n if alpha > 0:\r\n lam = np.random.beta(alpha, alpha)\r\n else:\r\n lam = 1\r\n batch_size = x.size()[0]\r\n if use_cuda:\r\n index = torch.randperm(batch_size).cuda()\r\n else:\r\n index = torch.randperm(batch_size)\r\n\r\n mixed_x = lam * x + (1 - lam) * 
x[index, :]\r\n y_a, y_b = y, y[index]\r\n return mixed_x, y_a, y_b, lam", "def mixup_data(x, y, alpha=1.0, use_cuda=True):\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam", "def collate_fn(data):\n\toutput = dict()\n\n\tfor name in ['answer_ID','query_ID']:\n\t\toutput[name] = [ _[name] for _ in data]\n\n\n\tfor name in ['query_len','answer_len']:\n\t\ttemp = [ _[name] for _ in data]\t \n\t\toutput[name] = torch.stack(temp, dim=0) \n\t\n\t#deal with source and target\n\tfor name in ['answer','query']:\n\t\tlength = output['{0}_len'.format(name)]\n\t\tl = length.max().item()\n\n\t\tfor i in range(len(data)):\n\t\t\tif(l-length[i].item()>0):\n\t\t\t\tdata[i][name] = torch.cat([data[i][name],torch.zeros(l-length[i].item(),dtype=torch.long)],dim=-1)\n\n\t\ttemp = [ _[name] for _ in data]\n\t\t\n\t\toutput[name] = torch.stack(temp, dim=0).long()\n\t\t\n\n\treturn output", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def combine(addresses1, addresses2, data):\n for n, byte in enumerate(data):\n yield (\n addresses1[n] if addresses1 is not None else None,\n addresses2[n] if addresses2 is not None else None,\n byte)", "def mergeWith(self, others):", "def get_merged_data(self):\n return self._combinedata", "def merge_struct_arrays(self, data1, data2):\n data_final = np.concatenate((data1, data2))\n return data_final", "def make_multi_output_data(self, data):\n confirmed, deceased, recovered = [], [], []\n for sample in data:\n confirmed.append(sample[:,0])\n deceased.append(sample[:,1])\n recovered.append(sample[:,2])\n confirmed = np.stack(confirmed)\n deceased = np.stack(deceased)\n recovered = np.stack(recovered)\n return np.stack([confirmed, deceased, recovered])", "def _umap_concat(data, **umap_kwargs):\n data_tiles = []\n for i in range(5):\n data_i = slice_vec_bands(data, start=i, end=i + 1)\n data_tiles.append(umap.UMAP(**umap_kwargs).fit_transform(data_i))\n\n data_concat = numpy.empty((\n data_tiles[0].shape[0],\n sum(dt.shape[1] for dt in data_tiles)\n ))\n\n start_col = 0\n for dt in data_tiles:\n end_col = start_col + dt.shape[1]\n data[:, start_col:end_col] = dt\n start_col = end_col\n\n return data_concat", "def combineData(self, dataLOLA, dataLOLB):\r\n map(lambda x: x[0].append(x[1][1]), zip(dataLOLA, dataLOLB))\r\n return dataLOLA", "def _mix(a, b, c):\n c = _cutoff32(c)\n a = _cutoff32(a-b-c) ^ c >> 13\n b = _cutoff32(b-c-a) ^ _cutoff32(a << 8)\n c = _cutoff32(c-a-b) ^ b >> 13\n a = _cutoff32(a-b-c) ^ c >> 12\n b = _cutoff32(b-c-a) ^ _cutoff32(a << 16)\n c = _cutoff32(c-a-b) ^ b >> 5\n a = _cutoff32(a-b-c) ^ c >> 3\n b = _cutoff32(b-c-a) ^ _cutoff32(a << 10)\n c = _cutoff32(c-a-b) ^ b >> 15\n return a, b, c", "def combine_all(self):\n combined = copy.deepcopy(self.train)\n\n def _combine_data(data):\n for img_path, pid, camid in data:\n\n if pid in self._junk_pids:\n continue\n #pdb.set_trace()\n pid = self.dataset_name + \"_\" + str(pid)\n camid = self.dataset_name + \"_\" + str(camid)\n combined.append((img_path, pid, camid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)", "def combine_data(dirname = 'data'):\n \n current_dir = os.getcwd()\n os.chdir(dirname)\n photo_info_combined = {}\n print \"Combining photo 
information...\"\n for fname in os.listdir('.'):\n print \"Fname = \", fname\n if fname.find('photos_') == -1:\n continue\n\n print \"File name: \", fname\n f = open(fname, 'r')\n photo_info = pickle.load(f)\n f.close()\n\n print \"Length of photo info = \", len(photo_info)\n\n for photo_id, photo in photo_info.iteritems():\n if photo_id not in photo_info_combined:\n photo_info_combined[photo_id] = photo\n\n print \"Dumping...\"\n os.chdir(current_dir)\n f = open('photos_all_1.dat', 'wb')\n pickle.dump(photo_info_combined, f)\n f.close()\n print \"Done.\"\n\n print \"Combining user information...\"\n users_data = get_users_data(photo_info_combined)\n f = open('users_all_1.dat', 'wb')\n pickle.dump(users_data, f)\n f.close()\n print \"Done.\"\n\n return photo_info_combined, users_data", "def union(first, second):\n # Put your code here.", "def _combine_sup_unsup_datasets(sup_data, unsup_data):\n # Copy all values from supervised data as is\n output_dict = dict(sup_data)\n\n # take only 'image' and 'aug_image' from unsupervised dataset and\n # rename then into 'unsup_image' and 'unsup_aug_image'\n if 'image' in unsup_data:\n output_dict['unsup_image'] = unsup_data.pop('image')\n if 'aug_image' in unsup_data:\n output_dict['unsup_aug_image'] = unsup_data.pop('aug_image')\n\n return output_dict", "def combine_data(spectras, compounds) :\n final = {}\n for hmdb_id, spec_objs in spectras.items() :\n c = compounds.pop(hmdb_id, None)\n if not c :\n continue\n c.spectras = spec_objs\n final[hmdb_id] = c\n return final", "def combine_data(self):\n\t\tself.Full_E = None\n\t\tself.Imaginary_Spectrum = None\n\t\tif self.raw_file is not None:\n\t\t\tlogger.info(\"Convert to scattering factors\")\n\t\t\tself.NearEdgeData = data.convert_data(self.raw_file,self.DataTypeCombo.GetValue(),'ASF')\n#\t\t\tif self.InvertDataCheckBox.GetValue():\n#\t\t\t\tself.NearEdgeData[:,1] = numpy.abs(self.NearEdgeData[:,1] - 2*numpy.mean(self.NearEdgeData[:,1]))\n\t\tlogger.info(\"Combine Data\")\n\t\t# Get splice points\n\t\tsplice_eV = numpy.array([10.0, 30000.0]) # Henke limits\n\t\tif self.SpliceText1.GetValue() == \"Start\":\n\t\t\tif self.raw_file is not None:\n\t\t\t\tsplice_eV[0] = self.NearEdgeData[0, 0]\n\t\telse:\n\t\t\tsplice_eV[0] = float(self.SpliceText1.GetValue())\n\t\tif self.SpliceText2.GetValue() == \"End\":\n\t\t\tif self.raw_file is not None:\n\t\t\t\tsplice_eV[1] = self.NearEdgeData[-1, 0]\n\t\telse:\n\t\t\tsplice_eV[1] = float(self.SpliceText2.GetValue())\n\t\tif self.raw_file is not None and self.ASF_Data is None:\n\t\t\tself.Full_E, self.Imaginary_Spectrum, self.NearEdgeData, self.splice_ind = data.merge_spectra(self.NearEdgeData, self.ASF_E, self.ASF_Data, merge_points=splice_eV, add_background=self.AddBackgroundCheckBox.GetValue(), plotting_extras=True)\n\n\t\telif self.raw_file is None and self.ASF_Data is not None:\n\t\t\tself.Full_E = self.ASF_E\n\t\t\tself.Imaginary_Spectrum = self.ASF_Data\n\n\t\telif self.raw_file is not None and self.ASF_Data is not None:\n\t\t\t\n\t\t\tself.Full_E, self.Imaginary_Spectrum, self.NearEdgeData, self.splice_ind = data.merge_spectra(self.NearEdgeData, self.ASF_E, self.ASF_Data, merge_points=splice_eV, add_background=self.AddBackgroundCheckBox.GetValue(), fix_distortions=self.FixDistortionsCheckBox.GetValue(), plotting_extras=True)\n\n\t\t\t### get start and end Y values from nexafs and asf data\n\t\t\t##splice_nexafs_Im = numpy.interp(splice_eV, raw_Im[:, 0], raw_Im[:, 1])\n\t\t\t###splice_asf_Im = numpy.interp(splice_eV, self.total_asf[:, 0], self.total_asf[:, 
2])\n\t\t\t##splice_asf_Im = (data.coeffs_to_ASF(splice_eV[0],self.total_Im_coeffs[numpy.where(self.total_E<splice_eV[0])[0][-1]]),data.coeffs_to_ASF(splice_eV[1],self.total_Im_coeffs[numpy.where(self.total_E<splice_eV[1])[0][-1]]))\n\t\t\t##cut_boolean = (splice_eV[0]<raw_Im[:, 0]) == (raw_Im[:, 0]<splice_eV[1])\n\t\t\t### Merge Y values\n\t\t\t##if not self.AddBackgroundCheckBox.GetValue():\n\t\t\t\t##logger.info(\"Merge data sets\")\n\t\t\t\t##scale = (splice_asf_Im[1]-splice_asf_Im[0])/(splice_nexafs_Im[1]-splice_nexafs_Im[0])\n\t\t\t\t##scaled_nexafs_Im = ((raw_Im[:, 1]-splice_nexafs_Im[0])*scale)+splice_asf_Im[0]\n\t\t\t\t##self.asf_bg = None # We won't be using this variable this time\n\t\t\t##else:\n\t\t\t\t##logger.info(\"Add data sets (this will currently only work at energies below 30 keV)\")\n\t\t\t\t### Set up background function\n\t\t\t\t### We trust this point to be just before the absorption edge\n\t\t\t\t##trusted_ind = max(0, numpy.where(self.total_asf[:, 0]>splice_eV[0])[0][0]-1)\n\t\t\t\t##Log_total_asf = numpy.log(self.total_asf[:, 2])\n\t\t\t\t### Lets trust the 5 points before our trusted point and make an initial guess at the background function\n\t\t\t\t##p = numpy.polyfit(self.total_asf[(trusted_ind-5):trusted_ind, 0], Log_total_asf[(trusted_ind-5):trusted_ind], 1)\n\t\t\t\t### Now lets look for the points up util the absorption edge\n\t\t\t\t##p_vals = numpy.exp(numpy.polyval(p, self.total_asf[(trusted_ind-5):-1, 0]))\n\t\t\t\t##p_err = max(p_vals[0:5]-self.total_asf[(trusted_ind-5):trusted_ind, 2])\n\t\t\t\t##edge_ind = numpy.where(self.total_asf[trusted_ind:-1, 2]-p_vals[4:-1]>p_err*10)\n\t\t\t\t##if len(edge_ind[0])!=0:\n\t\t\t\t\t##edge_ind = edge_ind[0][0]\n\t\t\t\t##else:\n\t\t\t\t\t##edge_ind = trusted_ind\n\t\t\t\t### Redo background using the 5 points before the background point\n\t\t\t\t##p = numpy.polyfit(self.total_asf[(trusted_ind+edge_ind-5):trusted_ind+edge_ind, 0], Log_total_asf[(trusted_ind+edge_ind-5):trusted_ind+edge_ind], 1)\n\t\t\t\t##asf_bg = numpy.exp(numpy.polyval(p, raw_Im[:, 0]))\n\t\t\t\t##logger.info(\"Background defined as: y=exp(%(p1)ex %(p0)+e)\" % {\"p1\":p[1], \"p0\":p[0]})\n\t\t\t\t### Apply background function\n\t\t\t\t##scale = (splice_asf_Im[1]-numpy.exp(numpy.polyval(p, splice_eV[1])))/splice_nexafs_Im[1]\n\t\t\t\t##scaled_nexafs_Im = raw_Im[:, 1]*scale+asf_bg\n\t\t\t\t### store background data for plotting\n\t\t\t\t##cut_boolean_wide = numpy.roll(cut_boolean, -1) + numpy.roll(cut_boolean, 1)\n\t\t\t\t##self.asf_bg = [[trusted_ind+edge_ind-5, trusted_ind+edge_ind], numpy.vstack((raw_Im[cut_boolean_wide, 0], asf_bg[cut_boolean_wide])).T]\n\t\t\t\n\t\t\t##nexafs_cut = numpy.vstack((raw_Im[cut_boolean, 0], scaled_nexafs_Im[cut_boolean])).T\n\t\t\t####Merge point-wise data sets together\n\t\t\t##asf_cut_high = self.total_asf[self.total_asf[:, 0]>splice_eV[1], :]\n\t\t\t##asf_cut_low = self.total_asf[self.total_asf[:, 0]<splice_eV[0], :]\n\t\t\t##self.merged_Im = numpy.vstack((asf_cut_low[:, [0, 2]], (splice_eV[0], splice_asf_Im[0]), nexafs_cut, (splice_eV[1], splice_asf_Im[1]), asf_cut_high[:, [0, 2]]))\n\t\t\t\n\t\t\t####Merge coeff data together\n\t\t\t##coeffs_cut_high = self.total_Im_coeffs[self.total_E[:-1]>splice_eV[1],:]\n\t\t\t##coeffs_cut_low = self.total_Im_coeffs[self.total_E[:-1]<splice_eV[0],:]\n\t\t\t###convert points to coeffs\n\t\t\t##nexafs_coeffs_cut = numpy.zeros((len(nexafs_cut)+1,5))\n\t\t\t##Y = numpy.append(numpy.insert(nexafs_cut[:,1],0,splice_asf_Im[0]),splice_asf_Im[1])\n\t\t\t##nexafs_E = 
numpy.append(numpy.insert(nexafs_cut[:,0],0,splice_eV[0]),splice_eV[1])\n\t\t\t##M = (Y[1:]-Y[:-1])/(nexafs_E[1:]-nexafs_E[:-1])\n\t\t\t##nexafs_coeffs_cut[:,0] = M\n\t\t\t##nexafs_coeffs_cut[:,1] = Y[:-1]-M*nexafs_E[:-1]\n\t\t\t###assemble merged coeffs and energy values\n\t\t\t##self.merged_Im_coeffs = numpy.vstack((coeffs_cut_low, nexafs_coeffs_cut, self.total_Im_coeffs[-coeffs_cut_high.shape[0]-2,:], coeffs_cut_high))\n\t\t\t##self.merged_E = numpy.concatenate((self.total_E[self.total_E<splice_eV[0]], nexafs_E, self.total_E[self.total_E>splice_eV[1]]))\n\t\t\t### Extras for plotting\n\t\t\t##self.splice_ind = (len(asf_cut_low[:, 0]), -len(asf_cut_high[:, 0]))\n\t\t\t##cut_boolean = (splice_eV[0]<=raw_Im[:, 0]) != (raw_Im[:, 0]<=splice_eV[1])\n\t\t\t##self.nexafs_CutOut = numpy.vstack((raw_Im[cut_boolean, 0], scaled_nexafs_Im[cut_boolean])).T\n\t\t### Previous calculation of f_1 is no longer matching displayed f_2 data\n\t\t##self.KK_Real_Spectrum = None", "def remix(self):\n self.original = audio.LocalAudioFile(self.infile)\n #for i, segment in enumerate(self.original.analysis.segments):\n # segment.encode(\"seg_%s.mp3\" % i)\n print \"\\n\\n\\n\"\n loudnesses = [x.timbre[0] for i, x in enumerate(self.original.analysis.segments)]\n brightnesses = [x.timbre[1] for i, x in enumerate(self.original.analysis.segments)]\n flatnesses = [x.timbre[2] for i, x in enumerate(self.original.analysis.segments)]\n attacks = [x.timbre[3] for i, x in enumerate(self.original.analysis.segments)]\n timbre5 = [x.timbre[4] for i, x in enumerate(self.original.analysis.segments)]\n timbre6 = [x.timbre[5] for i, x in enumerate(self.original.analysis.segments)]\n timbre7 = [x.timbre[6] for i, x in enumerate(self.original.analysis.segments)]\n timbre8 = [x.timbre[7] for i, x in enumerate(self.original.analysis.segments)]\n timbre9 = [x.timbre[8] for i, x in enumerate(self.original.analysis.segments)]\n timbre10 = [x.timbre[9] for i, x in enumerate(self.original.analysis.segments)]\n timbre11 = [x.timbre[10] for i, x in enumerate(self.original.analysis.segments)]\n timbre12 = [x.timbre[11] for i, x in enumerate(self.original.analysis.segments)]\n\n print \"AVERAGES\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (avg(loudnesses),avg(brightnesses),avg(flatnesses),avg(attacks),avg(timbre5),avg(timbre6),avg(timbre7),avg(timbre8),avg(timbre9),avg(timbre10),avg(timbre11),avg(timbre12))\n print\n print \"STDVS\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (stddev(loudnesses),stddev(brightnesses),stddev(flatnesses),stddev(attacks),stddev(timbre5),stddev(timbre6),stddev(timbre7),stddev(timbre8),stddev(timbre9),stddev(timbre10),stddev(timbre11),stddev(timbre12))\n\n\n print \"\\tLoud\\tBright\\tFlat\\tAttack\\ttim5\\ttim6\\ttim7\\ttim8\\ttim9\\ttim10\\ttim11\\ttim12\"\n for segment in self.original.analysis.segments:\n if are_kicks(segment): print \"Kick\",\n elif are_snares(segment): print \"Snar\",\n elif are_hats(segment): print \"Hats\",\n else: print \"else\",\n print \"\\t%s\\t%s\\t%s\\t%s\\t%s\" % (segment.timbre[0], segment.timbre[1], segment.timbre[2], segment.timbre[3], segment.timbre[4])\n\n kicks = self.original.analysis.segments.that(are_kicks)\n #if kicks: 
kicks.encode('kicks.mp3')\n snares = self.original.analysis.segments.that(are_snares)\n #if snares: snares.encode('snares.mp3')\n hats = self.original.analysis.segments.that(are_hats)\n #if hats: hats.encode('hats.mp3')\n\n # Time to replace\n hat_sample = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n kick_sample = audio.AudioData(self.sample_path + self.template['kick'], sampleRate=44100, numChannels=2, verbose=False)\n snare_sample = audio.AudioData(self.sample_path + self.template['snare'], sampleRate=44100, numChannels=2, verbose=False)\n \n empty = audio.AudioData(ndarray=numpy.zeros(((self.original.sampleRate * self.original.analysis.duration), 2), dtype=numpy.int16), numChannels=2, sampleRate=44100)\n\n last = 0\n for segment in kicks:\n if last + len(kick_sample.data) > segment.start:\n print \"Adding kick at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(kick_sample.data)] += kick_sample.data\n last = segment.start\n\n last = 0\n for segment in snares:\n if last + len(snare_sample.data) > segment.start:\n print \"Adding snare at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(snare_sample.data)] += snare_sample.data \n last = segment.start\n for segment in hats:\n if last + len(hat_sample.data) > segment.start:\n print \"Adding hat at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(hat_sample.data)] += hat_sample.data\n last = segment.start\n\n audio.mix(empty, self.original, 0.5).encode('mixed.mp3')", "def mix_embeddings(ndata, proj):\n h = ndata['h']\n c = proj(ndata['content'])\n ndata['h'] = h + c[:, :h.shape[1]]", "def array_merge(a1, a2, inplace=False, empty_source=False): \n if inplace:\n out = a1\n else:\n out = copy.deepcopy(a1)\n if empty_source:\n for i in range(len(out)):\n out.pop()\n for k in a2:\n out[k] = a2[k]\n return out", "def _gm_to_ub_one(args):\n tik_instance, data, data_ub, data_offset, ub_offset, ori_nburst, \\\n burst_len, src_stride, dst_stride, cp_align_len = args\n\n if src_stride <= 65535:\n if ori_nburst <= 4095:\n tik_instance.data_move(data_ub[ub_offset],\n data[data_offset],\n 0, ori_nburst,\n burst_len,\n src_stride, dst_stride)\n\n else:\n n_burst = 4095\n c_cycle = ori_nburst // n_burst\n c_mod = ori_nburst % n_burst\n for num_cy in range(c_cycle):\n data_cur = data_offset + (burst_len + src_stride)\\\n * cp_align_len * n_burst * num_cy\n ub_cur = ub_offset + (burst_len + dst_stride)\\\n * cp_align_len * n_burst * num_cy\n tik_instance.data_move(\n data_ub[ub_cur],\n data[data_cur],\n 0, n_burst,\n burst_len,\n src_stride, dst_stride)\n\n if c_mod > 0:\n data_cur = data_offset + (burst_len + src_stride)\\\n * cp_align_len * n_burst * c_cycle\n ub_cur = ub_offset + (burst_len + dst_stride)\\\n * cp_align_len * n_burst * c_cycle\n tik_instance.data_move(\n data_ub[ub_cur],\n data[data_cur],\n 0, c_mod,\n burst_len,\n src_stride, dst_stride)\n\n else:\n for num_nb in range(ori_nburst):\n data_cur = data_offset + (burst_len + src_stride)\\\n * cp_align_len * num_nb\n ub_cur = ub_offset + (burst_len + dst_stride)\\\n * cp_align_len * num_nb\n tik_instance.data_move(\n data_ub[ub_cur],\n data[data_cur],\n 0, 1,\n burst_len,\n 0, 0)", "def __call__(self, results):\n\n results = self._mixup_transform(results)\n return results", "def concat_all(self):\n return self.merge(1)", "def 
union(set1, set2):", "def blake2_128_concat(data):\n return blake2b(data, digest_size=16).digest() + data", "def mconcat(a, b):\r\n if a is None:\r\n return b\r\n if b is None:\r\n return a\r\n for key in b.keyset:\r\n value=get(b,key)\r\n put(a,key,value)\r\n return a", "def _combine(self, results_list):\n pass", "def merge(): #Status: WIP\r\n pass", "def collate_fn(data: list):\n def pad_tensor(inp):\n assert type(inp[0]) == torch.Tensor\n it = iter(inp)\n t = next(it)\n max_shape = list(t.shape)\n while True:\n try:\n t = next(it)\n for i in range(len(max_shape)):\n max_shape[i] = int(max(max_shape[i], t.shape[i]))\n except StopIteration:\n break\n max_shape = np.array(max_shape)\n\n padded_ts = []\n for t in inp:\n pad_pattern = np.zeros(2 * len(max_shape), dtype=np.int64)\n pad_pattern[::-2] = max_shape - np.array(t.shape)\n pad_pattern = tuple(pad_pattern.tolist())\n padded_ts.append(F.pad(t, pad_pattern, 'constant', 0))\n\n return padded_ts\n\n def stack(inp):\n if type(inp[0]) == list:\n ret = []\n for vs in zip(*inp):\n ret.append(stack(vs))\n elif type(inp[0]) == dict:\n ret = {}\n for kvs in zip(*[x.items() for x in inp]):\n ks, vs = zip(*kvs)\n for k in ks:\n assert k == ks[0], \"Key value mismatch.\"\n ret[k] = stack(vs)\n elif type(inp[0]) == torch.Tensor:\n new_t = pad_tensor(inp)\n ret = torch.stack(new_t, 0)\n elif type(inp[0]) == np.ndarray:\n new_t = pad_tensor([torch.from_numpy(x) for x in inp])\n ret = torch.stack(new_t, 0)\n elif type(inp[0]) == str:\n ret = inp\n else:\n raise ValueError('Cannot handle type {}'.format(type(inp[0])))\n return ret\n\n ret = stack(data)\n\n # compute CPU-intensive matrix K1, K2 here to leverage multi-processing nature of dataloader\n # if 'Gs' in ret and 'Hs' in ret and :\n # try:\n # G1_gt, G2_gt = ret['Gs']\n # H1_gt, H2_gt = ret['Hs']\n # sparse_dtype = np.float32\n # K1G = [kronecker_sparse(x, y).astype(sparse_dtype) for x, y in zip(G2_gt, G1_gt)] # 1 as source graph, 2 as target graph\n # K1H = [kronecker_sparse(x, y).astype(sparse_dtype) for x, y in zip(H2_gt, H1_gt)]\n # K1G = CSRMatrix3d(K1G)\n # K1H = CSRMatrix3d(K1H).transpose()\n #\n # ret['Ks'] = K1G, K1H #, K1G.transpose(keep_type=True), K1H.transpose(keep_type=True)\n # except ValueError:\n # pass\n\n return ret", "def mixedrows(self, func=lambda a: random().scramble(a)):\n out = getcopy(self)\n out.mixrows(func)\n return out", "def _combine(data, target, a, b):\r\n data[:, target, :2] = (data[:, a, :2] + data[:, b, :2]) / 2\r\n data[:, target, 2] = harmonic_mean(data[:, a, 2], data[:, b, 2])", "def collate_fn(data):\n # Sort a data list by caption length\n data.sort(key=lambda x: len(x[1]), reverse=True)\n images, captions, bboxes, depends, ids, img_ids = zip(*data)\n\n # Merge images (convert tuple of 3D tensor to 4D tensor)\n images = torch.stack(images, 0)\n bboxes = torch.stack(bboxes, 0)\n\n # Merget captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = lengths[i]\n targets[i, :end] = cap[:end]\n\n return images, targets, bboxes, depends, lengths, ids", "def movies_combin(movies):\n movies['Combined_Data'] = movies.apply(lambda x: combine(x), axis=1)\n return movies", "def brepalgo_ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)", "def combine_bytes(data):\n copy = data[:]\n copy.reverse()\n return sum(x << n*8 for n, x in enumerate(copy))", "def mixup_batch(x, y, alpha=0.3):\n batch_size = 
len(x)\n lambd = Beta(alpha, alpha).sample()\n idxs = torch.randperm(batch_size, device=x.device, requires_grad=False)\n\n x = lambd * x + (1 - lambd) * x[idxs]\n x = torch.cat((x, x))\n y = torch.cat((y, y[idxs]))\n w = torch.cat((torch.full((batch_size,), 2. * lambd, device=x.device, dtype=x.dtype),\n torch.full((batch_size,), 2. * (1 - lambd), device=x.device, dtype=x.dtype)))\n return x, y, w", "def compress(data, selectors):\n return (d for d, s in zip(data, selectors) if s)", "def get_combined_data(self, pad_pulse_length, exclude_subject_name_list=None):\n data, label = [], []\n for i in range(len(self)):\n if exclude_subject_name_list is not None:\n if self.subject_list[i].name in exclude_subject_name_list:\n continue\n subject_data, subject_label = self.subject_list[i].get_combined_data(\n pad_pulse_length=pad_pulse_length)\n # print(subject_data.shape)\n # print(subject_label.shape)\n try:\n data = np.append(data, subject_data, axis=0)\n label = np.append(label, subject_label, axis=0)\n except Exception as e:\n data = subject_data\n label = subject_label\n print(\"Exception in data.py\")\n print(e)\n # print(label)\n label[label == 0] = 2\n label = label - 1\n # print(label)\n return data, label", "def join_data(self, base_data, join_data, base_field, join_fields):\n for data in base_data:\n extra = join_data[data[base_field]]\n for field in join_fields:\n data[field] = extra[field]\n \n return base_data", "def _ub_to_gm_one(args):\n tik_instance, dst, data_res, dst_offset, res_offset, ori_nburst, \\\n burst_len, src_stride, dst_stride, cp_align_len = args\n\n if dst_stride <= 65535:\n if ori_nburst <= 4095:\n tik_instance.data_move(\n dst[dst_offset],\n data_res[res_offset],\n 0, ori_nburst, burst_len,\n src_stride, dst_stride)\n\n else:\n n_burst = 4095\n c_cycle = ori_nburst // n_burst\n c_mod = ori_nburst % n_burst\n\n for num_cy in range(c_cycle):\n dst_cur = dst_offset + (burst_len + dst_stride)\\\n * cp_align_len * n_burst * num_cy\n res_cur = res_offset + (burst_len + src_stride)\\\n * cp_align_len * n_burst * num_cy\n\n tik_instance.data_move(\n dst[dst_cur],\n data_res[res_cur],\n 0, n_burst, burst_len,\n src_stride, dst_stride)\n\n if c_mod > 0:\n dst_cur = dst_offset + (burst_len + dst_stride)\\\n * cp_align_len * n_burst * c_cycle\n res_cur = res_offset + (burst_len + src_stride)\\\n * cp_align_len * n_burst * c_cycle\n\n tik_instance.data_move(\n dst[dst_cur],\n data_res[res_cur],\n 0, c_mod, burst_len,\n src_stride, dst_stride)\n\n else:\n for num_nb in range(ori_nburst):\n dst_cur = dst_offset + (burst_len + dst_stride)\\\n * cp_align_len * num_nb\n res_cur = res_offset + (burst_len + src_stride)\\\n * cp_align_len * num_nb\n\n tik_instance.data_move(\n dst[dst_cur],\n data_res[res_cur],\n 0, 1, burst_len,\n 0, 0)", "def mergeFile():\n with open(\"output.txt\",'w') as o:\n o.write(data1)\n o.write(data2)\n o.write(data3)", "def merge_chunks(self, data):\r\n fn = \"speech_%s_%s.mp3\" % (\r\n data[\"lang\"], data[\"datetime\"].strftime(\"%Y%m%d-%H%M%S\"))\r\n filename_main = unique_path(fn)\r\n with open(filename_main, \"wb\") as f:\r\n # MP3s can be simply concatenated together, result is legible.\r\n for i, filename in enumerate(data[\"filenames\"]):\r\n f.write(open(filename, \"rb\").read())\r\n # Add more silence for separators like commas and periods.\r\n silence_count = 0\r\n if data[\"chunks\"][i][-1] in [\".\",\"?\",\"!\"]:\r\n silence_count = conf.SilenceCountLong\r\n elif data[\"chunks\"][i][-1] in [\",\",\":\",\";\",\"(\",\")\"]:\r\n silence_count = 
conf.SilenceCountShort\r\n f.write(base64.decodestring(conf.Silence) * silence_count)\r\n for filename in data[\"filenames\"]:\r\n try:\r\n os.unlink(filename)\r\n except Exception: pass\r\n data.update(filenames=[filename_main], current=filename_main, count=1)", "def get_combined_data(self, pad_pulse_length):\n global OVER_PAD_LENGTH_COUNT\n \n all_combined_data = []\n all_combined_label = np.array([])\n for k in range(1, 3):\n for (i, j) in product(range(len(self.pulse_data[0])), range(len(self.pulse_data[k]))):\n if self.pulse_data[0][i].shape[0] > pad_pulse_length or self.pulse_data[k][j].shape[\n 0] > pad_pulse_length:\n OVER_PAD_LENGTH_COUNT += 1\n continue\n else:\n padded_general = np.pad(self.pulse_data[0][i], (0,\n pad_pulse_length - self.pulse_data[0][i].shape[\n 0])).reshape(1, -1)\n padded_video = np.pad(self.pulse_data[k][j], (0,\n pad_pulse_length - self.pulse_data[k][j].shape[\n 0])).reshape(1, -1)\n combined_data = np.append(padded_general, padded_video, axis=0).T.reshape(\n 1, pad_pulse_length, 2)\n # print(combined_data.shape)\n try:\n all_combined_data = np.append(\n all_combined_data, combined_data, axis=0)\n except Exception as e:\n all_combined_data = combined_data\n # print(e)\n\n combined_label = self.label_data[k][j]\n all_combined_label = np.append(\n all_combined_label, combined_label)\n\n return all_combined_data, all_combined_label", "def transform(self, data):", "def concatonate(data):\n tmp = np.array(data)\n tmp = np.reshape(tmp, (tmp.shape[0] * tmp.shape[1], -1))\n return tmp", "def test_concat_data(self):\n\n this_satellite_dict = satellite_io.concat_data(\n satellite_dicts=[\n SATELLITE_DICT_SUBSET_BY_INDEX, SATELLITE_DICT_SUBSET_BY_TIME\n ]\n )\n\n self.assertTrue(compare_satellite_dicts(\n this_satellite_dict, SATELLITE_DICT_CONCAT\n ))", "def regular_collate_fn(data):\n\timg, box, q, a = list(zip(*data))\n\tq = torch.nn.utils.rnn.pad_sequence(q, batch_first=True)\n\treturn torch.stack(img), torch.stack(box), q, torch.stack(a).long()", "def _crop_concat(self, upsampled, bypass):\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = F.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)", "def concat(self: TAvalancheDataset, other: TAvalancheDataset) -> TAvalancheDataset:\n return self.__class__([self, other])", "def merge(self, skel):\n return Skeleton.simple_merge((self, skel)).consolidate()", "def __prepare(self, data):\n #print(\"Running Prepare data\")\n #print(data)\n #print(type(data))\n if len(data) > 1:\n if type(data[0]) == np.ndarray:\n return np.concatenate(data)\n else:\n return torch.cat(data).cpu().numpy()\n else:\n return data[0].cpu().numpy()", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def funky_sum(a, b, mix):\n if mix <= 0:\n return a\n elif mix >= 1:\n return b\n else:\n return (1 - mix) * a + mix * b", "def prepare(self):\n A = np.append(self.a1, self.a2, axis=0)\n b = np.append(self.b1, self.b2, axis=0)\n A, b = shuffle(A, b, random_state=0)\n return A, b", "def _merge(self):\n raise NotImplementedError", "def data_list_wdl_merge(data_list1:list, data_list2:list) -> list:\n list_size = len(data_list1)\n merged_data_list = []\n for i in range(list_size):\n merged_data_list.append(pd.concat([data_list1[i],data_list2[i]]))\n return merged_data_list", "def do_mixup(x: torch.Tensor, mixup_lambda: 
torch.Tensor):\n out = (x[0::2].transpose(0, -1) * mixup_lambda[0::2] +\n x[1::2].transpose(0, -1) * mixup_lambda[1::2]).transpose(0, -1)\n return out", "def do_mixup(x: torch.Tensor, mixup_lambda: torch.Tensor):\n out = (x[0::2].transpose(0, -1) * mixup_lambda[0::2] +\n x[1::2].transpose(0, -1) * mixup_lambda[1::2]).transpose(0, -1)\n return out", "def union(s1, s2):\n \"*** YOUR CODE HERE ***\"\n s = set()\n for member in s1:\n s.add(member)\n for member in s2:\n s.add(member)\n return s", "def mixed_feats(self) -> np.ndarray:\n result = self.tracks[0]\n for feats_to_add, gain in zip(self.tracks[1:], self.gains):\n result = self.feature_extractor.mix(\n features_a=result, features_b=feats_to_add, energy_scaling_factor_b=gain\n )\n return result", "def additive_mixing(s, n):\n mixed_audio = s + n\n \n alpha = 1. / np.max(np.abs(mixed_audio))\n mixed_audio *= alpha\n s *= alpha\n n *= alpha\n return mixed_audio, s, n, alpha", "def additive_mixing(s, n):\n mixed_audio = s + n\n \n alpha = 1. / np.max(np.abs(mixed_audio))\n mixed_audio *= alpha\n s *= alpha\n n *= alpha\n return mixed_audio, s, n, alpha", "def merge_data(f, v):\n return np.transpose(np.vstack([f, v]))", "def merge(a,b):\n c = a.copy()\n c.update(b)\n return c", "def union(p1: Iterator[Posting], p2: Iterator[Posting]) -> Iterator[Posting]:\n raise NotImplementedError(\"You need to implement this as part of the assignment.\")", "def get_data_subsets(t0, t1):\n\n # Iridium data:\n irid = iridium[(iridium.time >= t0) & (iridium.time <= t1)]\n irid_B = np.vstack((irid.B_e.values, irid.B_n.values, irid.B_r.values))\n irid_coords = np.vstack((irid.lon.values, irid.lat.values, irid.r.values))\n\n # SuperMAG data:\n smag = supermag.loc[t0:t1, :]\n smag_B = np.vstack((smag.Be.values, smag.Bn.values, smag.Bu.values))\n smag_coords = np.vstack((smag.lon.values, smag.lat.values))\n\n # SuperDARN data:\n sd = superdarn.loc[t0:t1, :]\n vlos = sd['vlos'].values\n sd_coords = np.vstack((sd['glon'].values, sd['glat'].values))\n los = np.vstack((sd['le'].values, sd['ln'].values))\n\n\n # Make the data objects. The scale keyword determines a weight for the dataset. Increase it to reduce weight\n iridium_data = lompe.Data(irid_B * 1e-9, irid_coords, datatype = 'space_mag_fac', scale = 200e-9)\n supermag_data = lompe.Data(smag_B * 1e-9, smag_coords, datatype = 'ground_mag' , scale = 100e-9)\n superdarn_data = lompe.Data(vlos , sd_coords , LOS = los, datatype = 'convection' , scale = 500 )\n\n return(iridium_data, supermag_data, superdarn_data)", "def forward(self, reps_in):\n\n reps_cat = self.cat_reps(reps_in)\n reps_out = self.mix_reps(reps_cat)\n return reps_out", "def combine_dict(self, dict2):\n # iterate through smaller data set\n # base_set will be the larger set and is used for updating\n if len(self.content[\"values\"]) > len(dict2[\"values\"]):\n large_set = self.content[\"values\"]\n small_set = dict2[\"values\"]\n base_set = self.content\n else:\n small_set = self.content[\"values\"]\n large_set = dict2[\"values\"]\n base_set = dict2\n\n subset = {}\n for key in small_set.keys():\n # determine wether to compare keys\n if key in large_set:\n updated_l = large_set[key][\"updated_at\"]\n updated_s = small_set[key][\"updated_at\"]\n if updated_l == 'NULL':\n if updated_s != 'NULL':\n # update to not NULL set\n # if both updated_at are NULL, things\n # are ambiguos. 
We could defer to created_at\n # but for simplicity we will default to\n # the values in the larger set\n subset[key] = small_set[key]\n else:\n if updated_s == 'NULL':\n # update to not NULL set\n subset[key] = large_set[key]\n else:\n if updated_l > updated_s:\n subset[key] = large_set[key]\n else:\n subset[key] =small_set[key]\n else:\n subset[key] = small_set[key]\n base_set[\"values\"].update(subset)\n new_obj = BackupData()\n new_obj.load_from_dict(base_set)\n return new_obj", "def combine_mfcc_id():\n\tmfcc, vad = [], []\n\tall_frames = get_frame()\n\t#read file utt2spk, sorted and store in an array vad\n\twith open('utt2spk') as f:\n\t\tcontent = f.readlines()\n\tcontent = [x.strip() for x in content]\n\n\tfor key in all_frames.keys():\n\t\tmfcc.append(all_frames[key]) #append mfcc to a list \n\t\tfor tuples in content: \n\t\t\twav = tuples.split(' ')[0]\n\t\t\tID = tuples.split(' ')[1]\n\t\t\tif wav == key: #same wav file name\n\t\t\t\tif ID == 'speech': \n\t\t\t \t\tvad.append([1, 0, 0])\n\t\t\t\telif ID == 'noise':\n\t\t\t\t\tvad.append([0, 1, 0])\n\t\t\t\telse:\n\t\t\t\t\tvad.append([0, 0, 1])\n\n\ttrain_mfcc = mfcc[:len(mfcc)/2]\n\tval_mfcc = mfcc[len(mfcc)/2:len(mfcc)*3/4]\n\ttest_mfcc = mfcc[len(mfcc)*3/4:]\n\ttrain_vad = vad[:len(vad)/2]\n\tval_vad = vad[len(vad)/2:len(vad)*3/4]\n\ttest_vad = vad[len(vad)*3/4:]\n\t\n\t################### Train and Validation data are 2D array ######\n\tnew_train_mfcc, new_train_vad = [], []\n\tfor count, matrix in enumerate(train_mfcc): \n\t\tfor row in matrix:\n\t\t\tnew_train_mfcc.append(row) \n\t\t\tnew_train_vad.append(train_vad[count])\n\n\tnew_val_mfcc, new_val_vad = [], []\n\tfor count, matrix in enumerate(val_mfcc):\n\t\tfor row in matrix:\n\t\t\tnew_val_mfcc.append(row)\n\t\t\tnew_val_vad.append(val_vad[count])\n\t#################### Test data is a 3D array #####################\n\tnew_test_mfcc, new_test_vad = [], []\n\tfor count, matrix in enumerate(test_mfcc): #ensure dimension of the data (10-sec)\n\t\tif np.array(matrix).shape == (1003,20): \n\t\t\tnew_test_mfcc.append(matrix)\n\t\t\tnew_test_vad.append([vad[count]])\n\t\n\tnp_train_mfcc = np.array(new_train_mfcc) \n\tnp_val_mfcc = np.array(new_val_mfcc)\n\tnp_test_mfcc = np.array(new_test_mfcc)\n\tnp_train_vad = np.array(new_train_vad)\n\tnp_val_vad = np.array(new_val_vad)\n\tnp_test_vad = np.array(new_test_vad)\n\n\tprint(\"The shape of the train mfcc numpy array is %s\" % (np_train_mfcc.shape,))\n\tprint(\"The shape of the validation mfcc numpy array is %s\" % (np_val_mfcc.shape,))\n\tprint(\"The shape of the test mfcc numpy array is %s\" % (np_test_mfcc.shape,))\n\tprint(\"The shape of the train vad numpy array is %s\" % (np_train_vad.shape,))\n\tprint(\"The shape of the validation vad numpy array is %s\" % (np_val_vad.shape,))\n\tprint(\"The shape of the test vad numpy array is %s\" % (np_test_vad.shape,))\n\n\treturn (np_train_mfcc,np_val_mfcc,np_test_mfcc,np_train_vad,np_val_vad,np_test_vad)", "def collate_fn(data):\r\n # Sort a data list by caption length\r\n data.sort(key=lambda x: len(x[1]), reverse=True)\r\n\r\n images, captions, ids, img_ids = zip(*data)\r\n\r\n # Merge images (convert tuple of 3D tensor to 4D tensor)\r\n images = torch.stack(images, 0)\r\n\r\n # Merget captions (convert tuple of 1D tensor to 2D tensor)\r\n lengths = torch.LongTensor([len(cap) for cap in captions])\r\n targets = torch.zeros(len(captions), max(lengths)).long()\r\n for i, cap in enumerate(captions):\r\n end = lengths[i]\r\n targets[i, :end] = cap[:end]\r\n\r\n return images, 
targets, lengths, ids", "def runCombine(self, cache, dataIdList):\n\n dataRefList = [getDataRef(cache.butler, dataId, self.config.coaddName + \"Coadd_calexp\") for\n dataId in dataIdList]\n\n try:\n diaObject = dataRefList[0].get(f\"{self.config.coaddName}Diff_diaObject\")\n uri = dataRefList[0].getUri(f\"{self.config.coaddName}Diff_diaObject\")\n except Exception:\n self.log.info('Cannot read diaObject for %s' % (dataRefList[0].dataId))\n return\n\n # Use a dictionary of arrays to store data. Probably something more efficient\n data = {}\n data['id'] = []\n data['mjd'] = []\n data['filter'] = []\n for key, value in self.config.keepFields.items():\n data[key] = []\n\n for dataRef in dataRefList:\n\n try:\n calexp = dataRef.get(f\"{self.config.coaddName}Coadd_calexp\")\n except Exception:\n self.log.info('Cannot read data for %s' % (dataRef.dataId))\n continue\n\n visitCatalog = calexp.getInfo().getCoaddInputs().ccds\n\n for visitRec in visitCatalog:\n\n visit = int(visitRec.get('visit'))\n ccd = int(visitRec.get('ccd'))\n dataId = {\"visit\": visit, self.config.ccdKey: ccd}\n\n try:\n src = cache.butler.get(f\"{self.config.coaddName}Diff_forced_dia_src\", dataId)\n diff = cache.butler.get(f\"{self.config.coaddName}Diff_differenceExp\", dataId)\n except Exception as e:\n self.log.debug('Cannot read data for %d %d. skipping %s', visit, ccd, e)\n continue\n\n mjd = diff.getInfo().getVisitInfo().getDate().get(system=DateTime.MJD)\n band = diff.getInfo().getFilter().getName()\n self.log.info('Reading diff forced src with %d sources %s', len(src), dataId)\n\n matches = np.in1d(src['dia_object_id'], diaObject['id'], assume_unique=True)\n\n data['id'].extend(src['dia_object_id'][matches])\n data['mjd'].extend([mjd]*np.sum(matches))\n data['filter'].extend([band]*np.sum(matches))\n for key, value in self.config.keepFields.items():\n data[key].extend(src[value][matches])\n\n tract = dataRefList[0].dataId['tract']\n patch = dataRefList[0].dataId['patch']\n path = os.path.dirname(uri)\n df = pd.DataFrame(data)\n getattr(df, \"to_\" + self.config.storage)(f'{path}/diaCombined_{tract}_{patch}.{self.config.storage}')", "def collate_fn(data):\n # Sort a data list by caption length\n images, captions, cap_mask, vision_mask, labels, vision_labels = zip(*data)\n\n images = torch.stack(images, 0)\n labels = torch.stack(labels, 0)\n vision_labels = torch.stack(vision_labels, 0).long()\n targets = torch.stack(captions, 0).long()\n cap_mask = torch.stack(cap_mask,0).long()\n vision_mask = torch.stack(vision_mask,0).long()\n\n return images, targets, cap_mask, vision_mask, labels, vision_labels", "def collate_fn(data, device=default_device):\n # batch.sort(key=lambda x: len(x[1]), reverse=True)\n has_mask_tensor = True if data[0][-1] is not None else False\n input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor = zip(*data)\n\n input_tensor, input_lengths = padSequence(input_tensor)\n target_tensor, target_lengths = padSequence(target_tensor)\n bs_tensor = torch.as_tensor(bs_tensor, dtype=torch.float, device=device)\n db_tensor = torch.as_tensor(db_tensor, dtype=torch.float, device=device)\n mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if has_mask_tensor else None\n # mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if mask_tensor[0] and mask_tensor[0] != [] else None\n\n # data = input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor\n # if torch.cuda.is_available():\n # data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n 
return input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor # tensors [batch_size, *]", "def Crossover_Function(data1, data2):\n\n # for this function, I modified the uniform crossover function to take care of duplicates after crossover.\n\n data1[1] = 0\n data2[1] = 0\n chromosome1 = list.copy(data1[0])\n chromosome2 = list.copy(data2[0])\n\n #print(\"\\nChromosomes before crossover - \")\n #print(chromosome1)\n #print(chromosome2)\n\n # for each index in both chromosomes, use a coin toss to determine which index is crossed over\n for i in range(len(chromosome1)):\n\n cointoss = random.randrange(2)\n if cointoss == 0:\n chromosome1[i], chromosome2[i] = chromosome2[i], chromosome1[i]\n\n # find duplicates after crossing over\n dupes_in_ch1 = list(duplicates(chromosome1))\n dupes_in_ch2 = list(duplicates(chromosome2))\n\n\n # handle duplicates if any are found\n for i in dupes_in_ch1:\n if i in chromosome1: chromosome1.remove(i)\n chromosome2.append(i)\n \n for i in dupes_in_ch2:\n if i in chromosome2: chromosome2.remove(i)\n chromosome1.append(i)\n\n # replaced the modified chromosomes in the data\n data1[0] = chromosome1\n data2[0] = chromosome2\n\n #print(\"\\nChromsomes after crossover - \")\n #print(data1[0])\n #print(data2[0])\n\n return [data1, data2]", "def normalize_dataset(self):", "def CopyData(self, p_int, vtkDataSetAttributes, p_int_1, vtkDataSetAttributes_1, p_int_2):\n ...", "def ana_merge_datas(datas):\n return {\n 'searches':ana_merge_searches(datas),\n 'senzory_map':ana_merge_senzory_map(datas)\n }", "def _mutualSimplify(data1, data2):\n\n if len(data1) != 3:\n data1 = numpy.transpose(data1)\n if len(data1) != 3:\n raise ValueError(\"Wrong dimensions of data\")\n if len(data2) != 3:\n data2 = numpy.transpose(data2)\n if len(data2) != 3:\n raise ValueError(\"Wrong dimensions of data\")\n\n datax1 = numpy.array(data1[0], float, order=\"C\")\n datay1 = numpy.array(data1[1], float, order=\"C\")\n dataz1 = numpy.array(data1[2], float, order=\"C\")\n\n datax2 = numpy.array(data2[0], float, order=\"C\")\n datay2 = numpy.array(data2[1], float, order=\"C\")\n dataz2 = numpy.array(data2[2], float, order=\"C\")\n\n N1 = len(datax1)\n N2 = len(datax2)\n\n ret = numpy.array([1, 1])\n datax1, datay1, dataz1, datax2, datay2, dataz2, N1, N2 # eclipse warning removal\n code = r\"\"\"\n #line 264 \"binary_search.py\"\n int M = 0;\n int sum = 0;\n int t=0,s=0,k=0, k1;\n int turn=0;\n bool breakflag;\n\n int a;\n position1=vector<point>(N1);\n newposition1=vector<point>(N1);\n\n position2=vector<point>(N2);\n newposition2=vector<point>(N2);\n\n\n for (i=0;i<N1;i++)\n {\n position1[i].x = datax1[i] + 0.000000000000001*(rand()%1000);\n position1[i].y = datay1[i] +0.00000000000000001*(rand()%1000);\n position1[i].z = dataz1[i] + 0.00000000000000001*(rand()%1000);\n }\n\n for (i=0;i<N2;i++)\n {\n position2[i].x = datax2[i] + 0.000000000000001*(rand()%1000);\n position2[i].y = datay2[i] +0.0000000000000000001*(rand()%1000);\n position2[i].z = dataz2[i] + 0.0000000000000000001*(rand()%1000);\n }\n\n todelete1 = vector <int> (N1);\n todelete2 = vector <int> (N2);\n\n for (i=0;i<N1;i++) todelete1[i] == -2;\n for (i=0;i<N2;i++) todelete2[i] == -2;\n\n for (int ttt = 0; ttt < 1; ttt++)\n {\n turn++;\n M=0;\n for (i=0;i<N1;i++) todelete1[i] = -2;\n for (i=0;i<N2;i++) todelete2[i] = -2;\n\n for (int j=1;j<N1-1;j++) //going over all elements trying to delete\n {\n\n breakflag = false; //by default we delete thing\n for (k=0;k<N1;k++) //going over all triangles to 
check\n {\n if (k < j-2 || k > j+1)\n {\n if (k < N1 - 1) k1 = k + 1;\n else k1 = 0;\n sum = intersect(position1[j-1],position1[j],position1[\n j+1],position1[k],position1[k1]);\n if (sum!=0)\n {\n //printf(\"intersection at %d,%d\\n\",j,k);\n breakflag = true; //keeping thing\n break;\n }\n }\n }\n\n if (breakflag == false)\n {\n for (k=0;k<N2;k++) //going over all triangles to check\n {\n if (k < N2 - 1) k1 = k + 1;\n else k1 = 0;\n sum = intersect(position1[j-1],position1[j],position1[\n j+1],position2[k],position2[k1]);\n if (sum!=0)\n {\n //printf(\"crossintersection at %d,%d\\n\",j,k);\n breakflag = true; //keeping thing\n break;\n }\n }\n }\n\n if (breakflag ==false)\n {\n todelete1[M++] = j;\n position1[j] = (position1[j-1] + position1[j+1])* 0.5;\n //printf(\"%d will be deleted at %d\\n\",j,k);\n j++;\n //break;\n }\n }\n t = 0;//pointer for todelete\n s = 0;//pointer for newposition\n if (M==0)\n {\n break;\n }\n for (int j=0;j<N1;j++)\n {\n if (todelete1[t] == j)\n {\n t++;\n continue;\n }\n else\n {\n newposition1[s++] = position1[j];\n }\n }\n N1 = s;\n M = 0;\n t = 0;\n position1 = newposition1;\n }\n\n ret[0] = N1;\n ret[1] = N2;\n\n for (i=0;i<N1;i++)\n {\n datax1[i] = position1[i].x;\n datay1[i] = position1[i].y;\n dataz1[i] = position1[i].z;\n }\n for (i=0;i<N2;i++)\n {\n datax2[i] = position2[i].x;\n datay2[i] = position2[i].y;\n dataz2[i] = position2[i].z;\n }\n\n \"\"\"\n support = r\"\"\"\n#line 415 \"binary_search.py\"\n#include <cstdlib>\n#include <iostream>\n#include <iomanip>\n#include <cmath>\n#include <vector>\n#include <ctime>\n#include <omp.h>\n#include <stdio.h>\nusing namespace std;\nstruct point{\n double x,y,z;\n point operator + (const point &p) const {\n return (point) {x+p.x, y+p.y, z+p.z};\n }\n point operator - (const point &p) const {\n return (point) {x-p.x, y-p.y, z-p.z};\n }\n/* cross product */\n point operator * (const point &p) const {\n return (point) {y*p.z - z*p.y,\n z*p.x - x*p.z,\n x*p.y - y*p.x};\n }\n point operator * (const double &d) const {\n return (point) {d*x, d*y, d*z};\n }\n\n point operator / (const double &d) const {\n return (point) {x/d, y/d, z/d};\n }\n};\n\nvector <point> position1;\nvector <point> newposition1;\nvector <int> todelete1;\nint N1;\nvector <point> position2;\nvector <point> newposition2;\nvector <int> todelete2;\nint N2;\n\n\nint i;\ndouble dist1(int i,int j);\ndouble dist2(int i,int j);\ndouble dotProduct(point a,point b);\nint intersect(point t1,point t2,point t3,point r1,point r2);\n\ninline double sqr(double x){\n return x*x;\n}\ninline double dist1(int i,int j){\nreturn sqrt(dotProduct((position1[i]-position1[j]),(position1[i]-position1[j])));\n}\n\ninline double dist2(int i,int j){\nreturn sqrt(dotProduct((position2[i]-position2[j]),(position2[i]-position2[j])));\n}\n\ninline double dist(point a,point b){\n return sqr(a.x-b.x)+sqr(a.y-b.y)+sqr(a.z-b.z);\n}\n\ninline double dotProduct(point a,point b){\n return a.x*b.x+a.y*b.y+a.z*b.z;\n}\n\nint intersect(point t1,point t2,point t3,point r1,point r2)\n{\npoint A,B,C,D,n;\nint r;\ndouble det,t,u,v,c1,d1,d2,d3;\nB = t2 - t1;\nC = t3 - t1;\nD = r2 - t1;\nA = r2 - r1;\n\nd1 = (B.y*C.z-C.y*B.z);\nd2 = (B.x*C.z-B.z*C.x);\nd3 = (B.x*C.y-C.x*B.y);\ndet = A.x*d1-A.y*d2+A.z*d3;\nif (det == 0) return 0;\nif (det >0){\nt = D.x*d1-D.y*d2+D.z*d3;\nif (t<0 || t>det) return 0;\nu = A.x*(D.y*C.z-C.y*D.z)-A.y*(D.x*C.z-D.z*C.x)+A.z*(D.x*C.y-C.x*D.y);\nif (u<0 || u>det) return 0;\nv = A.x*(B.y*D.z-D.y*B.z)-A.y*(B.x*D.z-B.z*D.x)+A.z*(B.x*D.y-D.x*B.y);\nif (v<0 || v>det || 
(u+v)>det) return 0;\n//printf(\"\\n%lf,%lf,%lf, \",t/det,u/det,v/det);\nn = B*C;\nc1 = dotProduct(r1-t1,n);\nif (c1>0) return 1;\nelse return -1;\n}\nelse{\nt = D.x*d1-D.y*d2+D.z*d3;\nif (t>0 || t<det) return 0;\nu = A.x*(D.y*C.z-C.y*D.z)-A.y*(D.x*C.z-D.z*C.x)+A.z*(D.x*C.y-C.x*D.y);\nif (u>0 || u<det) return 0;\nv = A.x*(B.y*D.z-D.y*B.z)-A.y*(B.x*D.z-B.z*D.x)+A.z*(B.x*D.y-D.x*B.y);\nif (v>0 || v<det || (u+v)<det) return 0;\n//printf(\"\\n%lf,%lf,%lf, \",t/det,u/det,v/det);\nn = B*C;\nc1 = dotProduct(r1-t1,n);\nif (c1>0) return 1;\nelse return -1;\n}\n}\n//DNA conformation\n\"\"\"\n from scipy import weave\n weave.inline(code, ['datax1', 'datay1', 'dataz1', 'N1',\n 'datax2', 'datay2', 'dataz2', 'N2', 'ret'],\n extra_compile_args=['-malign-double'], support_code=support)\n\n data1 = numpy.array([datax1, datay1, dataz1]).T\n data2 = numpy.array([datax2, datay2, dataz2]).T\n\n return data1[:ret[0]], data2[:ret[1]]", "def merge(datasets: Sequence[\"Dataset\"]) -> \"Dataset\":\n ds = datasets[0].copy()\n for dsj in datasets[1:]:\n ds = ds._append_items(dsj, copy=False)\n\n return ds", "def build_default_combine_function(template_hdu_arr, no_data_val=np.nan):\n \n img_arrs = np.array([hdu.data for hdu in template_hdu_arr])\n \n if np.isnan(no_data_val):\n templates = (~np.isnan(img_arrs)).astype(float)\n else:\n templates = (img_arrs != no_data_val).astype(float)\n\n multiplier_arr = np.sum(templates, axis=0)\n multiplier_arr = np.divide(1, multiplier_arr, where=(multiplier_arr != 0))\n for t_arr in templates:\n t_arr *= multiplier_arr\n\n def combine_function(cutout_hdu_arr):\n \"\"\"\n Combiner function that takes an array of `~astropy.io.fits.ImageHdu` \n objects and cobines them into a single image.\n\n Parameters\n ----------\n cutout_hdu_arr : list\n Array of `~astropy.io.fits.ImageHdu` objects that will be \n combined into a single image.\n\n Returns\n -------\n response : array\n The combined image array.\n \"\"\"\n \n cutout_imgs = np.array([hdu.data for hdu in cutout_hdu_arr])\n nans = np.bitwise_and.reduce(np.isnan(cutout_imgs), axis=0)\n \n cutout_imgs[np.isnan(cutout_imgs)] = 0 # don't want any nans because they mess up multiple/add\n \n combined_img = np.sum(templates*cutout_imgs, axis=0)\n combined_img[nans] = np.nan # putting nans back if we need to\n\n return combined_img\n\n return combine_function", "def _getitem_augmentation(self):\n n_tracks = len(self.tracks)\n track_indices = random.choices(range(n_tracks), k=len(self.sources))\n\n sources = []\n\n for _source, trackID in zip(self.sources, track_indices):\n track = self.tracks[trackID]\n source_path = track['path'][_source]\n track_samples = track['samples_original']\n\n start = random.randint(0, track_samples - self.samples - 1)\n source, _ = torchaudio.load(source_path, frame_offset=start, num_frames=self.samples)\n\n # Apply augmentation\n source = self.augmentation(source)\n sources.append(source.unsqueeze(dim=0))\n \n if type(self.target) is list:\n target = []\n for _target in self.target:\n source_idx = self.sources.index(_target)\n _target = sources[source_idx]\n target.append(_target)\n target = torch.cat(target, dim=0)\n\n sources = torch.cat(sources, dim=0)\n mixture = sources.sum(dim=0, keepdim=True)\n else:\n source_idx = self.sources.index(self.target)\n target = sources[source_idx]\n target = target.squeeze(dim=0)\n\n sources = torch.cat(sources, dim=0)\n mixture = sources.sum(dim=0)\n\n return mixture, target", "def compose(self, data):\n return super().compose(data=data)", "def consolidate(self, *args, 
**kwargs):\n kwargs['mode'] = 'consolidate'\n kwargs['mix_before'] = (0, 0)\n kwargs['air_gap'] = 0\n kwargs['disposal_vol'] = 0\n return self.transfer(*args, **kwargs)", "def copyDataFrom (self, other):\n\n self.outErrorPackets=other.outErrorPackets\n self._myHasOutErrorPackets=other._myHasOutErrorPackets\n \n self.inErrorPackets=other.inErrorPackets\n self._myHasInErrorPackets=other._myHasInErrorPackets\n \n self.inDiscardPackets=other.inDiscardPackets\n self._myHasInDiscardPackets=other._myHasInDiscardPackets\n \n self.outUnicastPackets=other.outUnicastPackets\n self._myHasOutUnicastPackets=other._myHasOutUnicastPackets\n \n self.inMulticastPackets=other.inMulticastPackets\n self._myHasInMulticastPackets=other._myHasInMulticastPackets\n \n self.outBroadcastPackets=other.outBroadcastPackets\n self._myHasOutBroadcastPackets=other._myHasOutBroadcastPackets\n \n self.inBroadcastPackets=other.inBroadcastPackets\n self._myHasInBroadcastPackets=other._myHasInBroadcastPackets\n \n self.outMulticastPackets=other.outMulticastPackets\n self._myHasOutMulticastPackets=other._myHasOutMulticastPackets\n \n self.inUnknownProtocolPackets=other.inUnknownProtocolPackets\n self._myHasInUnknownProtocolPackets=other._myHasInUnknownProtocolPackets\n \n self.outDiscardPackets=other.outDiscardPackets\n self._myHasOutDiscardPackets=other._myHasOutDiscardPackets\n \n self.inUnicastPackets=other.inUnicastPackets\n self._myHasInUnicastPackets=other._myHasInUnicastPackets\n \n self.outOctets=other.outOctets\n self._myHasOutOctets=other._myHasOutOctets\n \n self.inOctets=other.inOctets\n self._myHasInOctets=other._myHasInOctets", "def collate_molgraphs(data):\n assert len(data[0]) in [3, 4], \\\n 'Expect the tuple to be of length 3 or 4, got {:d}'.format(len(data[0]))\n if len(data[0]) == 3:\n smiles, graphs, labels = map(list, zip(*data))\n masks = None\n else:\n smiles, graphs, labels, masks = map(list, zip(*data))\n \n bg = dgl.batch(graphs)\n bg.set_n_initializer(dgl.init.zero_initializer)\n bg.set_e_initializer(dgl.init.zero_initializer)\n labels = torch.stack(labels, dim=0)\n \n if masks is None:\n masks = torch.ones(labels.shape)\n else:\n masks = torch.stack(masks, dim=0)\n return smiles, bg, labels, masks", "def combine(new_data, raw_data):\n return pd.merge(new_data, raw_data, on=[\"location\", \"date\"], how=\"outer\")", "def combine(self,data):\n channels = []\n #if type(data.data) == pandas.core.frame.DataFrame:\n # spike_train_all_trial_ = np.array(data.data.stack())\n # spike_train_all_trial = np.reshape(spike_train_all_trial_,(spike_train_all_trial_.shape[0],1))\n #else:\n # spike_train_all_trial_ = np.array(data.data)\n # spike_train_all_trial = np.reshape(spike_train_all_trial_,(spike_train_all_trial_.shape[0],1))\n for c in range(data.nr_cells):\n channels.append(data.cell(c).getFlattend())\n\n d = DesignMatrix(channels[0].shape[0], self.component_width)\n d.setMask(self.mask)\n for c in self.components:\n d.add(c.getSplines(channels),c.header)\n #if self.mask.shape[0] == d.matrix.shape[1] + 1:\n # d.matrix = d.matrix[:,~self.mask[:-1]]\n self._header = d.header\n #print \"--\"\n #print d.matrix.shapeload\n return d.matrix", "def dict_collate(data):\n\n # Assuming there's at least one instance in the batch\n add_data_keys = data[0].keys()\n collected_data = {k: [] for k in add_data_keys}\n\n for i in range(len(list(data))):\n for k in add_data_keys:\n collected_data[k].append(data[i][k])\n\n for k in add_data_keys:\n collected_data[k] = torch.cat(collected_data[k], 0)\n\n # Passing redundant 
information for compatibility\n return collected_data, collected_data[\"target\"]", "def cutmix(batch: Tuple[torch.Tensor, torch.Tensor], alpha: float = 1.0) -> Tuple:\n data, targets = batch\n indices = torch.randperm(data.size(0))\n shuffled_data = data[indices]\n shuffled_targets = targets[indices]\n lam = np.random.beta(alpha, alpha) if alpha > 0 else 1\n\n x0, x1, y0, y1 = random_bbox(data, lam)\n\n data[:, :, y0:y1, x0:x1] = shuffled_data[:, :, y0:y1, x0:x1]\n\n targets = (targets, shuffled_targets, lam)\n\n return data, targets", "def pack(self):\n if self.stack:\n # Should be a straight forward concatenation\n packed = torch.cat(self.data, dim=0)\n else:\n # Need to account for padding values\n from netharn.data.collate import padded_collate\n inbatch = list(ub.flatten(self.data))\n packed = padded_collate(inbatch, fill_value=self.padding_value)\n return packed", "def _merge(dts):\n df = pd.concat(dts)\n\n ma = df.pivot(index='isomir', columns='sample', values='counts')\n ma_mirna = ma\n ma = ma.fillna(0)\n ma_mirna['mirna'] = [m.split(\":\")[0] for m in ma.index.values]\n ma_mirna = ma_mirna.groupby(['mirna']).sum()\n ma_mirna = ma_mirna.fillna(0)\n return ma, ma_mirna", "def mergeData(x,recover,death):\n x = x.rename(columns = {'latest':'confirm'})\n x['recover'] = recover['latest']\n x['death'] = death['latest']\n\n return x", "def burn(self, burnup):\n # Given the burnup, enrichment, and assembly type,\n # the data model returns a new set of data. " ]
[ "0.68712044", "0.62400174", "0.6233847", "0.61989665", "0.6126787", "0.6072504", "0.5922656", "0.59136444", "0.5769046", "0.5631745", "0.5611928", "0.56094086", "0.5598539", "0.55601525", "0.55570143", "0.547382", "0.54204816", "0.5399414", "0.5374631", "0.53570753", "0.5306404", "0.5293094", "0.52837014", "0.5280836", "0.5277209", "0.52770764", "0.52738035", "0.52719444", "0.5232608", "0.5229344", "0.5225368", "0.52096313", "0.519839", "0.51919013", "0.5150675", "0.5143547", "0.514348", "0.51380664", "0.51350415", "0.5123161", "0.51211584", "0.51206344", "0.511616", "0.5107758", "0.50983447", "0.50929344", "0.5085673", "0.50778884", "0.5073732", "0.5072511", "0.50679237", "0.5058537", "0.50506943", "0.5045564", "0.504404", "0.50418466", "0.50340503", "0.5033898", "0.50317705", "0.50022227", "0.5000835", "0.500062", "0.50001705", "0.49954137", "0.49954137", "0.4995292", "0.4992441", "0.49913204", "0.49913204", "0.49876744", "0.49868786", "0.49863848", "0.4975368", "0.49737215", "0.49693298", "0.49666488", "0.49652252", "0.49640793", "0.4958254", "0.49540526", "0.49252623", "0.49241757", "0.4917142", "0.49165425", "0.49145448", "0.4912533", "0.4906831", "0.4904781", "0.48959973", "0.48938525", "0.4888486", "0.48883498", "0.4880765", "0.48788577", "0.48726338", "0.48713526", "0.486612", "0.48614222", "0.48607326", "0.48599505" ]
0.595715
6
Function to crop random bboxes.
def random_bbox(data, lam):
    img_h, img_w = data.shape[2:]
    cx = np.random.uniform(0, img_w)
    cy = np.random.uniform(0, img_h)
    w = img_w * np.sqrt(1 - lam)
    h = img_h * np.sqrt(1 - lam)
    x0 = int(np.round(max(cx - w / 2, 0)))
    x1 = int(np.round(min(cx + w / 2, img_w)))
    y0 = int(np.round(max(cy - h / 2, 0)))
    y1 = int(np.round(min(cy + h / 2, img_h)))
    return x0, x1, y0, y1
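For context, random_bbox is the box-sampling step of a CutMix-style augmentation (a cutmix function that calls it exactly this way appears among the negatives further down): the returned (x0, x1, y0, y1) coordinates delimit the patch that is swapped between images, and the patch area scales roughly with (1 - lam). A minimal usage sketch, assuming numpy and torch are available, a batch tensor data of shape (N, C, H, W), and a Beta-sampled mixing coefficient lam; all names and values here are illustrative and not part of the dataset row:

import numpy as np
import torch

data = torch.rand(8, 3, 32, 32)          # hypothetical batch of 8 RGB 32x32 images
lam = float(np.random.beta(1.0, 1.0))    # mixing coefficient in (0, 1)

x0, x1, y0, y1 = random_bbox(data, lam)  # sampled patch covers roughly (1 - lam) of the image area

idx = torch.randperm(data.size(0))       # pair each image with a shuffled partner
data[:, :, y0:y1, x0:x1] = data[idx, :, y0:y1, x0:x1]  # CutMix-style patch swap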
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_crop_bbox(fs_im, fs_mask, bbox, scale_factor_range):\n # Convert bbox floats to ints.\n bb_x1, bb_y1, bb_x2, bb_y2 = bbox\n # When going from continuous float coordinates to pixel index coordinates, need to subtract 1 from x2 and y2.\n bb_x1, bb_y1, bb_x2, bb_y2 = int(bb_x1), int(bb_y1), int(bb_x2) - 1, int(bb_y2) - 1\n bb_w, bb_h = bb_x2 - bb_x1 + 1, bb_y2 - bb_y1 + 1\n\n # Calculate crop box size.\n scale_factor = np.random.uniform(*scale_factor_range)\n cb_size = int(scale_factor * max(bb_w, bb_h)) # cb is an acronym for crop_box\n cb_size = min(cb_size, fs_im.width, fs_im.height)\n\n # Randomly compute x1 and x2 of crop_box.\n # There are two constraints, 0 <= x1, x2 < width, x1 <= bbox_x1, bbox_x2 <= x2.\n cb_x1_min, cb_x1_max = max(0, bb_x2 - cb_size + 1), min(fs_im.width - cb_size, bb_x1)\n if (cb_x1_min > cb_x1_max):\n print('fs_im.size - {}'.format(fs_im.size))\n print(\"bb_x1, bb_y1, bb_x2, bb_y2 - ({}, {}, {}, {})\".format(bb_x1, bb_y1, bb_x2, bb_y2))\n print(\"bb_w, bb_h - ({}, {})\".format(bb_w, bb_h))\n print(\"scale factor - {}\".format(scale_factor))\n print(\"cb_size - {}\".format(cb_size))\n print(\"cb_x1_min, cb_x1_max - ({}, {})\".format(cb_x1_min, cb_x1_max))\n raise Exception(\"cb_x1_min greater than cb_x1_max\")\n cb_x1 = np.random.randint(cb_x1_min, cb_x1_max + 1)\n cb_x2 = cb_x1 + cb_size - 1\n\n # Randomly compute y1 and y2 of crop_box.\n # There are two constraints, 0 <= y1, y2 < height, y1 <= bbox_y1, bbox_y2 <= y2.\n cb_y1_min, cb_y1_max = max(0, bb_y2 - cb_size + 1), min(fs_im.height - cb_size, bb_y1)\n if (cb_y1_min > cb_y1_max):\n print('fs_im.size - {}'.format(fs_im.size))\n print(\"bb_x1, bb_y1, bb_x2, bb_y2 - ({}, {}, {}, {})\".format(bb_x1, bb_y1, bb_x2, bb_y2))\n print(\"bb_w, bb_h - ({}, {})\".format(bb_w, bb_h))\n print(\"scale factor - {}\".format(scale_factor))\n print(\"cb_size - {}\".format(cb_size))\n print(\"cb_y1_min, cb_y1_max - ({}, {})\".format(cb_y1_min, cb_y1_max))\n raise Exception(\"cb_y1_min greater than cb_y1_max\")\n cb_y1 = np.random.randint(cb_y1_min, cb_y1_max + 1)\n cb_y2 = cb_y1 + cb_size - 1\n\n # Get cropped scene image.\n cs_arr = np.array(fs_im)[cb_y1:cb_y2 + 1, cb_x1:cb_x2 + 1, :]\n cs_im = Image.fromarray(cs_arr)\n\n # Get cropped mask.\n cs_mask = fs_mask[cb_y1:cb_y2 + 1, cb_x1:cb_x2 + 1]\n\n # Get crop_bbox w.r.t. 
full_scene.\n cb_x1, cb_y1 = cb_x1 / fs_im.width, cb_y1 / fs_im.height\n # While converting int coords to continuous float coords, 1 has to be added to the higher coord.\n cb_x2, cb_y2 = (cb_x2 + 1) / fs_im.width, (cb_y2 + 1) / fs_im.height\n cs_bbox = (cb_x1, cb_y1, cb_x2, cb_y2)\n\n return cs_im, cs_mask, cs_bbox", "def random_sample_crop(image, boxes):\n height, width, _ = image.shape\n min_iou = np.random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])\n\n if min_iou is None:\n return image, boxes\n\n # max trails (50)\n for _ in range(50):\n image_t = image\n\n w = _rand(0.3, 1.0) * width\n h = _rand(0.3, 1.0) * height\n\n # aspect ratio constraint b/t .5 & 2\n if h / w < 0.5 or h / w > 2:\n continue\n\n left = _rand() * (width - w)\n top = _rand() * (height - h)\n\n rect = np.array([int(top), int(left), int(top + h), int(left + w)])\n overlap = jaccard_numpy(boxes, rect)\n\n # dropout some boxes\n drop_mask = overlap > 0\n if not drop_mask.any():\n continue\n\n if overlap[drop_mask].min() < min_iou and overlap[drop_mask].max() > (min_iou + 0.2):\n continue\n\n image_t = image_t[rect[0]:rect[2], rect[1]:rect[3], :]\n\n centers = (boxes[:, :2] + boxes[:, 2:4]) / 2.0\n\n m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])\n m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])\n\n # mask in that both m1 and m2 are true\n mask = m1 * m2 * drop_mask\n\n # have any valid boxes? try again if not\n if not mask.any():\n continue\n\n # take only matching gt boxes\n boxes_t = boxes[mask, :].copy()\n\n boxes_t[:, :2] = np.maximum(boxes_t[:, :2], rect[:2])\n boxes_t[:, :2] -= rect[:2]\n boxes_t[:, 2:4] = np.minimum(boxes_t[:, 2:4], rect[2:4])\n boxes_t[:, 2:4] -= rect[:2]\n\n return image_t, boxes_t\n return image, boxes", "def crop_bboxes(bbox_ref, bboxes):\r\n v = np.stack([bbox_ref[0], bbox_ref[1], bbox_ref[0], bbox_ref[1]])\r\n bboxes = bboxes - v\r\n return bboxes", "def random_crop(img, mask):\n if str(img.dtype) != 'uint8':\n img = (img * 255).astype(np.uint8)\n if str(mask.dtype) != 'uint8':\n mask = (mask * 255).astype(np.uint8)\n img = Image.fromarray(img)\n mask = Image.fromarray(mask)\n x, y = img.size\n matrix = 256\n img_list = []\n label_list = []\n for i in range(CROP_NUM):\n x1 = randrange(0, x - matrix)\n y1 = randrange(0, y - matrix)\n img_list.append(img.crop((x1, y1, x1 + matrix, y1 + matrix)))\n label_list.append(mask.crop((x1, y1, x1 + matrix, y1 + matrix)))\n\n return img_list, label_list", "def random_crop(image, gt, crop_height, crop_width, random_state=None):\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n height, width = image.shape[:2]\n\n y = random_state.randint(0, height - crop_height)\n x = random_state.randint(0, width - crop_width)\n\n cropped_image = image[y:y + crop_height, x:x + crop_width, :]\n cropped_gt = gt[y:y + crop_height, x:x + crop_height]\n\n cropped_image = cv2.resize(cropped_image, (width, height), interpolation=cv2.INTER_NEAREST)\n cropped_gt = cv2.resize(cropped_gt, (width, height), interpolation=cv2.INTER_NEAREST)\n\n return cropped_image, cropped_gt", "def random_crop_padding(imgs, target_size, from_tt=False):\n h, w = imgs[0].shape[0:2]\n t_w, t_h = target_size\n p_w, p_h = target_size\n if w == t_w and h == t_h:\n return imgs\n\n t_h = t_h if t_h < h else h\n t_w = t_w if t_w < w else w\n\n\n\n if from_tt==False and random.random() > 3.0 / 8.0 and np.max(imgs[1]) > 0:\n # make sure to crop the text region\n tl = np.min(np.where(imgs[1] > 0), axis=1) - (t_h, t_w)\n tl[tl < 0] = 0\n br = np.max(np.where(imgs[1] 
> 0), axis=1) - (t_h, t_w)\n br[br < 0] = 0\n br[0] = min(br[0], h - t_h)\n br[1] = min(br[1], w - t_w)\n\n i = random.randint(tl[0], br[0]) if tl[0] < br[0] else 0\n j = random.randint(tl[1], br[1]) if tl[1] < br[1] else 0\n else:\n i = random.randint(0, h - t_h) if h - t_h > 0 else 0\n j = random.randint(0, w - t_w) if w - t_w > 0 else 0\n\n n_imgs = []\n for idx in range(len(imgs)):\n if len(imgs[idx].shape) == 3:\n s3_length = int(imgs[idx].shape[-1])\n img = imgs[idx][i:i + t_h, j:j + t_w, :]\n img_p = cv2.copyMakeBorder(img, 0, p_h - t_h, 0, p_w - t_w, borderType=cv2.BORDER_CONSTANT,\n value=tuple(0 for i in range(s3_length)))\n else:\n img = imgs[idx][i:i + t_h, j:j + t_w]\n img_p = cv2.copyMakeBorder(img, 0, p_h - t_h, 0, p_w - t_w, borderType=cv2.BORDER_CONSTANT, value=(0,))\n n_imgs.append(img_p)\n return n_imgs", "def sample_crop_box(self, img_size, results):\n\n assert isinstance(img_size, tuple)\n h, w = img_size[:2]\n\n key_masks = results[self.instance_key].masks\n x_valid_array = np.ones(w, dtype=np.int32)\n y_valid_array = np.ones(h, dtype=np.int32)\n\n selected_mask = key_masks[np.random.randint(0, len(key_masks))]\n selected_mask = selected_mask[0].reshape((-1, 2)).astype(np.int32)\n max_x_start = max(np.min(selected_mask[:, 0]) - 2, 0)\n min_x_end = min(np.max(selected_mask[:, 0]) + 3, w - 1)\n max_y_start = max(np.min(selected_mask[:, 1]) - 2, 0)\n min_y_end = min(np.max(selected_mask[:, 1]) + 3, h - 1)\n\n for key in results.get('mask_fields', []):\n if len(results[key].masks) == 0:\n continue\n masks = results[key].masks\n for mask in masks:\n assert len(mask) == 1\n mask = mask[0].reshape((-1, 2)).astype(np.int32)\n clip_x = np.clip(mask[:, 0], 0, w - 1)\n clip_y = np.clip(mask[:, 1], 0, h - 1)\n min_x, max_x = np.min(clip_x), np.max(clip_x)\n min_y, max_y = np.min(clip_y), np.max(clip_y)\n\n x_valid_array[min_x - 2:max_x + 3] = 0\n y_valid_array[min_y - 2:max_y + 3] = 0\n\n min_w = int(w * self.min_side_ratio)\n min_h = int(h * self.min_side_ratio)\n\n x1, x2 = self.sample_valid_start_end(x_valid_array, min_w, max_x_start,\n min_x_end)\n y1, y2 = self.sample_valid_start_end(y_valid_array, min_h, max_y_start,\n min_y_end)\n\n return np.array([x1, y1, x2, y2])", "def clip_bboxes(self, bboxes, max_width, max_height):\n with tf.variable_scope('clip_bboxes'):\n max_width = tf.cast(max_width, tf.float32)\n max_height = tf.cast(max_height, tf.float32)\n\n y, x, h, w = tf.split(bboxes, 4, axis=1)\n\n minx = tf.minimum(tf.maximum(x - w / 2.0, 0.0), max_width)\n maxx = tf.minimum(tf.maximum(x + w / 2.0, 0.0), max_width)\n miny = tf.minimum(tf.maximum(y - h / 2.0, 0.0), max_height)\n maxy = tf.minimum(tf.maximum(y + h / 2.0, 0.0), max_height)\n\n width = maxx - minx + 1e-10\n x = (minx + maxx) / 2.0\n height = maxy - miny + 1e-10\n y = (miny + maxy) / 2.0\n\n bboxes = tf.concat([y, x, height, width],\n axis=1)\n\n return bboxes", "def test_crop_by_bbox(self):\n with Image.open(self.subject) as im:\n image = im.convert(\"RGB\")\n\n cropped = image_helper.crop_by_bbox(image, BoundingBox(0,0,15,15))\n\n self.assertEqual(cropped.size, (15, 15))", "def random_crop_with_constraints(bbox, size, min_scale=0.3, max_scale=1, min_object_overlap=0.95,\r\n min_aspect_ratio=0.9, max_aspect_ratio=1.1, max_trial=50, eps=1e-5):\r\n candidates = []\r\n assert max_scale == 1, \"required max_scale=1 but got {}\".format(max_scale)\r\n mis, mas, mir, mar = min_scale, max_scale, min_aspect_ratio, max_aspect_ratio\r\n sample_params = [\r\n [1, 1, 1, 1],\r\n [1, 1, mir, mar],\r\n [mis, mas, 1, 
1],\r\n [mis, mas, mir, mar]]\r\n w, h = size\r\n for i in range(4):\r\n mis, mas, mir, mar = sample_params[i]\r\n for _ in range(max_trial):\r\n scale = random.uniform(mis, mas)\r\n aspect_ratio = random.uniform(\r\n max(mir, scale ** 2),\r\n min(mar, 1 / (scale ** 2)))\r\n if w >= h * aspect_ratio:\r\n crop_h = h * scale\r\n crop_w = crop_h * aspect_ratio\r\n else:\r\n crop_w = w * scale\r\n crop_h = crop_w / aspect_ratio\r\n crop_h, crop_w = int(crop_h), int(crop_w)\r\n crop_t = random.randrange(h - crop_h + 1)\r\n crop_l = random.randrange(w - crop_w + 1)\r\n crop_bb = np.array((crop_l, crop_t, crop_l + crop_w, crop_t + crop_h))\r\n iob = bbox_iob(bbox, crop_bb[np.newaxis]).flatten()\r\n iob = iob[iob > 0]\r\n if len(iob) >= bbox.shape[0] * 0.75 and iob.min() >= min_object_overlap - eps:\r\n if i != 3: # 1:1:1:6\r\n candidates.append((crop_l, crop_t, crop_w, crop_h))\r\n else:\r\n candidates.extend([(crop_l, crop_t, crop_w, crop_h)] * 6)\r\n break\r\n\r\n # random select one\r\n while candidates:\r\n crop = candidates.pop(np.random.randint(0, len(candidates)))\r\n new_bbox = gbbox.crop(bbox, crop, allow_outside_center=False)\r\n if new_bbox.size < 1:\r\n continue\r\n new_crop = (crop[0], crop[1], crop[2], crop[3])\r\n return new_bbox, new_crop\r\n\r\n min_len = int(min(h, w) * random.uniform(min_scale, max_scale))\r\n crop_h, crop_w = min_len, min_len\r\n for _ in range(max_trial):\r\n crop_t = random.randrange(h - crop_h + 1)\r\n crop_l = random.randrange(w - crop_w + 1)\r\n crop = (crop_l, crop_t, crop_w, crop_h)\r\n new_bbox = gbbox.crop(bbox, crop, allow_outside_center=False)\r\n if new_bbox.size >= bbox.size * 0.5:\r\n return new_bbox, crop\r\n\r\n return bbox, (0, 0, w, h)", "def bbox_clip(bboxes,img_shape):\n assert bboxes.shape[-1] % 4 == 0\n clipped_bboxes = np.empty_like(bboxes,dtype=bboxes.dtype)\n clipped_bboxes[...,0::2] = np.maximum(np.minimum(bboxes[...,0::2],img_shape[1]-1),0)\n clipped_bboxes[...,1::2] = np.maximum(np.minimum(bboxes[...,1::2],img_shape[0]-1),0)\n \n return clipped_bboxes", "def __randomCrop(self, img):\n limit = self.PROCESSING_DIM - self.INPUT_DIM\n # pick 2 random integers less than this limit as the origin of the cropped image\n x_start = np.random.randint(limit)\n y_start = np.random.randint(limit)\n return img.crop((x_start, y_start, x_start + self.INPUT_DIM, y_start + self.INPUT_DIM))", "def random_bbox(self, shape, margin, bbox_shape):\r\n img_height = shape\r\n img_width = shape\r\n height = bbox_shape\r\n width = bbox_shape\r\n ver_margin = margin\r\n hor_margin = margin\r\n maxt = img_height - ver_margin - height\r\n maxl = img_width - hor_margin - width\r\n t = np.random.randint(low = ver_margin, high = maxt)\r\n l = np.random.randint(low = hor_margin, high = maxl)\r\n h = height\r\n w = width\r\n return (t, l, h, w)", "def random_bbox(self, shape, margin, bbox_shape):\r\n img_height = shape\r\n img_width = shape\r\n height = bbox_shape\r\n width = bbox_shape\r\n ver_margin = margin\r\n hor_margin = margin\r\n maxt = img_height - ver_margin - height\r\n maxl = img_width - hor_margin - width\r\n t = np.random.randint(low = ver_margin, high = maxt)\r\n l = np.random.randint(low = hor_margin, high = maxl)\r\n h = height\r\n w = width\r\n return (t, l, h, w)", "def test_random_crop(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = RandomCrop(size=(64, 64))\n _image, _label = transform(image, label)\n assert _image.shape == (64, 64, image.shape[2])\n 
assert _label.shape == (64, 64, label.shape[2])\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = RandomCrop(size=(64, 64, 8))\n _image, _label = transform(image, label)\n assert _image.shape == (64, 64, 8, image.shape[3])\n assert _label.shape == (64, 64, 8, label.shape[3])", "def img_agu_crop(img_):\n\tscale_ = 5\n\txmin = max(0, random.randint(0, scale_))\n\tymin = max(0, random.randint(0, scale_))\n\txmax = min(img_.shape[1]-1, img_.shape[1]-random.randint(0, scale_))\n\tymax = min(img_.shape[0]-1, img_.shape[0]-random.randint(0, scale_))\n\treturn img_[ymin : ymax, xmin : xmax , : ]", "def __call__(self, results):\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n assert 'bbox_fields' in results\n boxes = [results[key] for key in results['bbox_fields']]\n boxes = np.concatenate(boxes, 0)\n h, w, c = img.shape\n while True:\n mode = random.choice(self.sample_mode)\n self.mode = mode\n if mode == 1:\n return results\n\n min_iou = mode\n for i in range(50):\n new_w = random.uniform(self.min_crop_size * w, w)\n new_h = random.uniform(self.min_crop_size * h, h)\n\n # h / w in [0.5, 2]\n if new_h / new_w < 0.5 or new_h / new_w > 2:\n continue\n\n left = random.uniform(w - new_w)\n top = random.uniform(h - new_h)\n\n patch = np.array(\n (int(left), int(top), int(left + new_w), int(top + new_h)))\n # Line or point crop is not allowed\n if patch[2] == patch[0] or patch[3] == patch[1]:\n continue\n overlaps = bbox_overlaps(\n patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)\n if len(overlaps) > 0 and overlaps.min() < min_iou:\n continue\n\n # center of boxes should inside the crop img\n # only adjust boxes and instance masks when the gt is not empty\n if len(overlaps) > 0:\n # adjust boxes\n def is_center_of_bboxes_in_patch(boxes, patch):\n center = (boxes[:, :2] + boxes[:, 2:]) / 2\n mask = ((center[:, 0] > patch[0]) *\n (center[:, 1] > patch[1]) *\n (center[:, 0] < patch[2]) *\n (center[:, 1] < patch[3]))\n return mask\n\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n if not mask.any():\n continue\n for key in results.get('bbox_fields', []):\n boxes = results[key].copy()\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n boxes = boxes[mask]\n if self.bbox_clip_border:\n boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])\n boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])\n boxes -= np.tile(patch[:2], 2)\n\n results[key] = boxes\n # labels\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][mask]\n\n # mask fields\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][\n mask.nonzero()[0]].crop(patch)\n # adjust the img no matter whether the gt is empty before crop\n img = img[patch[1]:patch[3], patch[0]:patch[2]]\n results['img'] = img\n results['img_shape'] = img.shape\n\n # seg fields\n for key in results.get('seg_fields', []):\n results[key] = results[key][patch[1]:patch[3],\n patch[0]:patch[2]]\n return results", "def random_offset_bounds(self) -> utils.BoxRegion:\n extra_size = self.random_canvas_extra_ratio * self.canvas_bounds().size / 2\n return utils.BoxRegion(\n minimum=-extra_size,\n maximum=extra_size\n )", "def crop(self, bbox):\n skeleton = self.clone()\n bbox = Bbox.create(bbox)\n\n if skeleton.empty():\n return skeleton\n\n nodes_valid_mask = np.array(\n [ bbox.contains(vtx) for vtx in 
skeleton.vertices ], dtype=bool\n )\n nodes_valid_idx = np.where(nodes_valid_mask)[0]\n\n # Set invalid vertices to be duplicates\n # so they'll be removed during consolidation\n if nodes_valid_idx.shape[0] == 0:\n return Skeleton()\n\n first_node = nodes_valid_idx[0]\n skeleton.vertices[~nodes_valid_mask] = skeleton.vertices[first_node]\n \n edges_valid_mask = np.isin(skeleton.edges, nodes_valid_idx)\n edges_valid_idx = edges_valid_mask[:,0] * edges_valid_mask[:,1] \n skeleton.edges = skeleton.edges[edges_valid_idx,:]\n return skeleton.consolidate()", "def n_random_crop(img, height, width, n):\n crops = []\n img_width, img_height = img.shape\n for i in range(n):\n x = np.random.randint(0, img_width - width)\n y = np.random.randint(0, img_height - height)\n crops.append(img[x:x + height, y:y + width])\n return np.array(crops)", "def crop_from_dets(\n img, \n bboxes, \n target_height, \n target_width,\n extra_zoom\n):\n\n imght = img.size(1)\n imgwidth = img.size(2)\n tmp_img = img\n # normalization (per-channel)\n tmp_img[0].add_(-0.406)\n tmp_img[1].add_(-0.457)\n tmp_img[2].add_(-0.480)\n \n crops = []\n bboxes_zoomed = []\n for box in bboxes:\n upLeft = torch.Tensor(\n (float(box[0]), float(box[1])))\n bottomRight = torch.Tensor(\n (float(box[2]), float(box[3])))\n\n ht = bottomRight[1] - upLeft[1]\n width = bottomRight[0] - upLeft[0]\n if width > 100:\n scaleRate = 0.2\n else:\n scaleRate = 0.3\n\n # zooming the predicted bounding box\n upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)\n upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)\n bottomRight[0] = max(\n min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)\n bottomRight[1] = max(\n min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)\n \n # ADD EXTRA EXPANSION BECAUSE OF ARMS OUT OF THE BOX !!!\n # i.e. shift x-coordinate of the box corner to right or to left\n if extra_zoom == 'right_cam':\n bottomRight[0] += min(bottomRight[0]-upLeft[0], imgwidth-bottomRight[0])\n elif extra_zoom == 'left_cam':\n upLeft[0] -= min(upLeft[0], bottomRight[0]-upLeft[0])\n \n crops.append(cropBox(tmp_img, upLeft, bottomRight, target_height, target_width)[None,...])\n bboxes_zoomed.append(torch.cat((upLeft, bottomRight))[None,...])\n \n crops = torch.cat(crops, dim=0)\n bboxes_zoomed = torch.cat(bboxes_zoomed)\n \n return crops, bboxes_zoomed", "def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n # placeholder\n rois = np.zeros((count, 4), dtype=np.int32)\n\n # Generate random ROIs around GT boxes (90% of count)\n rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n for i in range(gt_boxes.shape[0]):\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n h = gt_y2 - gt_y1\n w = gt_x2 - gt_x1\n # random boundaries\n r_y1 = max(gt_y1 - h, 0)\n r_y2 = min(gt_y2 + h, image_shape[0])\n r_x1 = max(gt_x1 - w, 0)\n r_x2 = min(gt_x2 + w, image_shape[1])\n\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. 
If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >= threshold][:rois_per_box]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >= threshold][:rois_per_box]\n if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n box_rois = np.hstack([y1, x1, y2, x2])\n rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n # Generate random ROIs anywhere in the image (10% of count)\n remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >= threshold][:remaining_count]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >= threshold][:remaining_count]\n if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n global_rois = np.hstack([y1, x1, y2, x2])\n rois[-remaining_count:] = global_rois\n return rois", "def _train_aug(self, results):\n img = results['img']\n h, w, c = img.shape\n boxes = results['gt_bboxes']\n while True:\n scale = random.choice(self.ratios)\n new_h = int(self.crop_size[0] * scale)\n new_w = int(self.crop_size[1] * scale)\n h_border = self._get_border(self.border, h)\n w_border = self._get_border(self.border, w)\n\n for i in range(50):\n center_x = random.randint(low=w_border, high=w - w_border)\n center_y = random.randint(low=h_border, high=h - h_border)\n\n cropped_img, border, patch = self._crop_image_and_paste(\n img, [center_y, center_x], [new_h, new_w])\n\n mask = self._filter_boxes(patch, boxes)\n # if image do not have valid bbox, any crop patch is valid.\n if not mask.any() and len(boxes) > 0:\n continue\n\n results['img'] = cropped_img\n results['img_shape'] = cropped_img.shape\n results['pad_shape'] = cropped_img.shape\n\n x0, y0, x1, y1 = patch\n\n left_w, top_h = center_x - x0, center_y - y0\n cropped_center_x, cropped_center_y = new_w // 2, new_h // 2\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get('bbox_fields', []):\n mask = self._filter_boxes(patch, results[key])\n bboxes = results[key][mask]\n bboxes[:, 0:4:2] += cropped_center_x - left_w - x0\n bboxes[:, 1:4:2] += cropped_center_y - top_h - y0\n if self.bbox_clip_border:\n bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)\n bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)\n keep = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n bboxes = bboxes[keep]\n results[key] = bboxes\n if key in ['gt_bboxes']:\n if 'gt_labels' in results:\n labels = results['gt_labels'][mask]\n labels = labels[keep]\n results['gt_labels'] = labels\n if 
'gt_masks' in results:\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n return results", "def _crop_bbox(batch_data: List[List[int]],\n crop_dims: Tuple[int, int, int, int]) -> None:\n for idx, bbox in enumerate(batch_data['bboxes']):\n # Shift bbox top left, clip at 0\n b_x1 = max(bbox[0] - crop_dims[0], 0)\n b_y1 = max(bbox[1] - crop_dims[1], 0)\n # Shift bbox top left, clip at bottom right boundary\n # if image crop encroaches on transformed bbox.\n b_x2 = min(bbox[2] - crop_dims[0], crop_dims[2])\n b_y2 = min(bbox[3] - crop_dims[1], crop_dims[3])\n\n if b_x1 > crop_dims[2] or b_y1 > crop_dims[3]:\n Warning(f\"Box {idx}: {bbox} outside of crop {crop_dims}\")\n del batch_data['labels'][idx]\n del batch_data['bboxes'][idx]\n else:\n batch_data['bboxes'][idx] = [b_x1, b_y1, b_x2, b_y2]", "def _crop_data(self, results, crop_size, allow_negative_crop):\n assert crop_size[0] > 0 and crop_size[1] > 0\n for key in results.get('img_fields', ['img']):\n img = results[key]\n margin_h = max(img.shape[0] - crop_size[0], 0)\n margin_w = max(img.shape[1] - crop_size[1], 0)\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]\n\n # crop the image\n img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n img_shape = img.shape\n results[key] = img\n results['img_shape'] = img_shape\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get('bbox_fields', []):\n # e.g. gt_bboxes and gt_bboxes_ignore\n bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],\n dtype=np.float32)\n bboxes = results[key] - bbox_offset\n if self.bbox_clip_border:\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n # If the crop does not contain any gt-bbox area and\n # allow_negative_crop is False, skip this image.\n if (key == 'gt_bboxes' and not valid_inds.any()\n and not allow_negative_crop):\n return None\n results[key] = bboxes[valid_inds, :]\n # label fields. e.g. gt_labels and gt_labels_ignore\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][valid_inds]\n\n # mask fields, e.g. 
gt_masks and gt_masks_ignore\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][\n valid_inds.nonzero()[0]].crop(\n np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))\n if self.recompute_bbox:\n results[key] = results[mask_key].get_bboxes()\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]\n\n return results", "def crop_random(X,Y,random_crop=False,size_crop=_size_crop):\n b = size_crop//2\n shape = tf.shape(X)\n if random_crop: \n cx = tf.random.uniform(shape=(1,),minval=b,maxval=(shape[0]-b),dtype=tf.int32)[0]\n cy = tf.random.uniform(shape=(1,),minval=b,maxval=(shape[1]-b),dtype=tf.int32)[0]\n return X[cx-b:cx+b,cy-b:cy+b,...], Y[cx-b:cx+b,cy-b:cy+b,...]\n else: \n return crop(X,size_crop=size_crop),crop(Y,size_crop=size_crop)", "def gen_random_bboxes(self, n, meta):\n def get_mins_and_maxs(d):\n a = int(d / (4 * grid_dim + 1))\n b = 3 * a\n mins = []\n maxs = []\n for i in range(grid_dim):\n mn = (1 + i) * a + i * b\n mx = mn + b\n mins.append(mn)\n maxs.append(mx)\n return mins, maxs\n\n grid_dim = int(np.ceil(np.sqrt(n)))\n xmins, xmaxs = get_mins_and_maxs(meta['width'])\n ymins, ymaxs = get_mins_and_maxs(meta['height'])\n\n bboxes = []\n for ymin, ymax in zip(ymins, ymaxs):\n for xmin, xmax in zip(xmins, xmaxs):\n bboxes.append({'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax})\n\n return bboxes", "def filter_bboxes(\n bboxes: Sequence[BoxType],\n rows: int,\n cols: int,\n min_area: float = 0.0,\n min_visibility: float = 0.0,\n min_width: float = 0.0,\n min_height: float = 0.0,\n) -> List[BoxType]:\n resulting_boxes: List[BoxType] = []\n for bbox in bboxes:\n # Calculate areas of bounding box before and after clipping.\n transformed_box_area = calculate_bbox_area(bbox, rows, cols)\n bbox, tail = cast(BoxType, tuple(np.clip(bbox[:4], 0, 1.0))), tuple(bbox[4:])\n clipped_box_area = calculate_bbox_area(bbox, rows, cols)\n\n # Calculate width and height of the clipped bounding box.\n x_min, y_min, x_max, y_max = denormalize_bbox(bbox, rows, cols)[:4]\n clipped_width, clipped_height = x_max - x_min, y_max - y_min\n\n if (\n clipped_box_area != 0 # to ensure transformed_box_area!=0 and to handle min_area=0 or min_visibility=0\n and clipped_box_area >= min_area\n and clipped_box_area / transformed_box_area >= min_visibility\n and clipped_width >= min_width\n and clipped_height >= min_height\n ):\n resulting_boxes.append(cast(BoxType, bbox + tail))\n return resulting_boxes", "def crop_all_bounding_boxes(boxes, image_path, crop_path):\n index = 0\n for box in boxes:\n object_class = box[0]\n cropped_image = crop_bounding_box_from_image(\n box, image_path, crop_path)\n filename = object_class + \"_\" + os.path.basename(image_path)\n while os.path.isfile(os.path.join(crop_path, filename)):\n print('File %s already exists!' 
% (filename))\n index += 1\n filename = str(index) + \"_\" + filename\n cropped_image.save(filename)", "def random_crop(image, ratio = 0.75):\n reshape_size = image.shape[0]\n width = int(reshape_size * ratio)\n height = int(reshape_size * ratio)\n x = random.randint(0, reshape_size - width)\n y = random.randint(0, reshape_size - height)\n image = image[y:y+height, x:x+width, :] \n return image", "def clip_boxes(bboxes, imshape):\n with tf.name_scope('BoundingBoxTransform/clip_bboxes'):\n bboxes = tf.cast(bboxes, dtype=tf.float32)\n imshape = tf.cast(imshape, dtype=tf.float32)\n\n #pylint:disable=redundant-keyword-arg\n x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)\n width = imshape[1]\n height = imshape[0]\n x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)\n x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)\n\n y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)\n y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)\n\n bboxes = tf.concat([x1, y1, x2, y2], axis=1)\n\n return bboxes", "def distorted_bounding_box_crop(image,\n labels,\n bboxes,\n min_object_covered=0.5,\n aspect_ratio_range=(0.9, 1.1),\n area_range=(0.2, 1.0),\n max_attempts=200,\n clip_bboxes=True,\n scope=None):\n with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bboxes]):\n # Each bounding box has shape [1, num_boxes, box coords] and\n # the coordinates are ordered [ymin, xmin, ymax, xmax].\n bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=tf.expand_dims(bboxes, 0),\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n distort_bbox = distort_bbox[0, 0]\n\n # Crop the image to the specified bounding box.\n cropped_image = tf.slice(image, bbox_begin, bbox_size)\n # Restore the shape since the dynamic slice loses 3rd dimension.\n cropped_image.set_shape([None, None, 3])\n\n # Update bounding boxes: resize and filter out.\n bboxes = tfe.bboxes_resize(distort_bbox, bboxes)\n labels, bboxes = tfe.bboxes_filter_overlap(labels, bboxes,\n threshold=BBOX_CROP_OVERLAP,\n assign_negative=False)\n return cropped_image, labels, bboxes, distort_bbox", "def randomcorners():\n r = lambda x: random.randint(int(x*0.4), int(x*0.6))\n cx = r(gs.DEFAULTS['width'])\n cy = r(gs.DEFAULTS['height'])\n\n w = int(gs.DEFAULTS['width'] * random.random() * 0.2)\n h = int(gs.DEFAULTS['height'] * random.random() * 0.2)\n\n rcrns = [(cx-w, cy-h), (cx+w, cy-h), (cx+w, cy+h), (cx-w, cy+h)]\n random.shuffle(rcrns)\n\n return rcrns", "def _decode_and_random_crop(image_buffer, bbox, image_size):\n with tf.name_scope('distorted_bounding_box_crop',\n values=[image_buffer, bbox]):\n sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n tf.image.extract_jpeg_shape(image_buffer),\n bounding_boxes=bbox,\n min_object_covered=0.1,\n aspect_ratio_range=[0.75, 1.33],\n area_range=[0.08, 1.0],\n max_attempts=10,\n use_image_if_no_bounding_boxes=True)\n bbox_begin, bbox_size, _ = sample_distorted_bounding_box\n\n # Crop the image to the specified bounding box.\n offset_y, offset_x, _ = tf.unstack(bbox_begin)\n target_height, target_width, _ = tf.unstack(bbox_size)\n crop_window = tf.stack([offset_y, offset_x, target_height, target_width])\n image = tf.image.decode_and_crop_jpeg(image_buffer, crop_window, channels=3)\n image = tf.image.convert_image_dtype(\n image, dtype=tf.float32)\n\n image = tf.image.resize_bicubic([image],\n [image_size, 
image_size])[0]\n\n return image", "def custom_cutout(min_box=None, max_box=None):\n def _inner(img):\n w, h = img.size\n\n # find left, upper, right, lower\n box_sz = np.random.randint(min_box, max_box + 1)\n half_box_sz = int(np.floor(box_sz / 2.))\n x_c = np.random.randint(half_box_sz, w - half_box_sz)\n y_c = np.random.randint(half_box_sz, h - half_box_sz)\n box = (\n x_c - half_box_sz, \n y_c - half_box_sz, \n x_c + half_box_sz,\n y_c + half_box_sz\n )\n img.paste(0, box=box)\n return img\n\n return _inner", "def resize_bboxes(ratios, bboxes):\r\n ymin = bboxes[..., 0] * ratios[0]\r\n xmin = bboxes[..., 1] * ratios[1]\r\n ymax = bboxes[..., 2] * ratios[0]\r\n xmax = bboxes[..., 3] * ratios[1]\r\n bboxes = np.stack([ymin, xmin, ymax, xmax], axis=-1)\r\n return bboxes", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def crop(self, xdiv, ydiv, img, bBoxes=None):\n xstride = img.shape[1] // xdiv\n ystride = img.shape[0] // ydiv\n\n widthLimits = np.zeros((xdiv+1,), dtype=np.int32)\n heightLimits = np.zeros((ydiv+1), dtype=np.int32)\n croppedImages = [[] for _ in range(xdiv*ydiv)]\n croppedBoxes = [[] for _ in range(xdiv*ydiv)]\n index = 0\n for x in range(0, img.shape[1]+1, xstride):\n widthLimits[index] = x\n index += 1\n index = 0\n for y in range(0, img.shape[0]+1, ystride):\n heightLimits[index] = y\n index+=1\n index = 0\n for i in range(len(widthLimits)-1):\n for j in range(len(heightLimits)-1):\n croppedImages[index] = img[heightLimits[j]:heightLimits[j+1], widthLimits[i]:widthLimits[i+1]]\n index += 1\n if bBoxes:\n for box in bBoxes:\n index = 0\n for i in range(len(widthLimits)-1):\n for j in range(len(heightLimits)-1):\n if box[0] >= widthLimits[i] and box[2] < widthLimits[i+1] \\\n and box[1] >= heightLimits[j] and box[3] < heightLimits[j+1]:\n box[0] -= widthLimits[i]\n box[2] -= widthLimits[i]\n box[1] -= heightLimits[j]\n box[3] -= heightLimits[j]\n croppedBoxes[index].append(box)\n index += 1\n return croppedImages, croppedBoxes", "def crop_random(crop_size_x, crop_size_y, image, corrupted_im=None):\r\n h, w = image.shape\r\n limit_x, limit_y = h - crop_size_x, w - crop_size_y\r\n start_x = random.randint(0, limit_x)\r\n start_y = random.randint(0, limit_y)\r\n cropped_im = image[start_x: start_x + crop_size_x, start_y: start_y + crop_size_y]\r\n if corrupted_im is not None:\r\n corrupted_im = corrupted_im[start_x: start_x + crop_size_x, start_y: start_y + crop_size_y]\r\n return cropped_im, corrupted_im", "def random_flip(img, bbox):\n _, H, W = img.shape\n bbox = bbox.copy()\n y_flip = random.choice([True, False])\n x_flip = random.choice([True, False])\n if y_flip:\n y_max = H - bbox[:, 0]\n y_min = H - bbox[:, 2]\n bbox[:, 0] = y_min\n bbox[:, 2] = y_max\n img = img[:, ::-1, :]\n if x_flip:\n x_max = W - bbox[:, 1]\n x_min = W - bbox[:, 3]\n bbox[:, 1] = x_min\n bbox[:, 3] = x_max\n img = img[:, :, ::-1]\n\n return img, bbox", "def 
_generate_crop(self):\n if self.box_drawn == True:\n if (self.cd_pic_num != -1) & (self.cd_crop_num == 1):\n self.communicator.generate_crop(picture_num=self.cd_pic_num, \\\n xa=self.xa, ya=self.ya, xb=self.xb, yb=self.yb)\n else:\n print \"ERROR: can only generate a new crop from a thumbnail\"\n else:\n print \"ERROR: please select an area to generate a crop from\"", "def bboxes_clip(bbox_ref, bboxes):\n bboxes = np.copy(bboxes)\n bboxes = np.transpose(bboxes)\n bbox_ref = np.transpose(bbox_ref)\n bboxes[0] = np.maximum(bboxes[0], bbox_ref[0])\n bboxes[1] = np.maximum(bboxes[1], bbox_ref[1])\n bboxes[2] = np.minimum(bboxes[2], bbox_ref[2])\n bboxes[3] = np.minimum(bboxes[3], bbox_ref[3])\n bboxes = np.transpose(bboxes)\n return bboxes", "def crop_img(img, random_tab):\n dy, dx = (i / 6 for i in img.shape[:2])\n x1 = int(random_tab[0] * dx)\n x2 = int((random_tab[1] + 5) * dx)\n y1 = int(random_tab[2] * dy)\n y2 = int((random_tab[1] + 5) * dy)\n img = img[y1:y2, x1:x2]\n return img", "def crop(self, padding, random=True):\n self.get_roi(padding=padding, random=random)\n self.bgr = self.camera_model.crop_resize_image(self.bgr)\n self.depth = self.camera_model.crop_resize_image(\n self.depth, interpolation=Image.NEAREST)", "def center_crop2fixed_cut(im, masks, mask, boxes, classes, target_width, target_height, min_size=2):\n\n h, w, c = im.shape\n if float(target_width) / w > float(target_height) / h:\n new_w, new_h = int(target_width), int(float(target_width) / w * h)\n else:\n new_w, new_h = int(float(target_height) / h * w), int(target_height)\n\n scale = float(new_w) / w\n offset_w, offset_h = 0, 0\n if new_w - target_width + 1 > 0 and new_h - target_height + 1 > 0:\n offset_w = np.random.randint(0, new_w - target_width + 1)\n offset_h = np.random.randint(0, new_h - target_height + 1)\n # offset_w = int((new_w - target_width) / 2)\n # offset_h = int((new_h - target_height) / 2)\n\n im = cv2.resize(im, (new_w, new_h))\n mask = cv2.resize(mask, (new_w, new_h), interpolation=cv2.INTER_NEAREST)\n im = im[offset_h: (offset_h + target_height), offset_w: (offset_w + target_width)]\n mask = mask[offset_h: (offset_h + target_height), offset_w: (offset_w + target_width)]\n\n flip = np.random.uniform() > 0.5\n if flip:\n im = cv2.flip(im, 1)\n mask = cv2.flip(mask, 1)\n\n if masks.size > 0:\n masks = np.transpose(masks, (1, 2, 0)) # to (h, w, n)\n masks = cv2.resize(masks, (new_w, new_h), interpolation=cv2.INTER_NEAREST)\n masks = masks[offset_h: (offset_h + target_height), offset_w: (offset_w + target_width)]\n if flip:\n masks = cv2.flip(masks, 1)\n try:\n if masks.ndim > 2:\n masks = np.transpose(masks, (2, 0, 1)) # to (n, h, w)\n else:\n masks = masks.reshape((1, target_height, target_width))\n except ValueError:\n print (masks.ndim, masks.shape)\n raise\n else:\n masks = np.zeros((0, target_height, target_width), masks.dtype)\n\n # bboxes\n boxes = _offset_boxes(boxes, [target_height, target_width], scale, [offset_w, offset_h], flip)\n # boxes *= scale\n # boxes = clip_boxes(boxes, [target_height, target_width])\n # if flip:\n # boxes_x = np.copy(boxes[:, 0])\n # boxes[:, 0] = target_width - boxes[:, 2]\n # boxes[:, 2] = target_width - boxes_x\n\n boxes, classes, masks = _filter_invalid_boxes(boxes, classes, masks, min_size=min_size)\n\n return im, masks, mask, boxes, classes", "def distorted_bounding_box_crop(image,\n bboxes,\n labels,\n min_object_covered=0.05,\n aspect_ratio_range=(0.8, 1.2),\n area_range=(0.1, 1.0),\n max_attempts=200,\n scope=None):\n bboxes = tf.clip_by_value(bboxes, 
0.0, 1.0)\n\n with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bboxes]):\n # Each bounding box has shape [1, num_boxes, box coords] and\n # the coordinates are ordered [ymin, xmin, ymax, xmax].\n bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=tf.expand_dims(bboxes, 0),\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n distort_bbox = distort_bbox[0, 0]\n\n # Crop the image to the specified bounding box.\n cropped_image = tf.slice(image, bbox_begin, bbox_size)\n # Restore the shape since the dynamic slice loses 3rd dimension.\n cropped_image.set_shape([None, None, 3])\n\n # Update bounding boxes: resize and filter out.\n cropped_bboxes = bboxes_resize(distort_bbox, bboxes)\n cropped_labels, cropped_bboxes = bboxes_filter_overlap(labels, cropped_bboxes)\n \n no_box = tf.equal(tf.shape(cropped_bboxes)[0], 0) # If there is no box in the image, it returns the original image.\n image, bboxes, labels = tf.cond(no_box, lambda:(image, bboxes, labels), lambda:(cropped_image, cropped_bboxes, cropped_labels))\n\n return image, bboxes, labels", "def _initial_normal_bbox(self):\n cx = cy = .5\n width = FLAGS.bbox_grid / float(FLAGS.cropbox_grid)\n x1 = cx - width / 2\n x2 = cx + width / 2\n y1 = cy - width / 2\n y2 = cy + width / 2\n return [y1,x1,y2,x2]", "def random_crop(image, steering = 0.0, tx_lower = -20, tx_upper = 20, ty_lower = -2, ty_upper = 2, rand = True):\n\n shape = image.shape\n (col_start, col_end) = (abs(tx_lower), shape[1] - tx_upper)\n horizon = 60\n bonnet = 136\n if rand:\n tx = np.random.randint(tx_lower, tx_upper + 1)\n ty = np.random.randint(ty_lower, ty_upper + 1)\n else:\n (tx, ty) = (0, 0)\n\n crop = image[horizon + ty: bonnet + ty, col_start + tx: col_end + tx, :]\n image = cv2.resize(crop, (320, 160), cv2.INTER_AREA)\n # the steering variable needs to be updated to counteract the shift \n if tx_lower != tx_upper:\n dsteering = -tx / (tx_upper - tx_lower) / 3.0\n else:\n dsteering = 0\n steering += dsteering\n\n return image, steering", "def _resize_bboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('bbox_fields', []):\n bboxes = []\n for box in results[key]:\n tmp_box = np.array(box, dtype=np.float32)\n tmp_box[0::2] *= results['scale_factor'][0]\n tmp_box[1::2] *= results['scale_factor'][1]\n if self.bbox_clip_border:\n tmp_box[0::2] = np.clip(tmp_box[0::2], 0, img_shape[1])\n tmp_box[1::2] = np.clip(tmp_box[1::2], 0, img_shape[0])\n bboxes.append(tmp_box)\n if len(results[key]) > 0:\n results[key] = bboxes", "def random_rotate_and_crop(fs_im, tg_mask, fs_bbox, theta_range, scale_factor_range):\n assert (fs_im.width == tg_mask.shape[1] and fs_im.height == tg_mask.shape[0])\n\n # STEP 1: Rotate fs_im and tg_mask.\n theta = np.random.uniform(*theta_range)\n\n # Rotate fs_im : tfs_im.\n tfs_im = fs_im.rotate(theta, expand=True) # tilted full scene\n\n # Create tfs_mask by rotating an all-ones array with expand=True.\n tfs_mask = np.ones((fs_im.height, fs_im.width), dtype=np.uint8)\n tfs_mask = Image.fromarray(tfs_mask).rotate(theta, expand=True)\n tfs_mask = np.array(tfs_mask)\n\n # Rotate tg_mask.\n tfs_tg_mask = Image.fromarray(tg_mask).rotate(theta, expand=True)\n tfs_tg_mask = np.array(tfs_tg_mask)\n\n # STEP 2: Compute tfs_bbox in tilted_fs_im.\n x_inds, y_inds = tfs_tg_mask.max(axis=0).nonzero()[0], 
tfs_tg_mask.max(axis=1).nonzero()[0]\n # While converting int coords to float coords, add 1 to higher coord.\n x1, x2, y1, y2 = float(x_inds.min()), float(x_inds.max()) + 1., float(y_inds.min()), float(y_inds.max()) + 1.\n tfs_bbox = (x1, y1, x2, y2)\n\n # STEP 3: Randomly crop scene around aa_bbox.\n cs_im, cs_mask, cs_bbox = random_crop_bbox(tfs_im, tfs_mask, tfs_bbox, scale_factor_range)\n\n # STEP 4: Compute rotation matrix that maps (u, v) wrt fs_bbox to (u, v) wrt cs_bbox in crop_scene.\n\n # First compute rotation matrix that maps (u, v) wrt fs_bbox (abb. as fsbb) to (x, y) wrt fs_im.\n # rmat_fsbb_uv_fsim_xy: (u, v) -> (u * fsbb_w + fsbb_x1, v * fsbb_h + fsbb_y1).\n fsbb_x1, fsbb_y1, fsbb_x2, fsbb_y2 = fs_bbox\n fsbb_w, fsbb_h = fsbb_x2 - fsbb_x1, fsbb_y2 - fsbb_y1\n rmat_fsbb_uv_fsim_xy = np.array([[fsbb_w, 0., fsbb_x1],\n [0., fsbb_h, fsbb_y1],\n [0., 0., 1. ]])\n\n # Then compute rotation matrix that maps (x, y) wrt fs_im to (u, v) wrt tilted_fs_im.\n # The idea is that the midpoint of fs_im corresponds to the midpoint of tilted_fs_im.\n # Step 1: Center about center of fs_im: (x, y) -> (x - fs_xmid, y - fs_ymid).\n # Step 2: Rotate by an angle theta.\n # Step 3: Add center of tfs_im: (x, y) -> ((x + tfs_xmid) / tfs_w, (y + tfs_ymid) / tfs_h).\n fs_xmid, fs_ymid = fs_im.width / 2, fs_im.height / 2\n tfs_xmid, tfs_ymid = tfs_im.width / 2, tfs_im.height / 2\n rmat_step1 = np.array([[1., 0., -fs_xmid],\n [0., 1., -fs_ymid],\n [0., 0., 1. ]])\n angle = np.deg2rad(-theta) # negative since y axis in image coordinates is inverted\n rmat_step2 = np.array([[np.cos(angle), -np.sin(angle), 0.],\n [np.sin(angle), np.cos(angle), 0.],\n [0., 0., 1.]])\n rmat_step3 = np.array([[1., 0., tfs_xmid],\n [0., 1., tfs_ymid],\n [0., 0., 1.]])\n rmat_step3[0] /= tfs_im.width; rmat_step3[1] /= tfs_im.height\n rmat_fsim_xy_tfsim_uv = rmat_step3 @ rmat_step2 @ rmat_step1 # multiply all matrices\n\n # Finally compute rotation matrix that maps (u, v) wrt tfs_im to (u, v) wrt cs_bbox.\n # rmat_tfsim_uv_csbbox_uv: (u, v) -> ((u - cs_bbox_x1) / cs_bbox_w, (v - cs_bbox_y1) / cs_bbox_h).\n cb_x1, cb_y1, cb_x2, cb_y2 = cs_bbox\n cb_w, cb_h = cb_x2 - cb_x1, cb_y2 - cb_y1\n rmat_tfsim_uv_cb_uv = np.array([[1., 0., -cb_x1],\n [0., 1., -cb_y1],\n [0., 0., 1.]])\n rmat_tfsim_uv_cb_uv[0] /= cb_w; rmat_tfsim_uv_cb_uv[1] /= cb_h\n\n # Compute overall rotation matrix.\n cs_rmat = rmat_tfsim_uv_cb_uv @ rmat_fsim_xy_tfsim_uv @ rmat_fsbb_uv_fsim_xy\n cs_rmat = cs_rmat.astype(np.float32)\n\n return tfs_im, tfs_bbox, cs_im, cs_mask, cs_bbox, cs_rmat", "def crop(masks, boxes, padding: int = 1):\n h, w, n = masks.shape\n x1, x2 = sanitize_coordinates(boxes[:, 0:1:1], boxes[:, 2:3:1], w, padding, cast=False)\n y1, y2 = sanitize_coordinates(boxes[:, 1:2:1], boxes[:, 3:4:1], h, padding, cast=False)\n\n cast = P.Cast()\n broadcast_to = P.BroadcastTo((h, w, n))\n row = broadcast_to((P.range(Tensor(0, mindspore.int32),\n Tensor(w, mindspore.int32),\n Tensor(1, mindspore.int32)).view(1, -1, 1)))\n rows = cast(row, x1.dtype)\n col = broadcast_to((P.range(Tensor(0, mindspore.int32),\n Tensor(w, mindspore.int32),\n Tensor(1, mindspore.int32)).view(-1, 1, 1)))\n cols = cast(col, x2.dtype)\n\n\n masks_left = rows >= x1.view(1, 1, -1)\n masks_right = rows < x2.view(1, 1, -1)\n masks_left = P.Cast()(masks_left, mindspore.float16)\n masks_right = P.Cast()(masks_right, mindspore.float16)\n crop_mask = masks_left * masks_right\n masks_up = cols >= y1.view(1, 1, -1)\n masks_up = P.Cast()(masks_up, mindspore.float16)\n crop_mask *= masks_up\n 
masks_down = cols < y2.view(1, 1, -1)\n masks_down = P.Cast()(masks_down, mindspore.float16)\n crop_mask *= masks_down\n\n return masks * crop_mask", "def random_crop(img, target_shape):\n rest = imgproc._get_crop2d_rest(img, target_shape)\n start = _rand_2dshape(rest)\n return imgproc._crop2d(img, start, target_shape)", "def random_crop_generator(batches, crop_length):\n while True:\n batch_x, batch_y = next(batches)\n batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, 3))\n for i in range(batch_x.shape[0]):\n batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))\n yield (batch_crops, batch_y)", "def generate_random_bbox(n, img_size, min_length, max_length):\n H, W = img_size\n y_min = np.random.uniform(0, H - max_length, size=(n,))\n x_min = np.random.uniform(0, W - max_length, size=(n,))\n y_max = y_min + np.random.uniform(min_length, max_length, size=(n,))\n x_max = x_min + np.random.uniform(min_length, max_length, size=(n,))\n bbox = np.stack((y_min, x_min, y_max, x_max), axis=1).astype(np.float32)\n return bbox", "def distorted_bounding_box_crop(image,\n labels,\n bboxes,\n min_object_covered=0.05,\n aspect_ratio_range=(0.9, 1.1),\n area_range=(0.1, 1.0),\n max_attempts=200,\n scope=None):\n with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bboxes]):\n # Each bounding box has shape [1, num_boxes, box coords] and\n # the coordinates are ordered [ymin, xmin, ymax, xmax].\n bboxes = tf.minimum(bboxes, 1.0)\n bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=tf.expand_dims(bboxes, 0),\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n\n\n # Draw the bounding box in an image summary.\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n distort_bbox)\n \n #tf_image.tf_summary_image(dst_image, bboxes, 'images_with_bounding_box')\n tf.summary.image('images_with_bounding_box', image_with_box)\n\n distort_bbox = distort_bbox[0, 0]\n\n # Crop the image to the specified bounding box.\n cropped_image = tf.slice(image, bbox_begin, bbox_size)\n cropped_image.set_shape([None, None, 3])\n # Update bounding boxes: resize and filter out.\n bboxes = tfe.bboxes_resize(distort_bbox, bboxes)\n labels, bboxes, num = tfe.bboxes_filter_overlap(labels, bboxes,\n BBOX_CROP_OVERLAP)\n return cropped_image, labels, bboxes, distort_bbox,num", "def cube_plus_crop(image, model_input_image_size, seed=None):\n vol_size = image.get_shape().as_list()\n # crop_locations = [1050, 2050]\n\n # Concat volume and label into a single volume for cropping\n comb_size = image.get_shape().as_list()\n crop_size = [comb_size[0]] + model_input_image_size + [comb_size[-1]]\n crop_size = [comb_size[0]] + model_input_image_size + [comb_size[-1]]\n with ops.name_scope(\n 'color_crop', 'random_crop', [image, crop_size]) as name:\n combined_volume = ops.convert_to_tensor(image, name='value')\n crop_size = ops.convert_to_tensor(\n crop_size, dtype=dtypes.int32, name='size')\n vol_shape = array_ops.shape(combined_volume)\n control_flow_ops.Assert(\n math_ops.reduce_all(vol_shape >= crop_size),\n ['Need vol_shape >= vol_size, got ', vol_shape, crop_size],\n summarize=1000)\n limit = vol_shape - crop_size + 1\n offset = tf.random_uniform(\n array_ops.shape(vol_shape),\n dtype=crop_size.dtype,\n maxval=crop_size.dtype.max,\n seed=seed) % limit\n # offset_2 = tf.random_uniform(\n # 
array_ops.shape(vol_shape),\n # dtype=crop_size.dtype,\n # maxval=crop_size.dtype.max,\n # seed=seed) % limit\n\n cropped_combined = array_ops.slice(\n combined_volume, offset, crop_size, name=name)\n cropped_volume = cropped_combined[:, :, :, :vol_size[-1]]\n cropped_label = cropped_combined[:, :, :, vol_size[-1]:]\n return cropped_volume, cropped_label", "def crop(self, bbox):\n return self.__pyramid[0].crop(bbox)", "def crop_inference_bbox(image, boxes, file_name=\"cropped_inference_result\"):\n # create output folder if not present\n create_dir(\"output/\")\n # crop detections\n if len(boxes) > 0:\n for ind in range(len(boxes)):\n cropped_img = image[\n int(boxes[ind][0][1]) : int(boxes[ind][1][1]),\n int(boxes[ind][0][0]) : int(boxes[ind][1][0]),\n :,\n ]\n save_path = os.path.join(\"output/\", file_name + \"_\" + str(ind) + \".png\")\n cv2.imwrite(save_path, cv2.cvtColor(cropped_img, cv2.COLOR_RGB2BGR))", "def crop_square(image, size):\n width, height = image.size\n top = random.randint(0, max(0, height-size))\n left = random.randint(0, max(0, width-size))\n bottom = min(top + size, height)\n right = min(left + size, width)\n\n return image.crop((left, top, right, bottom))", "def rectify_bbox(bboxes, max_shape): \n bboxes = np.array(bboxes, np.int32)\n n = bboxes.shape[0]\n if n == 0:\n return bboxes\n\n h, w = max_shape\n\n bboxes[:, 0] = np.maximum(bboxes[:, 0], np.zeros((n)))\n bboxes[:, 0] = np.minimum(bboxes[:, 0], (h-1) * np.ones((n)))\n bboxes[:, 1] = np.maximum(bboxes[:, 1], np.zeros((n)))\n bboxes[:, 1] = np.minimum(bboxes[:, 1], (w-1) * np.ones((n)))\n bboxes[:, 2] = np.maximum(bboxes[:, 2], np.ones((n)))\n bboxes[:, 2] = np.minimum(bboxes[:, 2], h * np.ones((n)) - bboxes[:, 0])\n bboxes[:, 3] = np.maximum(bboxes[:, 3], np.ones((n)))\n bboxes[:, 3] = np.minimum(bboxes[:, 3], w * np.ones((n)) - bboxes[:, 1])\n\n return bboxes", "def crop_generator(batches, crop_length):\n while True:\n batch_x, batch_y = next(batches)\n batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, 3))\n for i in range(batch_x.shape[0]):\n batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))\n yield (batch_crops, batch_y)", "def bbox2mask(self, shape, margin, bbox_shape, times):\r\n bboxs = []\r\n for i in range(times):\r\n bbox = self.random_bbox(shape, margin, bbox_shape)\r\n bboxs.append(bbox)\r\n height = shape\r\n width = shape\r\n mask = np.zeros((height, width), np.float32)\r\n for bbox in bboxs:\r\n h = int(bbox[2] * 0.1) + np.random.randint(int(bbox[2] * 0.2 + 1))\r\n w = int(bbox[3] * 0.1) + np.random.randint(int(bbox[3] * 0.2) + 1)\r\n mask[(bbox[0] + h) : (bbox[0] + bbox[2] - h), (bbox[1] + w) : (bbox[1] + bbox[3] - w)] = 1.\r\n return mask.reshape((1, ) + mask.shape).astype(np.float32)", "def bbox2mask(self, shape, margin, bbox_shape, times):\r\n bboxs = []\r\n for i in range(times):\r\n bbox = self.random_bbox(shape, margin, bbox_shape)\r\n bboxs.append(bbox)\r\n height = shape\r\n width = shape\r\n mask = np.zeros((height, width), np.float32)\r\n for bbox in bboxs:\r\n h = int(bbox[2] * 0.1) + np.random.randint(int(bbox[2] * 0.2 + 1))\r\n w = int(bbox[3] * 0.1) + np.random.randint(int(bbox[3] * 0.2) + 1)\r\n mask[(bbox[0] + h) : (bbox[0] + bbox[2] - h), (bbox[1] + w) : (bbox[1] + bbox[3] - w)] = 1.\r\n return mask.reshape((1, ) + mask.shape).astype(np.float32)", "def cropNwriteBBs(img, digitCnts,writeToFile=False,folderName='outcome',cropW=20,cropH=30):\r\n d = 0\r\n for contour in digitCnts:\r\n # 
https://stackoverflow.com/questions/50331025/how-to-crop-a-bounding-box-out-of-an-image\r\n (x, y, w, h) = cv2.boundingRect(contour)\r\n\r\n ext_left = tuple(contour[contour[:, :, 0].argmin()][0])\r\n ext_right = tuple(contour[contour[:, :, 0].argmax()][0])\r\n ext_top = tuple(contour[contour[:, :, 1].argmin()][0])\r\n ext_bot = tuple(contour[contour[:, :, 1].argmax()][0])\r\n\r\n img1 = img.copy()\r\n img2 = img.copy()\r\n cv2.rectangle(img1, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n\r\n cropped_image = img2[ext_top[1]:ext_bot[1], ext_left[0]:ext_right[0]]\r\n cropped_image = cv2.resize(cropped_image, (cropW, cropH))\r\n filename = os.path.join(os.path.join(BASE_PATH,folderName),'output_%d'%d+'.jpg')\r\n d += 1\r\n if writeToFile:\r\n cv2.imwrite(filename, cropped_image)\r\n cv2.waitKey(0)\r\n yield cropped_image,(x,y,w,h),contour", "def random_scale(im, inst_masks, mask, boxes, classes, scale):\n # scale = np.random.uniform(down, upper)\n h, w, c = im.shape\n if scale > 1:\n \"\"\"\"\"\"\n max_offx = (scale - 1.) * w\n max_offy = (scale - 1.) * h\n offx = int(np.random.uniform() * max_offx)\n offy = int(np.random.uniform() * max_offy)\n im = cv2.resize(im, (0, 0), fx=scale, fy=scale)\n mask = cv2.resize(mask, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n im = im[offy: (offy + h), offx: (offx + w)]\n mask = mask[offy: (offy + h), offx: (offx + w)]\n if inst_masks.size > 0:\n inst_masks = np.transpose(inst_masks, (1, 2, 0)) # to (h, w, n)\n inst_masks = cv2.resize(inst_masks, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n inst_masks = inst_masks[offy: (offy + h), offx: (offx + w)]\n try:\n if inst_masks.ndim > 2:\n inst_masks = np.transpose(inst_masks, (2, 0, 1)) # to (n, h, w)\n else:\n inst_masks = inst_masks.reshape((1, h, w))\n except ValueError:\n print (inst_masks.ndim, inst_masks.shape)\n raise\n else:\n inst_masks = np.zeros((0, h, w), inst_masks.dtype)\n else:\n \"\"\"\"\"\"\n canvas = np.zeros(im.shape, im.dtype) + np.array([103, 116, 123], im.dtype)\n canvas_mask = np.zeros(mask.shape, mask.dtype)\n max_offx = (scale - 1.) * w\n max_offy = (scale - 1.) 
* h\n offx = int(np.random.uniform() * max_offx)\n offy = int(np.random.uniform() * max_offy)\n im = cv2.resize(im, (0, 0), fx=scale, fy=scale)\n mask = cv2.resize(mask, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n h_, w_, _ = im.shape\n canvas[-offy: (-offy + h_), -offx: (-offx + w_)] = im\n canvas_mask[-offy: (-offy + h_), -offx: (-offx + w_)] = mask\n if inst_masks.size > 0:\n inst_masks = np.transpose(inst_masks, (1, 2, 0)) # to (h, w, n)\n canvas_instmask = np.zeros(inst_masks.shape, inst_masks.dtype)\n inst_masks = cv2.resize(inst_masks, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n if inst_masks.ndim == 2:\n inst_masks = inst_masks[:,:, np.newaxis]\n canvas_instmask[-offy: (-offy + h_), -offx: (-offx + w_)] = inst_masks\n canvas_instmask = np.transpose(canvas_instmask, (2, 0, 1)) # to (n, h, w)\n else:\n canvas_instmask = np.zeros((0, h, w), inst_masks.dtype)\n\n im, mask, inst_masks = canvas, canvas_mask, canvas_instmask\n\n boxes = _offset_boxes(boxes, im.shape, scale, [offx, offy], False)\n boxes, classes, inst_masks = _filter_invalid_boxes(boxes, classes, inst_masks, min_size=3)\n\n return im, inst_masks, mask, boxes, classes", "def test_cropping(self, scaffold_res=9):\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n func = self._get_simple_implicit_function(scaffold_res=scaffold_res).to(device)\n\n assert scaffold_res >= 8\n div = (scaffold_res - 1) / 2\n true_min_point = torch.tensor(\n [-3 / div, 0 / div, -3 / div],\n device=device,\n )\n true_max_point = torch.tensor(\n [1 / div, 2 / div, 3 / div],\n device=device,\n )\n\n def new_scaffold(points):\n # 1 if between true_min and true_max point else 0\n # return points.new_ones((*points.shape[:-1], 1))\n return (\n torch.logical_and(true_min_point <= points, points <= true_max_point)\n .all(dim=-1)\n .float()[..., None]\n )\n\n called_crop = []\n\n def assert_min_max_points(min_point, max_point):\n called_crop.append(1)\n self.assertClose(min_point, true_min_point)\n self.assertClose(max_point, true_max_point)\n\n func.voxel_grid_density.crop_self = assert_min_max_points\n func.voxel_grid_color.crop_self = assert_min_max_points\n func.voxel_grid_scaffold.forward = new_scaffold\n func._scaffold_ready = True\n func._crop(epoch=0)\n assert len(called_crop) == 2", "def crop_and_resize(image, boxes, size):\n box_ind = keras.backend.zeros_like(boxes, tensorflow.int32)\n box_ind = box_ind[..., 0]\n box_ind = keras.backend.reshape(box_ind, [-1])\n\n boxes = keras.backend.reshape(boxes, [-1, 4])\n\n return tensorflow.image.crop_and_resize(image, boxes, box_ind, size)", "def crop_box_for_class(boxes, image_path, crop_path, object_class):\n if object_class == \"all\":\n return image_path\n class_prob = 0\n best_box = []\n for box in boxes:\n box_prob = float(box[1].strip('%')) / 100.0\n if box[0] == object_class:\n if box_prob > class_prob:\n class_prob = box_prob\n best_box = box\n cropped_image = crop_bounding_box_from_image(\n best_box, image_path, with_margin=True)\n width, height = cropped_image.size\n if (height > 140) and (width > 140):\n cropped_image_path = object_class + \"_\" + os.path.basename(image_path)\n cropped_image.save(os.path.join(crop_path, cropped_image_path))\n # image to show / no margin\n cropped_image_show = crop_bounding_box_from_image(\n best_box, image_path, with_margin=False)\n cropped_image_show_path = object_class + \\\n \"_show_\" + os.path.basename(image_path)\n cropped_image_show.save(os.path.join(\n crop_path, cropped_image_show_path))\n return 
os.path.join(crop_path, cropped_image_path)\n else:\n return image_path", "def create_crops(merged_boxes, hyperspectral_pool=None, rgb_pool=None, sensor=\"hyperspectral\", expand=0, hyperspectral_savedir=\".\"): \n crops = []\n labels = []\n box_index = []\n for index, row in merged_boxes.iterrows():\n #Crop and append\n box = row[\"geometry\"] \n plot_name = row[\"plotID\"] \n \n #get sensor data\n if sensor == \"rgb\":\n sensor_path = find_sensor_path(bounds=box.bounds, lookup_pool=rgb_pool, sensor=\"rgb\")\n elif sensor == \"hyperspectral\":\n rgb_path = find_sensor_path(bounds=box.bounds, lookup_pool=rgb_pool, sensor=\"rgb\")\n hyperspectral_h5_path = find_sensor_path(bounds=box.bounds, lookup_pool=hyperspectral_pool, sensor=\"hyperspectral\")\n sensor_path = convert_h5(hyperspectral_h5_path, rgb_path, savedir=hyperspectral_savedir)\n \n crop = crop_image(sensor_path=sensor_path, box=box, expand=expand)\n \n crops.append(crop)\n labels.append(row[\"taxonID\"])\n box_index.append(\"{}_{}\".format(plot_name,index))\n \n return crops, labels, box_index", "def randomize_first_box():\n random_x = random.randint(0, 3)\n random_y = random.randint(0, 3)\n return random_x, random_y", "def voc_rand_crop(feature, label, height, width):\n i, j, h, w = torchvision.transforms.RandomCrop.get_params(\n feature, output_size=(height, width))\n \n feature = torchvision.transforms.functional.crop(feature, i, j, h, w)\n label = torchvision.transforms.functional.crop(label, i, j, h, w) \n\n return feature, label", "def _resize_cbboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('cbbox_fields', []):\n cbboxes = []\n for cbox in results[key]:\n tmp_cbox = np.array(cbox, dtype=np.float32)\n new_tmp_cbox = []\n for ccbox in tmp_cbox:\n ccbox = np.array(ccbox, dtype=np.float32)\n ccbox[0::2] *= results['scale_factor'][0]\n ccbox[1::2] *= results['scale_factor'][1]\n new_tmp_cbox.append(ccbox)\n tmp_cbox = np.array(new_tmp_cbox, dtype=np.float32)\n if self.bbox_clip_border:\n tmp_cbox[:, 0::2] = np.clip(tmp_cbox[:, 0::2], 0, img_shape[1])\n tmp_cbox[:, 1::2] = np.clip(tmp_cbox[:, 1::2], 0, img_shape[0])\n cbboxes.append(tmp_cbox)\n results[key] = cbboxes", "def sample_rois(rois, gt_boxes, num_classes, rois_per_image, fg_rois_per_image, fg_overlap, bb8_variance, im_info, granularity):\n overlaps = bbox_overlaps(rois[:, 1:], gt_boxes[:, 1:5] * np.array([im_info[1], im_info[0], im_info[1], im_info[0]]))\n gt_assignment = overlaps.argmax(axis=1)\n cid_labels = gt_boxes[gt_assignment, 0]\n max_overlaps = overlaps.max(axis=1)\n if DEBUG:\n print(\"max_overlaps: {}\".format(max_overlaps))\n\n # select foreground RoI with FG_THRESH overlap\n fg_indexes = np.where(max_overlaps >= fg_overlap)[0]\n # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs\n fg_rois_this_image = min(fg_rois_per_image, len(fg_indexes))\n # sample foreground regions without replacement\n if len(fg_indexes) > fg_rois_this_image:\n fg_indexes = np.random.choice(fg_indexes, size=fg_rois_this_image, replace=False)\n\n # select background RoIs as those within [0, FG_THRESH)\n bg_indexes = np.where(max_overlaps < fg_overlap)[0]\n # compute number of background RoIs to take from this image (guarding against there being fewer than desired)\n bg_rois_this_image = rois_per_image - fg_rois_this_image\n bg_rois_this_image = min(bg_rois_this_image, len(bg_indexes))\n # sample bg rois without replacement\n if len(bg_indexes) > bg_rois_this_image:\n bg_indexes = np.random.choice(bg_indexes, 
size=bg_rois_this_image, replace=False)\n\n # indexes selected\n keep_indexes = np.append(fg_indexes, bg_indexes)\n # keep_indexes = fg_indexes\n\n # pad more bg rois to ensure a fixed minibatch size\n while len(keep_indexes) < rois_per_image:\n gap = min(len(bg_indexes), rois_per_image - len(keep_indexes))\n gap_indexes = np.random.choice(range(len(bg_indexes)), size=gap, replace=False)\n keep_indexes = np.append(keep_indexes, bg_indexes[gap_indexes])\n\n if DEBUG:\n print(\"fg_indexes length: {}\".format(len(fg_indexes)))\n print(\"keep_indexes length: {}\".format(len(keep_indexes)))\n # sample rois and labels\n rois = rois[keep_indexes]\n # cid_labels = cid_labels[keep_indexes]\n # # set labels of bg rois to be 0\n # cid_labels[fg_rois_this_image:] = 0\n\n # load or compute bbox_target\n # targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4], box_stds=box_stds)\n FGA_cls_targets, FGA_reg_targets, FGA_reg_weights = \\\n bb8_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], 8:24],\n bb8_variance=bb8_variance, granularity=granularity, im_info=im_info)\n\n for i in range(fg_rois_this_image, rois_per_image):\n FGA_cls_targets[i] = -1\n FGA_reg_weights[i] = 0\n\n if DEBUG:\n print(\"FGA_cls_targets: {}\".format(FGA_cls_targets[-1]))\n print(\"FGA_reg_targets: {}\".format(FGA_reg_targets[-1]))\n print(\"FGA_reg_weights: {}\".format(FGA_reg_weights[-1]))\n\n return rois, FGA_cls_targets, FGA_reg_targets, FGA_reg_weights", "def clip_boxes(boxes, im_shape):\n if not hasattr(boxes, 'data'):\n boxes_ = boxes.numpy()\n\n boxes = boxes.view(boxes.size(0), -1, 4)\n boxes = torch.stack([\n boxes[:, :, 0].clamp(0, im_shape[1] - 1),\n boxes[:, :, 1].clamp(0, im_shape[0] - 1),\n boxes[:, :, 2].clamp(0, im_shape[1] - 1),\n boxes[:, :, 3].clamp(0, im_shape[0] - 1)\n ], 2).view(boxes.size(0), -1)\n\n return boxes", "def _resize_bboxes(self, ori_bboxes, scale_factor):\n bboxes = ori_bboxes * scale_factor\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, self.img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, self.img_shape[0])\n return bboxes", "def clip_boxes(boxes, im_shape):\n boxes[:, 0::2]=threshold(boxes[:, 0::2], 0, im_shape[1]-1)\n boxes[:, 1::2]=threshold(boxes[:, 1::2], 0, im_shape[0]-1)\n return boxes", "def clip_boxes(boxes, im_shape):\n boxes[:, 0::2]=threshold(boxes[:, 0::2], 0, im_shape[1]-1)\n boxes[:, 1::2]=threshold(boxes[:, 1::2], 0, im_shape[0]-1)\n return boxes", "def _images_and_boxes_preprocessing_cv2(self, imgs, boxes, gt_boxes=None, min_scale=None, crop_size=None, n_imgs=0):\n\n height, width, _ = imgs[0].shape\n\n # boxes[:, [0, 2]] *= width\n # boxes[:, [1, 3]] *= height\n boxes = cv2_transform.clip_boxes_to_image(boxes, height, width)\n\n # `transform.py` is list of np.array. 
However, for AVA, we only have\n # one np.array.\n boxes = [boxes]\n\n crop_size = crop_size if self.multigrid_enabled and crop_size is not None else self._crop_size\n \n if self._split != 'train':\n assert gt_boxes is not None\n gt_boxes = cv2_transform.clip_boxes_to_image(gt_boxes, height, width)\n gt_boxes = [gt_boxes]\n\n # The image now is in HWC, BGR format.\n if self._split == \"train\": # \"train\"\n imgs, boxes = cv2_transform.random_short_side_scale_jitter_list(\n imgs,\n min_size=self._jitter_min_scale if not self.multigrid_enabled and min_scale is None else min_scale,\n max_size=self._jitter_max_scale,\n boxes=boxes,\n )\n imgs, boxes = cv2_transform.random_crop_list(\n imgs, crop_size, order=\"HWC\", boxes=boxes, n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n if self.random_horizontal_flip:\n # random flip\n # if self.cfg.MODEL.USE_SPA_CONF and len(imgs[n_imgs].shape) != 3:\n # for i in range(n_imgs, len(imgs) + 1):\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n imgs, boxes = cv2_transform.horizontal_flip_list(\n 0.5, imgs, order=\"HWC\", boxes=boxes, \n n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n # elif self._split == \"val\":\n # # Short side to test_scale. Non-local and STRG uses 256.\n # imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\n # boxes, gt_boxes = cv2_transform.scale_boxes(\n # self._crop_size, boxes[0], height, width, gt_boxes=gt_boxes[0]\n # )\n # boxes, gt_boxes = [boxes], [gt_boxes]\n # imgs, boxes, gt_boxes = cv2_transform.spatial_shift_crop_list(\n # self._crop_size, imgs, 1, boxes=boxes, gt_boxes=gt_boxes\n # )\n\n # if self._test_force_flip:\n # imgs, boxes = cv2_transform.horizontal_flip_list(\n # 1, imgs, order=\"HWC\", boxes=boxes, gt_boxes=gt_boxes\n # )\n elif self._split == \"val\" or self._split == \"test\":\n # Short side to test_scale. 
Non-local and STRG uses 256.\n imgs = [cv2_transform.scale(crop_size, img) for img in imgs]\n boxes, gt_boxes = cv2_transform.scale_boxes(\n crop_size, boxes[0], height, width, gt_boxes=gt_boxes[0]\n )\n boxes, gt_boxes = [boxes], [gt_boxes]\n\n if self._test_force_flip:\n # if self.cfg.MODEL.USE_SPA_CONF and len(imgs[n_imgs].shape) != 3:\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n # imgs[n_imgs:] = [np.expand_dims(img, axis=-1) for img in imgs[n_imgs:]]\n imgs, boxes = cv2_transform.horizontal_flip_list(\n 1, imgs, order=\"HWC\", boxes=boxes, gt_boxes=gt_boxes,\n n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n else:\n raise NotImplementedError(\n \"Unsupported split mode {}\".format(self._split)\n )\n\n # Convert image to CHW keeping BGR order.\n if self.cfg.MODEL.USE_SPA_CONF:\n try:\n if len(imgs[n_imgs].shape) == 2:\n imgs[n_imgs:] = [np.expand_dims(img, axis=-1) for img in imgs[n_imgs:]]\n elif len(imgs[n_imgs].shape) > 3:\n imgs[n_imgs:] = [np.expand_dims(img.squeeze(), axis=-1) for img in imgs[n_imgs:]]\n except:\n import pdb; pdb.set_trace()\n \n # for i in range(n_imgs, len(imgs) + 1):\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n # try:\n imgs = [cv2_transform.HWC2CHW(img) for img in imgs]\n # except:\n # print('imgs[n_imgs].shape:', imgs[n_imgs].shape)\n # print('len(imgs):', len(imgs))\n # print('n_imgs:', n_imgs)\n # import pdb; pdb.set_trace()\n\n # Image [0, 255] -> [0, 1].\n if self.cfg.MODEL.USE_SPA_CONF:\n imgs[:n_imgs] = [img / 255.0 for img in imgs[:n_imgs]]\n else: \n imgs = [img / 255.0 for img in imgs]\n\n if self.cfg.MODEL.USE_SPA_CONF:\n imgs[:n_imgs] = [\n np.ascontiguousarray(\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs[:n_imgs]\n ]\n imgs[n_imgs:] = [\n np.ascontiguousarray(\n img.reshape((1, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs[n_imgs:]\n ]\n else:\n imgs = [\n np.ascontiguousarray(\n # img.reshape((3, self._crop_size, self._crop_size))\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs\n ]\n\n # Do color augmentation (after divided by 255.0).\n if self.cfg.MODEL.USE_SPA_CONF:\n skeleton_imgs = imgs[n_imgs:]\n imgs = imgs[:n_imgs]\n if self._split == \"train\" and self._use_color_augmentation: # False\n if not self._pca_jitter_only:\n imgs = cv2_transform.color_jitter_list(\n imgs,\n img_brightness=0.4,\n img_contrast=0.4,\n img_saturation=0.4,\n )\n\n imgs = cv2_transform.lighting_list(\n imgs,\n alphastd=0.1,\n eigval=np.array(self._pca_eigval).astype(np.float32),\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\n )\n\n # Normalize images by mean and std.\n imgs = [\n cv2_transform.color_normalization(\n img,\n np.array(self._data_mean, dtype=np.float32),\n np.array(self._data_std, dtype=np.float32),\n )\n for img in imgs\n ]\n\n # Concat list of images to single ndarray.\n imgs = np.concatenate(\n [np.expand_dims(img, axis=1) for img in imgs], axis=1\n )\n\n if not self._use_bgr:\n # Convert image format from BGR to RGB.\n imgs = imgs[::-1, ...]\n\n imgs = np.ascontiguousarray(imgs)\n imgs = torch.from_numpy(imgs)\n \n if self.cfg.MODEL.USE_SPA_CONF:\n skeleton_imgs = np.concatenate(\n [np.expand_dims(img, axis=1) for img in skeleton_imgs], axis=1\n )\n skeleton_imgs = np.ascontiguousarray(skeleton_imgs)\n skeleton_imgs = torch.from_numpy(skeleton_imgs)\n\n boxes = cv2_transform.clip_boxes_to_image(\n boxes[0], imgs[0].shape[1], imgs[0].shape[2]\n )\n if gt_boxes is not None:\n gt_boxes = 
cv2_transform.clip_boxes_to_image(\n gt_boxes[0], imgs[0].shape[1], imgs[0].shape[2]\n )\n if self.cfg.MODEL.USE_SPA_CONF:\n return (imgs, skeleton_imgs, boxes) if gt_boxes is None else (imgs, skeleton_imgs, boxes, gt_boxes)\n else:\n return (imgs, boxes) if gt_boxes is None else (imgs, boxes, gt_boxes)", "def clip_boxes(boxes, im_shape):\n\n if not hasattr(boxes, 'data'):\n boxes_ = boxes.numpy()\n\n boxes = boxes.view(boxes.size(0), -1, 4)\n boxes = torch.stack( \\\n [boxes[:, :, 0].clamp(0, im_shape[1] - 1),\n boxes[:, :, 1].clamp(0, im_shape[0] - 1),\n boxes[:, :, 2].clamp(0, im_shape[1] - 1),\n boxes[:, :, 3].clamp(0, im_shape[0] - 1)], 2).view(boxes.size(0), -1)\n\n return boxes", "def _crop_region(polygons, left, bottom, right, top, precision):\n cropped_polygons = []\n for p in polygons:\n clipped_polys = clipper._chop(p, [top, bottom], 1, 1 / precision)\n # polygon, [cuts], axis, scale\n for cp in clipped_polys[1]:\n result = clipper._chop(cp, [left, right], 0, 1 / precision)\n cropped_polygons += list(result[1])\n return cropped_polygons", "def random_image():\n # Pick random background color\n bg_color = np.array([random.randint(0, 255) for _ in range(3)])\n height = random.randint(300, 350)\n width = random.randint(350, 400)\n print(\"bg_color: \" + str(bg_color))\n print(\"height: \" + str(height))\n print(\"width: \" + str(width))\n\n image = np.ones([height, width, 3], dtype=np.uint8) * bg_color\n mask = np.zeros([height, width, 3], dtype=np.uint8)\n labels = []\n assert image.shape == mask.shape\n\n # Generate a few random shapes\n shapes = []\n boxes = []\n N = random.randint(1, 4)\n for _ in range(N):\n shape, color, dims = random_shape(height, width)\n shapes.append((shape, color, dims))\n x, y, s = dims\n boxes.append([y - s, x - s, y + s, x + s])\n # Apply non-max suppression wit 0.3 threshold to avoid\n # shapes covering each other\n keep_ixs = utils.non_max_suppression(\n np.array(boxes), np.arange(N), 0.1)\n shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]\n\n for shape, color, dims in shapes:\n x, y, s = dims\n if shape == 'square':\n image = cv2.rectangle(image, (x - s, y - s),\n (x + s, y + s), color, -1)\n mask = cv2.rectangle(mask, (x - s, y - s),\n (x + s, y + s), 1, -1)\n labels.append([1, x, y, s])\n elif shape == \"circle\":\n image = cv2.circle(image, (x, y), s, color, -1)\n mask = cv2.circle(mask, (x, y), s, 2, -1)\n labels.append([2, x, y, s])\n elif shape == \"triangle\":\n points = np.array([[(x, y - s),\n (x - s / math.sin(math.radians(60)), y + s),\n (x + s / math.sin(math.radians(60)), y + s),\n ]], dtype=np.int32)\n image = cv2.fillPoly(image, points, color)\n mask = cv2.fillPoly(mask, points, 3)\n labels.append([3, x, y, s])\n\n return image, mask, np.array(labels)", "def random_crop(sample: Sample,\n crop_size: TupleInt3,\n class_weights: List[float] = None) -> Tuple[Sample, np.ndarray]:\n image = sample.image\n labels = sample.labels\n mask = sample.mask\n\n image_spatial_shape = image.shape[1:]\n\n if any_pairwise_larger(crop_size, image_spatial_shape):\n raise ValueError(\"The crop_size across each dimension should be greater than zero and less than or equal \"\n \"to the current value (crop_size: {}, spatial shape: {})\"\n .format(crop_size, image_spatial_shape))\n\n # Sample a center pixel location for patch extraction.\n center = random_select_patch_center(sample, class_weights)\n\n # Verify and fix overflow for each dimension\n left = []\n for i in range(3):\n margin_left = int(crop_size[i] / 2)\n margin_right = crop_size[i] - 
margin_left\n left_index = center[i] - margin_left\n right_index = center[i] + margin_right\n if right_index > labels.shape[i + 1]:\n left_index = left_index - (right_index - labels.shape[i + 1])\n if left_index < 0:\n left_index = 0\n left.append(left_index)\n\n slicers = [slice(left[x], left[x] + crop_size[x]) for x in range(0, 3)]\n\n # Crop the tensors\n images_cropped = image[:, slicers[0], slicers[1], slicers[2]]\n labels_cropped = labels[:, slicers[0], slicers[1], slicers[2]]\n mask_cropped = mask[slicers[0], slicers[1], slicers[2]]\n sample = Sample(\n image=images_cropped,\n labels=labels_cropped,\n mask=mask_cropped,\n metadata=sample.metadata\n )\n return sample, center", "def crop_image(image):\n delta = .05\n rand_top_ratio = random.uniform(default_top_ratio - delta,\n default_top_ratio + delta)\n rand_bot_ratio = random.uniform(default_bot_tatio - delta,\n default_bot_tatio + delta)\n image = preprocess(image, top_ratio=rand_top_ratio, bot_ratio=rand_bot_ratio)\n\n return image", "def random_block(data, shape, rng=None):\n if rng is None:\n rng = np.random\n d_shape = data.shape\n assert len(d_shape) == len(shape)\n corner = [rng.randint(diff + 1)\n for diff in (np.array(d_shape) - np.array(shape))]\n slices = [slice(c, c + size) for c, size in zip(corner, shape)]\n return data[slices]", "def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\n\n device = boxes.device\n # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise\n # (several small tensors for representing a single instance mask)\n boxes = boxes.to(torch.device(\"cpu\"))\n\n results = [\n rasterize_polygons_within_box(poly, box.numpy(), mask_size)\n for poly, box in zip(self.polygons, boxes)\n ]\n \"\"\"\n poly: list[list[float]], the polygons for one instance\n box: a tensor of shape (4,)\n \"\"\"\n if len(results) == 0:\n return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)\n return torch.stack(results, dim=0).to(device=device)", "def random_crop_params(self, img, output_size):\n w, h = img.size\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = random.randint(0, h - th)\n j = random.randint(0, w - tw) \n return i, j, th, tw", "def randomPaste(bg_img, img):\n if img.shape[0] > bg_img.shape[0] or img.shape[1] > bg_img.shape[1]:\n log.error(\"Failed to paste: inner is bigger.\")\n return img\n\n x_offset = random.randint(0, bg_img.shape[1] - img.shape[1])\n y_offset = random.randint(0, bg_img.shape[0] - img.shape[0])\n\n img2 = paste(bg_img, img, x_offset, y_offset)\n return img2", "def crop(array, length, deterministic=False):\n if len(array) > length:\n if not deterministic:\n pos = np.random.randint(len(array) - length + 1)\n array = array[pos:pos + length:]\n else:\n l = len(array)\n array = array[(l - length) // 2:(l + length) // 2]\n return array", "def test_get_cropped_faces(self):\n with Image.open(self.subject) as im:\n image = im.convert(\"RGB\")\n\n cropped_list = image_helper.get_cropped_faces(image, [BoundingBox(0,0,15,15), BoundingBox(20,20,45,45)])\n\n self.assertEqual(cropped_list[0].size, (15, 15))\n self.assertEqual(cropped_list[1].size, (45, 45))", "def random_crop(image, model_input_image_size):\n im_size = image.get_shape().as_list()\n if len(im_size) == 3:\n return tf.random_crop(\n image, model_input_image_size[:-1] + [im_size[-1]])\n elif len(im_size) == 4:\n if im_size[-1] > 1:\n raise NotImplementedError\n crop_size 
= model_input_image_size[:2] + [im_size[0]]\n trans_image = tf.transpose(tf.squeeze(image), [1, 2, 0])\n crop_image = tf.expand_dims(\n tf.transpose(\n tf.random_crop(trans_image, crop_size),\n [2, 0, 1]), axis=-1)\n return crop_image\n else:\n raise NotImplementedError", "def crop_bbox(feats, bbox, HH, WW=None):\n N = tf.shape(feats)[0]\n if WW is None:\n WW = HH\n\n # change box from [0, 1] to [-1, 1] coordinate system\n bbox = 2 * bbox - 1\n\n x0, y0 = bbox[:, 0], bbox[:, 1]\n x1, y1 = bbox[:, 2], bbox[:, 3]\n X = tensor_linspace(x0, x1, steps=WW)\n X = tf.broadcast_to(tf.reshape(X, (N, 1, WW)), (N, HH, WW))\n Y = tensor_linspace(y0, y1, steps=HH)\n Y = tf.broadcast_to(tf.reshape(Y, (N, HH, 1)), (N, HH, WW))\n\n return bilinear_sampler(feats, X, Y)", "def imcrop(img,bboxes,scale=1.0,pad_fill=None):\n channel = 1 if img.ndim == 2 else img.shape[2]\n if pad_fill is not None:\n if isinstance(pad_fill,(int,float)):\n pad_fill = [pad_fill for _ in range(channel)]\n assert len(pad_fill) == channel\n # bboxes.ndim == 1 if there is only one box.\n _bboxes = bboxes[None,...] if bboxes.ndim == 1 else bboxes\n scaled_bboxes = bbox_scaling(_bboxes,scale).astype(np.int32)\n clipped_bboxes = bbox_clip(scaled_bboxes,img.shape)\n \n patches = []\n for i in range(clipped_bbox.shape[0]):\n x1,y1,x2,y2 = tuple(clipped_bbox[i,:])\n if pad_fill is None:\n patch = img[y1:y2+1,x1:x2+1,...]\n else:\n _x1,_y1,_x2,_y2 = tuple(scaled_bboxes[i,:])\n if channel == 2:\n patch_shape = (_y2 - _y1 + 1,_x2 - _x1 + 1)\n else:\n patch_shape = (_y2 - _y1 + 1,_x2 - _x1 + 1,chn)\n patch = np.array(pad_fill,dtype=img.dtype) * np.ones(patch_shape,dtype=img.dtype)\n x_start = 0 if _x1 >= 0 else -_x1\n y_start = 0 if _y1 >= 0 else -_y1\n w = x2 - x1 + 1\n h = y2 - y1 + 1\n patch[y_start:y_start + h,x_start:x_start + w,...] = img[y1:y1+h,x1:x1+w,...]\n patches.append(patch)\n \n if bboxes.ndim == 1:\n return patches[0]\n else:\n return patches", "def get_bboxes(self, **kwargs):\n pass", "def _prep_im_for_blob(self, im, pixel_means, bbox):\n im = im.astype(np.float32, copy=False)\n im -= pixel_means\n im_shape = im.shape\n\n # crop version 2\n x, y, w, h = bbox\n crop_img, crop_w, crop_h = None, None, None\n if (x, y, w, h) == (0, 0, im.shape[1]-1, im.shape[0]-1):\n crop_img = im[:,:,:]\n crop_w = w\n crop_h = h\n else:\n # 1. random shifted image'\n # crop_x = np.random.randint(x)\n # crop_w = np.random.randint(x+w, im_shape[1]-1) - crop_x\n # crop_y = np.random.randint(y)\n # crop_h = np.random.randint(y+h, im_shape[0]-1) - crop_y\n # crop_img = im[crop_y:crop_y+crop_h, crop_x:crop_x+crop_w, :]\n # 2. 
original image\n crop_img = im[y:y+h, x:x+w, :]\n crop_w, crop_h = w, h\n\n im_scale_x = float(self._width) / float(crop_w)\n im_scale_y = float(self._height ) / float(crop_h)\n crop_img = cv2.resize(crop_img, None, None, fx=im_scale_x, fy=im_scale_y,\n interpolation=cv2.INTER_LINEAR)\n\n return crop_img", "def create_crops(merged_boxes, hyperspectral_pool=None, rgb_pool=None, sensor=\"hyperspectral\", expand=0, hyperspectral_savedir=\".\"): \n crops = []\n labels = []\n domains =[]\n sites = []\n box_index = []\n elevations = []\n heights = []\n for index, row in merged_boxes.iterrows():\n #Crop and append\n box = row[\"geometry\"] \n plot_name = row[\"plotID\"] \n domain = row[\"domainID\"]\n site = row[\"plotID\"].split(\"_\")[0]\n elevation = float(row[\"elevation\"])/1000\n height = float(row[\"height\"])/100\n \n #get sensor data\n if sensor == \"rgb\":\n try:\n sensor_path = find_sensor_path(bounds=box.bounds, lookup_pool=rgb_pool)\n except:\n raise ValueError(\"Cannot find RGB data path for box bounds {} for plot_name {}\".format(box.bounds,plot_name))\n elif sensor == \"hyperspectral\":\n try:\n rgb_path = find_sensor_path(bounds=box.bounds, lookup_pool=rgb_pool)\n except:\n raise ValueError(\"Cannot find RGB data path for box bounds {} for plot_name {}\".format(box.bounds,plot_name))\n \n try:\n hyperspectral_h5_path = find_sensor_path(bounds=box.bounds, lookup_pool=hyperspectral_pool)\n except:\n raise ValueError(\"Cannot find hyperspectral data path for box bounds {} for plot_name {}\".format(box.bounds,plot_name))\n \n sensor_path = convert_h5(hyperspectral_h5_path, rgb_path, savedir=hyperspectral_savedir)\n \n crop = crop_image(sensor_path=sensor_path, box=box, expand=expand)\n \n crops.append(crop)\n domains.append(domain)\n sites.append(site)\n labels.append(row[\"taxonID\"])\n elevations.append(elevation)\n heights.append(height)\n box_index.append(row[\"id\"])\n \n return crops, labels, domains, sites, heights, elevations, box_index", "def clip_boxes_to_image(boxes, size):\r\n height, width = size\r\n boxes[..., 0::2] = boxes[..., 0::2].clip(min=0, max=width)\r\n boxes[..., 1::2] = boxes[..., 1::2].clip(min=0, max=height)\r\n return boxes", "def _generate_bboxes(self, probs, offsets, scale, threshold):\n # applying P-Net is equivalent, in some sense, to\n # moving 12x12 window with stride 2\n stride = 2\n cell_size = 12\n\n # extract positive probability and resize it as [n, m] dim tensor.\n probs = probs[:, 1, :, :]\n\n # indices of boxes where there is probably a face\n mask = probs > threshold\n inds = mask.nonzero()\n\n if inds.shape[0] == 0:\n return torch.empty(0, dtype=torch.int32, device=self.device), \\\n torch.empty(0, dtype=torch.float32, device=self.device), \\\n torch.empty(0, dtype=torch.float32, device=self.device), \\\n torch.empty(0, dtype=torch.int32, device=self.device)\n\n # transformations of bounding boxes\n tx1, ty1, tx2, ty2 = [offsets[inds[:, 0], i, inds[:, 1], inds[:, 2]]\n for i in range(4)]\n\n offsets = torch.stack([tx1, ty1, tx2, ty2], 1)\n score = probs[inds[:, 0], inds[:, 1], inds[:, 2]]\n\n # P-Net is applied to scaled images\n # so we need to rescale bounding boxes back\n bounding_boxes = torch.stack([\n stride*inds[:, -1] + 1.0,\n stride*inds[:, -2] + 1.0,\n stride*inds[:, -1] + 1.0 + cell_size,\n (stride*inds[:, -2] + 1.0 + cell_size),\n ], 0).transpose(0, 1).float()\n\n bounding_boxes = torch.round(bounding_boxes / scale).int()\n return bounding_boxes, score, offsets, inds[:, 0].int()", "def uniform_box_sampling(n_sample, 
bounding_box=((0,), (1,))):\n bounding_box = np.array(bounding_box)\n dists = np.diag(bounding_box[1] - bounding_box[0])\n samples = np.random.random_sample((n_sample, bounding_box.shape[1]))\n samples = np.matmul(samples, dists) + bounding_box[0]\n\n return samples", "def covering_box(boxes):\n x_min = np.amin([b.x for b in boxes])\n x_max = np.amax([b.x + b.width for b in boxes])\n y_min = np.amin([b.y for b in boxes])\n y_max = np.amax([b.y + b.height for b in boxes])\n cover = Box(x_min, y_min, x_max - x_min, y_max - y_min)\n return cover", "def random_crop(self, img, output_img_h = 0.5, output_img_w = 0.5, p = 0.5):\n if self.decision(p):\n height, width, channels = img.shape\n new_height = random.randint(int(height * output_img_h), height)\n new_width = random.randint(int(width * output_img_w), width)\n y = random.randint(0, height - new_height)\n x = random.randint(0, width - new_width)\n roi = img[y:y + new_height, x:x + new_width]\n # check if cut is ahve to much dark pixels, more then 20 %\n non_zeros = np.count_nonzero(roi)\n non_zeros_procent = non_zeros / roi.size\n if non_zeros_procent < 0.8:\n pass\n else:\n img = roi\n return img" ]
[ "0.7094237", "0.6923021", "0.6707568", "0.6556093", "0.6494876", "0.64902526", "0.6450483", "0.6424151", "0.6415657", "0.64077026", "0.6321483", "0.62812304", "0.62280124", "0.62280124", "0.61862594", "0.61374986", "0.61367637", "0.60897416", "0.60772663", "0.6062564", "0.6056741", "0.60565764", "0.6053053", "0.6051694", "0.60297084", "0.5996235", "0.59685576", "0.5951814", "0.5948881", "0.5929734", "0.59295", "0.5926063", "0.59198153", "0.59135514", "0.5911482", "0.589992", "0.5870751", "0.5870751", "0.5867822", "0.5858791", "0.58291185", "0.5816148", "0.5787179", "0.57859915", "0.57775855", "0.5775844", "0.5759832", "0.5754235", "0.5729097", "0.5721727", "0.570731", "0.5705611", "0.57029057", "0.5697343", "0.56951493", "0.56890595", "0.5683695", "0.56790346", "0.56763226", "0.56755733", "0.56746936", "0.56663364", "0.5655987", "0.5655987", "0.56430393", "0.563588", "0.5621126", "0.56140804", "0.56110746", "0.5596567", "0.55886847", "0.5588627", "0.5585799", "0.55806166", "0.55757785", "0.55596095", "0.5558946", "0.5558946", "0.5539488", "0.55294424", "0.5519726", "0.5519205", "0.55119365", "0.5499469", "0.5495051", "0.5488051", "0.5484027", "0.5479633", "0.54775524", "0.5474835", "0.5473368", "0.5459229", "0.5454724", "0.5452498", "0.5450976", "0.54493356", "0.54411936", "0.54263943", "0.5414856", "0.5410128", "0.5399282" ]
0.0
-1
Function to perform cutmix.
def cutmix(batch: Tuple[torch.Tensor, torch.Tensor], alpha: float = 1.0) -> Tuple:
    data, targets = batch
    indices = torch.randperm(data.size(0))
    shuffled_data = data[indices]
    shuffled_targets = targets[indices]

    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    x0, x1, y0, y1 = random_bbox(data, lam)
    data[:, :, y0:y1, x0:x1] = shuffled_data[:, :, y0:y1, x0:x1]
    targets = (targets, shuffled_targets, lam)

    return data, targets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cutmix(input: T, *args: Any, **kwargs: Any) -> T:\n ...", "def cut_sig(self):\n c = TCut(self.cut_both)\n c += TCut(self._return_if('_cut_sig'))\n return c", "def Cut(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperations_Cut(self, *args)", "def cut_bkg(self):\n c = TCut(self.cut_both)\n c += TCut(self._return_if('_cut_bkg'))\n return c", "def onCut(self):\n pass", "def _mix(a, b, c):\n c = _cutoff32(c)\n a = _cutoff32(a-b-c) ^ c >> 13\n b = _cutoff32(b-c-a) ^ _cutoff32(a << 8)\n c = _cutoff32(c-a-b) ^ b >> 13\n a = _cutoff32(a-b-c) ^ c >> 12\n b = _cutoff32(b-c-a) ^ _cutoff32(a << 16)\n c = _cutoff32(c-a-b) ^ b >> 5\n a = _cutoff32(a-b-c) ^ c >> 3\n b = _cutoff32(b-c-a) ^ _cutoff32(a << 10)\n c = _cutoff32(c-a-b) ^ b >> 15\n return a, b, c", "def cutmix_data(inputs, targets, alpha=1.):\n bsize, _, h, w = inputs.shape\n shuffled_idxs = torch.randperm(bsize).cuda()\n\n inputs_s = inputs[shuffled_idxs]\n lamb = np.random.beta(alpha, alpha)\n\n rx = np.random.randint(w)\n ry = np.random.randint(h)\n cut_ratio = np.sqrt(1. - lamb)\n rw = np.int(cut_ratio * w)\n rh = np.int(cut_ratio * h)\n\n x1 = np.clip(rx - rw // 2, 0, w)\n x2 = np.clip(rx + rw // 2, 0, w)\n y1 = np.clip(ry - rh // 2, 0, h)\n y2 = np.clip(ry + rh // 2, 0, h)\n\n inputs[:, :, x1:x2, y1:y2] = inputs_s[:, :, x1:x2, y1:y2]\n # adjust lambda to exactly match pixel ratio\n lamb = 1 - ((x2 - x1) * (y2 - y1) /\n (inputs.size()[-1] * inputs.size()[-2]))\n return inputs, targets, targets[shuffled_idxs], lamb", "def _crop_concat(self, upsampled, bypass):\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = F.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)", "def check_cut_mix_batch_c(method):\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n [image_batch_format, alpha, prob], _ = parse_user_args(method, *args, **kwargs)\n type_check(image_batch_format, (ImageBatchFormat,), \"image_batch_format\")\n check_pos_float32(alpha)\n check_positive(alpha, \"alpha\")\n check_value(prob, [0, 1], \"prob\")\n return method(self, *args, **kwargs)\n\n return new_method", "def __init__(self, *args):\n _BRepAlgo.BRepAlgo_Cut_swiginit(self,_BRepAlgo.new_BRepAlgo_Cut(*args))", "def _discretize_mixture(mix, k):\n disc = np.floor(mix * k).astype(int)\n inds = np.argsort(disc - mix * k)[: k - disc.sum()]\n disc[inds] += 1\n return disc", "def cut(self, other, combine=None):\n # TODO\n return NotImplemented", "def cut(S, T, graph):\n ###TODO\n pass", "def crop_and_concat(self, upsampled, bypass, crop=False):\n logging.debug(\"Before - Upsampled: {}\".format(upsampled.size()))\n logging.debug(\"Before - bypass: {}\".format(bypass.size()))\n if crop:\n c1 = (bypass.size()[2] - upsampled.size()[2]) // 2\n c2 = (bypass.size()[3] - upsampled.size()[3]) // 2\n bypass = F.pad(bypass, (-c2, -c2, -c1, -c1))\n logging.debug(\"Upsampled: {}\".format(upsampled.size()))\n logging.debug(\"bypass: {}\".format(bypass.size()))\n return torch.cat((upsampled, bypass), 1)", "def hxlcut():\n run_script(hxlcut_main)", "def cut_image(self, x, y, r_cut):\n image_cutted_raw = self.image[x - r_cut:x + r_cut + 1, y - r_cut:y + r_cut + 1]\n image_cutted = self.sub_bkg(image_cutted_raw)\n return image_cutted", "def cut(self, array_obj):\n\n pass", "def cull(self):", "def do_cut(self, map, affine):\n coords = [0, 0, 0]\n coords['xyz'.index(self.direction)] = self.coord\n x_map, y_map, z_map = [int(np.round(c)) for c in\n coord_transform(coords[0],\n coords[1],\n coords[2],\n np.linalg.inv(affine))]\n if self.direction == 'y':\n 
cut = np.rot90(map[:, y_map, :])\n elif self.direction == 'x':\n cut = np.rot90(map[x_map, :, :])\n elif self.direction == 'z':\n cut = np.rot90(map[:, :, z_map])\n else:\n raise ValueError('Invalid value for direction %s' %\n self.direction)\n return cut", "def cut(lines=[],params=\"\"):\n if not core.is_unixy():\n raise(\"cut is only implemented on unix-like systems\")\n cmd = \"cut\"\n if params != \"\":\n cmd = cmd + \" \" + params\n res = act.call(cmd,lines)\n return res", "def consolidate(self, *args, **kwargs):\n kwargs['mode'] = 'consolidate'\n kwargs['mix_before'] = (0, 0)\n kwargs['air_gap'] = 0\n kwargs['disposal_vol'] = 0\n return self.transfer(*args, **kwargs)", "def clip_at_nth(infits, cut=10):\n\n#\n#--- trim the extreme values\n#\n upper = find_nth(infits, cut)\n\n cmd1 = \"/usr/bin/env PERL5LIB=\"\n cmd2 = ' dmimgthresh infile=' + infits+ ' outfile=zout.fits cut=\"0:' + str(upper) + '\" value=0 clobber=yes'\n cmd = cmd1 + cmd2\n bash(cmd, env=ascdsenv)\n\n outfile = infits.replace('.fits','_full.fits')\n cmd = 'mv ' + infits + ' ' + outfile\n os.system(cmd)\n \n m = re.search('gz', infits)\n if m is not None:\n os.system('gzip zout.fits')\n cmd = 'mv zout.fits.gz ' + infits\n os.system(cmd)\n else:\n cmd = 'mv zout.fits ' + infits\n os.system(cmd)", "def pulsCut(self, phcut):\n if not os.path.isfile(self.outmktime):\n (\"\\t=== Make sure that self.workpath points towards a valid directory ===\")\n return\n\n if 'PULSE_PHASE' not in fits.getdata(self.outmktime).columns.names:\n print(\"\\t=== FT1 file does not have a 'PULSE_PHASE' column, run tempo2 first ===\")\n return\n\n frac = np.sum(phcut[1::2]) - np.sum(phcut[0::2])\n outfil = self.outmktime[:-5]+'_NoPulse_'+str(frac)+'.fits'\n cutCmd = \"(PULSE_PHASE >= {} && PULSE_PHASE <= {})\".format(phcut[0], phcut[1])\n if len(phcut) > 2:\n for i in np.arange(2, len(phcut), 2):\n cutCmd += \" || (PULSE_PHASE >= {} && PULSE_PHASE <= {})\".format(phcut[i], phcut[i+1])\n \n os.popen('fcopy \"{}[{}]\" {}'.format(self.outmktime, cutCmd, outfil))\n\n print(\"\\t=== File '{}' created ===\".format(outfil))\n return", "def preparing_tocut(image):\n\n _, image = threshold_image(image)\n\n return image", "def cut( self, i_start, i_stop ):\n # create two series of indices, combine them and remove them from the data cube\n beginning = np.arange( i_start, dtype=int )\n end = np.arange( i_stop, self.n_steps, dtype=int )\n self._remove_steps( np.concatenate([beginning,end]).tolist() )", "def fcn(self, data_in):\n \n assert isinstance(data_in, _np.ndarray), 'Required input is an ndarray'\n\n assert data_in.ndim == 1, 'Required input is a 1D ndarray'\n \n data_out = 0*data_in\n\n cutter = CutEveryNSpectra(self.parameters['offset'], cut_m=self.parameters['cut_m'],\n every_n=self.parameters['every_n'], action=self.parameters['action'])\n\n # Because of the limits of PlotEffect, the input and output data HAS TO BE the same size\n temp = cutter.calculate(_np.repeat(data_in[:,None], 11, axis=-1)).sum(axis=-1)\n data_out[:temp.size] = temp\n \n return data_out", "def reduce(self):\n return self.crop(*self.ink_offsets)", "def apply_cuts(objects):\n #- Check if objects is a filename instead of the actual data\n if isinstance(objects, (str, unicode)):\n objects = io.read_tractor(objects)\n \n #- undo Milky Way extinction\n flux = unextinct_fluxes(objects)\n gflux = flux['GFLUX']\n rflux = flux['RFLUX']\n zflux = flux['ZFLUX']\n w1flux = flux['W1FLUX']\n wflux = flux['WFLUX']\n \n #- DR1 has targets off the edge of the brick; trim to just this brick\n if 
'BRICK_PRIMARY' in objects.dtype.names:\n primary = objects['BRICK_PRIMARY']\n else:\n primary = np.ones(len(objects), dtype=bool)\n \n #----- LRG\n lrg = primary.copy()\n lrg &= rflux > 10**((22.5-23.0)/2.5)\n lrg &= zflux > 10**((22.5-20.56)/2.5)\n lrg &= w1flux > 10**((22.5-19.35)/2.5)\n lrg &= zflux > rflux * 10**(1.6/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n lrg &= w1flux * rflux.clip(0)**(1.33-1) > zflux.clip(0)**1.33 * 10**(-0.33/2.5)\n\n #----- ELG\n elg = primary.copy()\n elg &= rflux > 10**((22.5-23.4)/2.5)\n elg &= zflux > rflux * 10**(0.3/2.5)\n elg &= zflux < rflux * 10**(1.5/2.5)\n elg &= rflux**2 < gflux * zflux * 10**(-0.2/2.5)\n elg &= zflux < gflux * 10**(1.2/2.5)\n\n #----- Quasars\n psflike = ((objects['TYPE'] == 'PSF') | (objects['TYPE'] == 'PSF ')) \n qso = primary.copy()\n qso &= psflike\n qso &= rflux > 10**((22.5-23.0)/2.5)\n qso &= rflux < gflux * 10**(1.0/2.5)\n qso &= zflux > rflux * 10**(-0.3/2.5)\n qso &= zflux < rflux * 10**(1.1/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n qso &= wflux * gflux.clip(0)**1.2 > rflux.clip(0)**(1+1.2) * 10**(-0.4/2.5)\n ### qso &= wflux * gflux**1.2 > rflux**(1+1.2) * 10**(2/2.5)\n\n #------ Bright Galaxy Survey\n #- 'PSF' for astropy.io.fits; 'PSF ' for fitsio (sigh)\n bgs = primary.copy()\n bgs &= ~psflike\n bgs &= rflux > 10**((22.5-19.35)/2.5)\n\n #----- Standard stars\n fstd = primary.copy()\n fstd &= psflike\n fracflux = objects['DECAM_FRACFLUX'].T \n signal2noise = objects['DECAM_FLUX'] * np.sqrt(objects['DECAM_FLUX_IVAR'])\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for j in (1,2,4): #- g, r, z\n fstd &= fracflux[j] < 0.04\n fstd &= signal2noise[:, j] > 10\n\n #- observed flux; no Milky Way extinction\n obs_rflux = objects['DECAM_FLUX'][:, 2]\n fstd &= obs_rflux < 10**((22.5-16.0)/2.5)\n fstd &= obs_rflux > 10**((22.5-19.0)/2.5)\n #- colors near BD+17; ignore warnings about flux<=0\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n grcolor = 2.5 * np.log10(rflux / gflux)\n rzcolor = 2.5 * np.log10(zflux / rflux)\n fstd &= (grcolor - 0.32)**2 + (rzcolor - 0.13)**2 < 0.06**2\n\n #-----\n #- construct the targetflag bits\n #- Currently our only cuts are DECam based (i.e. South)\n desi_target = lrg * desi_mask.LRG_SOUTH\n desi_target |= elg * desi_mask.ELG_SOUTH\n desi_target |= qso * desi_mask.QSO_SOUTH\n\n desi_target |= lrg * desi_mask.LRG\n desi_target |= elg * desi_mask.ELG\n desi_target |= qso * desi_mask.QSO\n\n desi_target |= fstd * desi_mask.STD_FSTAR\n \n bgs_target = bgs * bgs_mask.BGS_BRIGHT\n bgs_target |= bgs * bgs_mask.BGS_BRIGHT_SOUTH\n\n #- nothing for MWS yet; will be GAIA-based\n mws_target = np.zeros_like(bgs_target)\n\n #- Are any BGS or MWS bit set? 
Tell desi_target too.\n desi_target |= (bgs_target != 0) * desi_mask.BGS_ANY\n desi_target |= (mws_target != 0) * desi_mask.MWS_ANY\n\n return desi_target, bgs_target, mws_target", "def cutting(args):\n import numpy as np\n import h5py\n\n # Read in map data\n with h5py.File(args.pointmap, 'r') as f:\n ptmap = f['map'][...]\n\n if args.threshold > 0:\n cut_map = np.where(ptmap<args.threshold, 0, ptmap)\n else:\n idx = np.unravel_index(np.argmax(ptmap), ptmap.shape) # the index of the max element\n cut_map = np.zeros_like(ptmap)\n cut_map[idx] = ptmap[idx]\n\n # Create output image file name\n if args.outfile:\n out_file = args.outfile\n elif args.threshold > 0:\n out_file = ((args.pointmap.split('/')[-1]).split('.')[0]).replace('sim', 'cut') + '_' + str(int(args.threshold)) + '.hdf5'\n else:\n out_file = ((args.pointmap.split('/')[-1]).split('.')[0]).replace('sim', 'cut') + '_max.hdf5'\n\n # Save cut data\n with h5py.File(out_file, 'w') as f:\n f.create_dataset('map', data=cut_map)\n\n print 'done!'", "def _auto_mix(self):\n gamma = np.random.choice([0.5, 1, 1.5, 2])\n offset = np.random.uniform(0, self.n * self.dt)\n empty_window = np.random.uniform(0, self.n * self.dt * 0.5)\n self.t.sort()\n self.t[2 * (self.n // 5):4 * (self.n // 5)] *= gamma\n self.t[3 * (self.n // 5):] += empty_window\n self._add_irregularities(epsilon=np.random.normal(0,\n 0.05 * self.dt,\n self.n))\n self.t.sort()\n self._normalize(offset=offset)", "def mix(self, cpgs, ident, method='avg', mutation_prob=0.5, crossover_prob=[0.5, 0.5]):\n # Available methods:\n # note that mutation means selecting a new random scalar for a particular value\n # avg -- takes the average of all of the keys in self.evolvables and then goes through all of their values and chooses whether or not to mutate them by mutation_prob\n # crossover -- goes through all of the values and chooses from the nth cpg in cpgs with probability crossover_prob[n], then mutates them\n # defaults to choosing from the first two cpgs with equal probability\n\n n_cpgs = float(len(cpgs))\n\n\n new_CPG = {}\n new_CPG['ident'] = str(ident)\n new_CPG = self._set_constants(new_CPG)\n\n if method == 'avg':\n for key in self.evolvables:\n avg_params = sum([cpg[key] for cpg in cpgs]) / n_cpgs\n new_CPG[key] = avg_params\n\n if method == 'crossover':\n # assume the current crossover_prob apply to cpgs[:len(crossover_prob)]\n # anything past that can be thrown out\n cpgs = cpgs[len(cpgs) - len(crossover_prob):]\n cdf = self.cumulative_sum(crossover_prob)\n choose_cpg = lambda rand: cpgs[bisect.bisect(cdf, rand)] # bisect will return the index of the value to the right of the given number in the sorted list\n # Now go through the keys\n for key in self.evolvables:\n if key in self.scalars:\n # then we'll just choose the cpg\n new_CPG[key] = choose_cpg(self.safe_rand())[key]\n else:\n new_CPG[key] = np.zeros(self.shapes[key])\n for param in range(self.sizes[key]):\n # Since CPGs that are read in don't have their lists in numpy.ndarray form\n this_cpg = choose_cpg(self.safe_rand())\n if not isinstance(this_cpg[key], np.ndarray):\n this_cpg[key] = np.array(this_cpg[key])\n new_CPG[key].flat[param] = choose_cpg(self.safe_rand())[key].flat[param]\n\n # mutate step\n for key in self.evolvables:\n if key in self.scalars:\n if np.random.rand() <= mutation_prob:\n new_CPG[key] = new_CPG[key] * np.random.rand()\n else:\n for param in range(self.sizes[key]):\n if np.random.rand() <= mutation_prob:\n new_CPG[key].flat[param] = new_CPG[key].flat[param] * (0.5 + np.random.rand())\n # Ok 
so this is inefficient but fuck it, anyways certain indices need to be zero to prevent seizureific movement\n for ind in range(self.n*self.n):\n if ind not in self.nonzeros:\n new_CPG['w'][ind] = 0\n\n # That's all folks!\n return new_CPG", "def cut(image,box,margin=0,bg=0,dtype=None):\n (r0,c0,r1,c1) = box\n return sl.cut(image,sl.box(r0,r1,c0,c1),margin=margin,bg=bg,dtype=dtype)", "def ccut(value,arg):\n return value.replace(arg, '')", "def cut_both(self):\n return TCut(self._return_if('_cut_both'))", "def cutPaper(self, cut='partial', feed=True):\n if cut not in ['partial', 'full']:\n raise ValueError('cut must be \\'partial\\' or \\'full\\'')\n elif type(feed) is not bool:\n raise ValueError('feed must be True or False')\n else:\n value = 0 if cut == 'full' else 1\n value += 65 if feed else 0\n self._write(self.__class__.__GS + 'V' + chr(value))", "async def channel_mix(\n client,\n event,\n left_to_left : P('float', 'left to left factor' , min_value = 0.0, max_value = 5.0),\n left_to_right : P('float', 'left to right factor' , min_value = 0.0, max_value = 5.0),\n right_to_right: P('float', 'right to right factor', min_value = 0.0, max_value = 5.0),\n right_to_left : P('float', 'right to left factor' , min_value = 0.0, max_value = 5.0),\n):\n player = get_player_or_abort(client, event)\n \n filter = ChannelMix(left_to_left, left_to_right, right_to_right, right_to_left)\n player.add_filter(filter)\n await player.apply_filters()\n \n return create_filter_added_embed(filter)", "def cut(self, piece):\n self.substrates = self.substrates.difference(piece)", "def mixfactor(self, segment):\n mixfactor = 0\n a = (89.0/1.5) + self.template['mixpoint']\n b = (188.0/1.5) + self.template['mixpoint']\n loud = self.loudness(self.original.analysis.segments, segment)\n if not loud:\n loud = self.original.analysis.loudness\n if loud != -1 * b:\n mixfactor = float(float(loud + a)/float(loud + b))\n if mixfactor > 0.8:\n mixfactor = 0.8\n elif mixfactor < 0.3:\n mixfactor = 0.3\n return mixfactor", "def cepstrum(input, nceps):\n # apply the Discrete Cosine Transform\n output = dct(input, norm='ortho')[:, 0:nceps]\n\n # myplot(output, 'Before lifter')\n\n # apply liftering\n # output = lifter(output)\n\n # myplot(output, 'After lifter')\n\n return output", "def test_1d_cut():\n \n dic,data = ng.pipe.read(\"common_data/1d_pipe/test_cut.ft\")\n assert data.shape == (2766,)\n assert data.dtype == 'float32'\n assert round(data[0],2) == -12123.67\n assert round(data[1],2) == -8979.31\n assert round(data[100],2) == -7625.30\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[278.59, 10.03])", "def crop_and_concat(upsampled, bypass, crop=False):\n if crop:\n crop_im_size = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = F.pad(bypass, (\n -crop_im_size,\n -crop_im_size,\n -crop_im_size,\n -crop_im_size))\n # concatenate on row (add element on line)\n return torch.cat((upsampled, bypass), 1)", "def cut(value,arg):\n return cut.replace(arg,\"\")", "def do_mixup(x: torch.Tensor, mixup_lambda: torch.Tensor):\n out = (x[0::2].transpose(0, -1) * mixup_lambda[0::2] +\n x[1::2].transpose(0, -1) * mixup_lambda[1::2]).transpose(0, -1)\n return out", "def do_mixup(x: torch.Tensor, mixup_lambda: torch.Tensor):\n out = (x[0::2].transpose(0, -1) * mixup_lambda[0::2] +\n x[1::2].transpose(0, -1) * mixup_lambda[1::2]).transpose(0, -1)\n return out", "def fcut(r, rcut, trans_width):\n if (isinstance(r, list) or isinstance(r, np.ndarray)) and len(r) > 1:\n return np.array([fcut(ri, rcut, trans_width) for ri in r])\n 
else:\n if r > rcut:\n return 0.\n elif r > rcut-trans_width:\n return 0.5*(np.cos(np.pi*(r-rcut+trans_width)/trans_width) + 1)\n else:\n return 1.0", "def Compact(self, *args):\n return _BRepAlgo.BRepAlgo_Image_Compact(self, *args)", "def cutout(self, centre, radius):", "def forward(self, reps_in):\n\n reps_cat = self.cat_reps(reps_in)\n reps_out = self.mix_reps(reps_cat)\n return reps_out", "def cutdna(dna, *cutsites, crop=False, supfeature=False, product=None, process_name=None, process_description=None, \n pn=None, pd=None, quinable=True, **kwargs):\n \n kwargs.setdefault(\"_sourcefile\", None) \n kwargs.setdefault(\"process_id\", None)\n kwargs.setdefault(\"original_ids\", []) \n _sourcefile = kwargs[\"_sourcefile\"] \n process_id = kwargs[\"process_id\"] \n original_ids = kwargs[\"original_ids\"]\n\n #Set process name, description and ID\n project = None\n project = project if product is None else product\n process_name = pn if process_name is None else process_name\n process_description = pd if process_description is None else process_description\n\n dna = copy.deepcopy(dna)\n def extract(dna, start, end, project=None): \n start_top = start[0] \n start_bottom = start[1] \n start = min(start)\n \n end_top = end[0]\n end_bottom = end[1] \n end = max(end)\n\n if start == 0 and end == len(dna.seq) and dna._topology == \"linear\":\n new_dna = copy.copy(dna)\n new_dna._topology = \"linear\"\n return new_dna\n\n if dna.topology == \"circular\":\n start = len(dna.seq) + start if start < 0 else start\n start = start - len(dna.seq) if start > len(dna.seq) else start\n end = end - len(dna.seq) if end > len(dna.seq) else end\n \n if (start >= end or (start_top == end_top and start_bottom == end_bottom)) and dna.topology == \"circular\":\n subdna1 = extract(dna, [start, start], [len(dna.seq), len(dna.seq)])\n if start == end and start == 0:\n subdna = subdna1\n else:\n subdna2 = extract(dna, [0,0], [end,end])\n subdna = joindna(subdna1, subdna2, quinable=0)\n else:\n if start > end and dna.topology == \"linear\":\n raise ValueError(\"'end' position must be larger than 'start' position.\")\n feats = []\n new_features = []\n \n #Linearize feature (Split feature covering zero position) \n for feat in dna.dnafeatures:\n strand = feat.strand\n s = feat.start\n e = feat.end\n if s > e:\n if \"_original\" not in feat.__dict__:\n feat._original = dna.printsequence(s, e, feat.location.strand if feat.location.strand !=0 else 1)\n \n if len(feat.location.parts) == 1:\n length = len(dna.seq) - s + e\n locations = [FeatureLocation(s,len(dna.seq)),FeatureLocation(0,e)]\n if strand == -1:\n locations.reverse()\n feat.location = CompoundLocation(locations)\n feat.location.strand = strand\n\n strand = feat.strand\n if len(feat.location.parts) == 2:\n feat1 = copy.deepcopy(feat)\n feat1.location = feat.location.parts[0]\n feat1.location.strand = feat.location.strand\n feat2 = copy.deepcopy(feat)\n feat2.location = feat.location.parts[1]\n feat2.location.strand = feat.location.strand\n\n else:\n feat1 = copy.deepcopy(feat)\n new_locations = []\n for part in feat1.location.parts:\n if part.start.position > part.end.postion:\n new_locations.append(FeatureLocation(part.start.position, len(dna.seq)))\n break\n else:\n new_locations.append(part)\n if strand == -1:\n new_locations.reverse()\n feat1.location = CompoundLocation(new_locations)\n feat1.location.strand = strand\n flag = 0\n feat2 = copy.deepcopy(feat)\n new_locations = []\n for part in feat1.location.parts:\n if part.start.position > part.end.postion:\n 
new_locations.append(FeatureLocation(0, part.end.position))\n flag = 1\n\n if flag == 1:\n new_locations.append(part)\n\n if strand == -1:\n new_locations.reverse()\n feat2.location = CompoundLocation(new_locations)\n feat2.location.strnad = strand\n\n if \"broken_feature\" not in feat1.qualifiers:\n label = feat1._id\n if feat1.feature_type == \"source\":\n original_seq = \"-\"\n else:\n original_seq = feat1.original\n \n if feat1.feature_type == \"CDS\" and \"translation\" in feat1.qualifiers:\n del feat1.qualifiers[\"translation\"]\n\n label = \"{}\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, len(feat1.original), original_seq, s, e))\n if strand >= 0:\n feat1.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, 1, len(dna.seq)-s)]\n else:\n feat1.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, len(dna.seq)-s, 1)]\n\n else:\n note = feat.qualifiers[\"broken_feature\"]\n note = note[0] if type(note) is list else note \n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\"))\n note = \"{}:{}..{}\".format(label, pos_s, pos_s + len(dna.seq)-s)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\"))\n note = \"{}:{}..{}\".format(label, pos_s, pos_s - (len(dna.seq)-s))\n feat1.qualifiers[\"broken_feature\"] = [note]\n\n if \"broken_feature\" not in feat2.qualifiers:\n label = feat2._id\n if feat2.feature_type == \"source\":\n original_seq = \"-\"\n else:\n original_seq = feat2.original\n \n if feat2.feature_type == \"CDS\" and \"translation\" in feat2.qualifiers:\n del feat2.qualifiers[\"translation\"]\n\n label = \"{}\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, len(feat2.original), original_seq, s, e))\n if strand >= 0:\n feat2.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, len(dna.seq)-s+1, len(dna.seq)-s+e)]\n else:\n feat2.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, len(dna.seq)-s+e, len(dna.seq)-s+1)]\n\n else:\n note = feat.qualifiers[\"broken_feature\"][0]\n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4])\n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\"))\n note = \"{}:{}..{}\".format(label, pos_s + len(dna.seq)-s, pos_e)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\"))\n note = \"{}:{}..{}\".format(label, pos_s - (len(dna.seq)-s), pos_e)\n feat2.qualifiers[\"broken_feature\"] = [note]\n \n new_features.append(feat.__class__(feature=feat1))\n new_features.append(feat.__class__(feature=feat2))\n \n else:\n #print(feat, start, end) \n new_features.append(feat.__class__(feature=feat))\n \n #Cropping\n for feat in new_features:\n strand = feat.strand\n s = feat.start\n e = feat.end\n feat = copy.deepcopy(feat)\n if len(feat.location.parts) == 1 and s <= e:\n if e > start and s < end:\n if \"_original\" not in feat.__dict__:\n feat._original = dna.printsequence(s, e, feat.location.strand if feat.location.strand !=0 else 1) \n if s - start < 0:\n feat.location.parts[0]._start = 
ExactPosition(0)\n if \"broken_feature\" not in feat.qualifiers:\n label = feat._id\n if feat.feature_type == \"source\" or len(feat.original) > 10000:\n original_seq = \"-\"\n else:\n original_seq = feat.original\n \n if feat.feature_type == \"CDS\" and \"translation\" in feat.qualifiers:\n del feat.qualifiers[\"translation\"]\n\n label = \"{}\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, len(feat.original), original_seq, s, e))\n if strand >= 0:\n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, abs(s-start)+1, e-s)] \n else:\n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, len(feat.original) - abs(s-start), 1)] \n else:\n note = feat.qualifiers[\"broken_feature\"][0]\n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\")) + abs(s-start) \n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_e)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\")) - abs(s-start) \n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_e)\n feat.qualifiers[\"broken_feature\"] = [note]\n else:\n feat.location.parts[0]._start = ExactPosition(s - start) \n \n feat.location.parts[-1]._end = ExactPosition(e - start) \n if feat.location.parts[-1]._end > end-start:\n feat.location.parts[-1]._end = ExactPosition(end - start)\n if \"broken_feature\" not in feat.qualifiers: \n label = feat._id\n if feat.feature_type == \"source\" or len(feat.original) > 10000:\n original_seq = \"-\"\n else:\n original_seq = feat.original\n \n if feat.feature_type == \"CDS\" and \"translation\" in feat.qualifiers:\n del feat.qualifiers[\"translation\"]\n\n label = \"{}\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, len(feat.original), original_seq, s, e))\n if strand >= 0: \n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, 1, end-s)]\n else:\n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, len(feat.original), len(feat.original)-(end-s)+1)]\n else:\n s = int(feat.location.parts[0].start.position)\n note = feat.qualifiers[\"broken_feature\"][0]\n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_s + (end-start-s)-1)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_s - (end-start-s)+1)\n feat.qualifiers[\"broken_feature\"] = [note]\n \n feat.location.strand = strand\n feats.append(feat.__class__(feature=feat))\n \n else:\n length = e-s\n locations = []\n sflag = 0 \n eflag = 0\n for apart in feat.location.parts:\n s = apart.start.position \n e = apart.end.position\n if e > start and s <= end:\n if \"_original\" not in feat.__dict__:\n feat._original = dna.printsequence(s, e, feat.location.strand if feat.location.strand !=0 else 1) \n _start = ExactPosition(s)\n if s - start <= 0:\n sflag = 1\n 
_end = ExactPosition(e) \n if _end > end-start:\n eflag = 1\n locations.append([_start,_end,feat.location.strand])\n \n if len(locations) > 0:\n s = int(locations[0][0])\n e = int(locations[-1][1])\n if s - start < 0 and sflag == 1:\n locations[0][0] = ExactPosition(0)\n if \"broken_feature\" not in feat.qualifiers:\n label = feat._id\n if feat.feature_type == \"source\" or len(feat.original) > 10000:\n original_seq = \"-\"\n else:\n original_seq = feat.original\n \n if feat.feature_type == \"CDS\" and \"translation\" in feat.qualifiers:\n del feat.qualifiers[\"translation\"]\n\n label = \"{}\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, len(feat.original), original_seq, s, e))\n if strand >= 0:\n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, abs(s-start)+1, e-s)]\n else:\n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, e-s, abs(s-start)+1)] \n else:\n note = feat.qualifiers[\"broken_feature\"][0]\n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\")) + abs(s-start) \n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_e)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\")) - abs(s-start) \n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_e)\n feat.qualifiers[\"broken_feature\"] = [note]\n else:\n locations[0][0] = ExactPosition(s - start)\n \n if e > end-start and eflag == 1:\n locations[-1][1] = ExactPosition(end-start)\n if \"broken_feature\" not in feat.qualifiers:\n label = feat._id \n if feat.feature_type == \"source\" or len(feat.original) > 10000:\n original_seq = \"-\"\n else:\n original_seq = feat.original\n \n if feat.feature_type == \"CDS\" and \"translation\" in feat.qualifiers:\n del feat.qualifiers[\"translation\"]\n\n label = \"[{}]\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, len(feat.original), original_seq, s, e))\n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, 1, end-s)]\n else:\n s = int(locations[0][0])\n note = feat.qualifiers[\"broken_feature\"][0]\n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_s + (end-start-s)-1)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_s - (end-start-s)+1)\n feat.qualifiers[\"broken_feature\"] = [note]\n else:\n locations[-1][1] = ExactPosition(e - start)\n \n if len(locations) == 1:\n feat.location = FeatureLocation(*locations[0])\n else:\n for l in range(len(locations)):\n if l == 0:\n locations[l][1] = locations[l][1] - start\n elif l == len(locations) - 1:\n locations[l][0] = locations[l][0] - start\n else:\n locations[l][0] = locations[l][0] - start\n locations[l][1] = locations[l][1] - start\n locations = [FeatureLocation(*loc) for loc in locations] \n if strand == -1:\n locations.reverse()\n 
feat.location = CompoundLocation(locations)\n feats.append(feat.__class__(feature=feat))\n \n feats.sort(key=lambda x:(x.location.parts[0].start.position, x.location.parts[-1].end.position))\n subdna = dna.__class__(seq=str(dna.seq[start:end]), quinable=0)\n subdna._history_feature = copy.deepcopy(dna._history_feature) \n subdna._dnafeatures = feats\n subdna._topology = \"linear\"\n \n if start < len(dna._left_end) and dna.topology == \"linear\":\n subdna._left_end = dna._left_end[start:] \n subdna._left_end_top = dna._left_end_top\n subdna._left_end_bottom = dna._left_end_bottom\n else:\n subdna._left_end = subdna.seq[0:20] \n subdna._left_end_top = 1\n subdna._left_end_bottom = 1\n \n if len(dna.seq) - end < len(dna._right_end) and dna.topology == \"linear\":\n subdna._right_end = dna._right_end[:len(dna._right_end) - (len(dna.seq) - end)]\n subdna._right_end_top = dna._right_end_top\n subdna._right_end_bottom = dna._right_end_bottom\n \n else:\n subdna._right_end = subdna.seq[-20:]\n subdna._right_end_top = 1\n subdna._right_end_bottom = 1\n\n subdna.record.annotations[\"topology\"] = subdna.topology\n subdna.record.features = subdna.dnafeatures\n \n if start_top != start_bottom or end_top != end_bottom:\n start_dif = start_top - start_bottom\n if start_dif > 0:\n left = \"-\" * start_dif + \"/\" + \"*\" * start_dif\n elif start_dif < 0:\n left = \"*\" * abs(start_dif) + \"/\" + \"-\" * abs(start_dif) \n else:\n left = \"\"\n \n end_dif = end_top - end_bottom\n if end_dif > 0:\n right = \"*\" * end_dif + \"/\" + \"-\" * end_dif \n elif end_dif < 0:\n right = \"-\" * abs(end_dif) + \"/\" + \"*\" * abs(end_dif) \n else:\n right = \"\"\n subdna = modifyends(subdna, left, right, quinable=0)\n else:\n pass \n \n for dnafeature in subdna.dnafeatures:\n dnafeature.subject = subdna \n \n if start >= end:\n subdna._positions = dna._positions[start:] + dna._positions[:end]\n else:\n subdna._positions = dna._positions[start:end] \n\n return subdna \n \n dnas = [] \n new_positions = [] \n for pos in cutsites:\n if type(pos) is str:\n pos = tuple(map(int,pos.split(\"/\")))\n spos, epos = pos\n spos = spos - len(dna.seq) if spos > len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n new_positions.append((spos,epos)) \n \n elif type(pos) is int or (\"__dict__\" in dir(pos) and \"_qint\" in pos.__dict__):\n pos = (pos, pos) \n spos, epos = pos\n spos = spos - len(dna.seq) if spos > len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n new_positions.append((spos,epos)) \n \n elif type(pos) is SeqFeature or (\"__dict__\" in dir(pos) and \"_dnafeature\" in pos.__dict__):\n strand = pos.location.strand\n if \"cutsite\" not in pos.qualifiers:\n raise ValueError(\"DNAfeature object should hold 'qualifiers:cutsite' attribute.\")\n \n if pos._digestion_topl == \"null\":\n _, _, pos._digestion_topl, pos._digestion_topr, pos._digestion_bottoml, pos._digestion_bottomr = compile_cutsite(pos.qualifiers[\"cutsite\"][0])\n \n if strand != -1:\n if pos._digestion_topl != \"null\":\n spos = pos.start - pos._digestion_topl\n epos = pos.start - pos._digestion_bottoml \n spos = spos - len(dna.seq) if spos > len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n new_positions.append((spos,epos))\n \n elif pos._digestion_topr != \"null\": \n spos = pos.end + pos._digestion_topr\n epos = pos.end + pos._digestion_bottomr\n spos = spos - len(dna.seq) if spos > len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n 
new_positions.append((spos,epos))\n else:\n if pos._digestion_topr != \"null\":\n spos = pos.start - pos._digestion_bottomr\n epos = pos.start - pos._digestion_topr\n spos = spos - len(dna.seq) if spos > len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n new_positions.append((spos,epos))\n \n elif pos._digestion_topl != \"null\": \n spos = pos.end + pos._digestion_bottoml\n epos = pos.end + pos._digestion_topl\n spos = spos - len(dna.seq) if spos > len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n new_positions.append((spos,epos))\n \n tmp_positions = new_positions[:]\n tmp_positions.sort() \n top_positions = list(list(zip(*tmp_positions))[0])\n bottom_positions = list(list(zip(*tmp_positions))[1])\n for b in range(len(bottom_positions)-1):\n if bottom_positions[b] <= bottom_positions[b+1]:\n pass\n else:\n raise ValueError(\"Invalid cut pattern.\")\n\n new_positions_original = new_positions[:] \n new_positions_original = [\"{}/{}\".format(*posset) for posset in new_positions_original]\n if crop == True:\n crop_positions = (new_positions[0], new_positions[1])\n \n if dna.topology == \"linear\":\n if (0,0) not in new_positions:\n new_positions = [(0,0)] + new_positions \n else:\n pass\n \n if (len(dna.seq),len(dna.seq)) not in new_positions:\n new_positions = new_positions + [(len(dna.seq), len(dna.seq))] \n new_positions = list(new_positions) \n new_positions.sort() \n \n elif dna.topology == \"circular\":\n new_positions = list(new_positions) \n tmp_positions = new_positions[:]\n new_positions.sort() \n for pindex, pos in enumerate(new_positions):\n if pos == tmp_positions[0]:\n new_positions = new_positions[pindex:] + new_positions[:pindex]\n break \n\n if dna.topology == \"linear\":\n if crop == True:\n dnas.append(extract(dna, crop_positions[0], crop_positions[1], project=project))\n else:\n for i, pos in enumerate(new_positions[0:-1]):\n dnas.append(extract(dna, pos, new_positions[i+1], project=project))\n \n elif dna.topology == \"circular\":\n if crop == True: \n dnas.append(extract(dna, crop_positions[0], crop_positions[1], project=project))\n else:\n for i, pos in enumerate(new_positions[0:-1]):\n dnas.append(extract(dna, pos, new_positions[i+1], project=project))\n if new_positions[0] == (0,0):\n dnas.append(extract(dna, new_positions[-1], (len(dna.seq), len(dna.seq)), project=project)) \n else:\n dnas.append(extract(dna, new_positions[-1], new_positions[0], project=project)) \n\n if project is None:\n for subdna in dnas:\n subdna._unique_id = dna._unique_id\n else:\n for subdna in dnas:\n subdna._unique_id = project\n \n if quinable == True:\n products = []\n dna_keys = list(dnas[0].__class__.dna_dict.keys())\n for i in range(len(dnas)):\n dnas[i]._product_id = dnas[i]._unique_id if product is None else product \n products.append(\"QUEEN.dna_dict['{}']\".format(dnas[i]._product_id))\n\n args = [] \n history_features = [dnas[0]._history_feature] \n for pos in cutsites:\n if \"__dict__\" in dir(pos) and \"_dnafeature\" in pos.__dict__:\n qkey = pos._qkey\n for qindex, qfeat in enumerate(dnas[0].__class__.queried_features_dict[qkey]):\n if qfeat._second_id == pos._second_id:\n break\n args.append(\"QUEEN.queried_features_dict['{}'][{}]\".format(qkey, qindex))\n history_features.append(pos.subject._history_feature) \n\n elif \"__dict__\" in dir(pos) and \"_qint\" in pos.__dict__:\n qkey = pos.qkey\n for qindex, qfeat in enumerate(dnas[0].__class__.queried_features_dict[qkey]):\n if qfeat._second_id == pos.parental_id:\n break\n 
args.append(\"QUEEN.queried_features_dict['{}'][{}].{}\".format(pos.qkey, qindex, pos.name))\n history_features.append(pos.parent.subject._history_feature) \n\n else:\n if type(pos) is int:\n args.append(str(pos))\n else:\n args.append(\"'\" + str(pos) + \"'\")\n \n \n if type(supfeature) in (tuple, list) and type(supfeature[0]) in (tuple, list) and type(supfeature[0][0]) == dict:\n for i, feature_dict_list in enumerate(supfeature): \n for feature_dict in feature_dict_list:\n dnas[i].setfeature(feature_dict) \n \n elif type(supfeature) in (tuple, list) and type(supfeature[0]) == dict:\n for i, feature_dict in enumerate(supfeature): \n dnas[i].setfeature(feature_dict) \n\n elif type(supfeature) == dict:\n dnas[0].setfeature(supfeature)\n\n if crop == True:\n fcrop = \", crop=True\"\n else:\n fcrop = \"\" \n \n project = \"\" \n fsupfeature = \"\" if supfeature == False else \", supfeature={}\".format(str(supfeature))\n fproduct = \"\" if product is None else \", product='\" + product + \"'\"\n process_name = \"\" if process_name is None else \", process_name='\" + process_name + \"'\"\n process_description = \"\" if process_description is None else \", process_description='\" + process_description + \"'\" \n \n if len(products) > 1:\n building_history = \"{} = cutdna(QUEEN.dna_dict['{}'], {}{}{}{}{}{}{})\".format(\", \".join(products), dna._product_id, \", \".join(args), fcrop, fsupfeature, project, fproduct, process_name, process_description) \n else:\n building_history = \"{}, = cutdna(QUEEN.dna_dict['{}'], {}{}{}{}{}{}{})\".format(\", \".join(products), dna._product_id, \", \".join(args), fcrop, project, fsupfeature, fproduct, process_name, process_description)\n \n for subdna in dnas:\n history_feature = _combine_history(subdna, history_features)\n subdna._history_feature = history_feature\n process_id, original_ids = make_processid(subdna, building_history, process_id, original_ids)\n subdna._check_uniqueness() \n add_history(subdna, [building_history, \"positions: {}\".format(\",\".join(list(map(str, new_positions_original)))) + \"; num_products: {}\".format(len(dnas)), \",\".join([process_id] + original_ids)], _sourcefile)\n else:\n for subdna in dnas:\n subdna.__dict__[\"_product_id\"] = dna._product_id if \"_product_id\" in dna.__dict__ else dna._unique_id\n \n if product is None:\n pass \n else:\n product = product.replace(\" \",\"\") \n if \",\" in product:\n for name, subdna in zip(product.split(\",\"), dnas):\n dnas[0].__class__._namespace[name] = subdna\n else: \n dnas[0].__class__._namespace[product] = dnas\n\n if crop == True:\n return dnas[0], crop_positions \n else:\n return dnas", "def main(configuration_path, input_path, output_path, chunksize, verbose):\n log = setup_logging(verbose=verbose)\n\n with open(configuration_path) as f:\n config = yaml.load(f)\n\n selection = config.get(\"selection\", None)\n data_format = config.get(\"data_format\", \"simple\")\n\n if not selection:\n log.info(\"No entries for selection cuts. 
Just copying files.\")\n copyfile(input_path, output_path)\n log.info(\"Copying finished\")\n return\n\n log.info(data_format)\n if data_format == \"simple\":\n key = config.get(\"events_key\", \"events\")\n n_events = get_number_of_rows_in_table(input_path, key=key)\n if chunksize is None:\n chunksize = n_events + 1\n apply_cuts_h5py_chunked(\n input_path, output_path, selection, chunksize=chunksize, key=key\n )\n n_events_after = get_number_of_rows_in_table(output_path, key=key)\n remaining = n_events_after / n_events\n log.info(f\"Events in file before cuts {n_events}\")\n log.info(\n f\"Events in new file after cuts {n_events_after}. That is {remaining:.2%}\"\n )\n copy_group(input_path, output_path, \"runs\")\n elif data_format == \"CTA\":\n keep_images = config.get(\"keep_images\", True)\n n_before, n_after = apply_cuts_cta_dl1(\n input_path,\n output_path,\n selection,\n keep_images,\n )\n\n log.info(f\"Telescope-events in file before cuts {n_before}\")\n log.info(\n f\"Telescope-events in new file after cuts {n_after}. \"\n f\"That is {(n_after/n_before):.2%}\"\n )", "def fold(self) -> np.ndarray:\n tiles = self.get_tiles()\n return self.tiles_to_volume(tiles)", "def getCutout(stack, sliceInd, xstart, ystart, deltaX, deltaY, scale, minIntensity, maxIntensity, mod):\n \n img = renderapi.image.get_bb_image(\n stack, sliceInd, xstart, ystart, deltaX, deltaY, scale, minIntensity, maxIntensity, render=mod.render)\n\n img = img[:, :, 0]\n return img", "def switch_cut_cor(self):\n if self.cut_cor == 41:\n self.cut_cor = 42\n elif self.cut_cor == 42:\n self.cut_cor = 41", "def triple_cut(deck):\n \n \n big = get_big_joker_value(deck)\n small = get_small_joker_value(deck)\n if deck.index(big) < deck.index(small):\n first_j = deck.index(big) \n second_j = deck.index(small)\n elif deck.index(small) < deck.index(big):\n first_j = deck.index(small)\n second_j = deck.index(big)\n above_first_j = deck[:first_j]\n under_second_j = deck[second_j+1:]\n middle = deck[first_j:second_j + 1]\n deck[:] = under_second_j + middle + above_first_j", "def clConvolution(self, size, mask):", "def run_cut(self, expanded, unexpanded) :\n\t\tif not expanded :\n\t\t\treturn self.errormessage(\"Needs some objects ids to cut\")\n\t\tif not self.HasPerms(self.__context, 'View management screens') :\n\t\t\treturn -1\n\t\tstatus = 0\n\t\tobjids = []\n\t\tfor objid in expanded :\n\t\t\tif '/' in objid :\n\t\t\t\tstatus = status + self.errormessage('Paths for objects ids are not allowed at this time: %s' % objid)\n\t\t\telse :\n\t\t\t\tobjids.append(objid)\n\t\ttry :\n\t\t\tself._clipboard = self.__context.manage_cutObjects(ids = objids)\n\t\t\tfor objid in objids :\n\t\t\t\tself.htmlmessage('%s cut to clipboard' % objid)\n\t\texcept AttributeError, msg :\n\t\t\tstatus = status + self.errormessage(\"Object %s doesn't exist\" % msg)\n\t\treturn status", "def make_cutouts(catalogname, imagename, image_label, apply_rotation=False,\n table_format='ascii.ecsv', image_ext=0, clobber=False,\n verbose=True):\n # Optional dependencies...\n from reproject import reproject_interp\n\n table = QTable.read(catalogname, format=table_format)\n\n with fits.open(imagename) as pf:\n data = pf[image_ext].data\n wcs = WCS(pf[image_ext].header)\n\n # It is more efficient to operate on an entire column at once.\n c = SkyCoord(table['ra'], table['dec'])\n x = (table['cutout_x_size'] / table['spatial_pixel_scale']).value # pix\n y = (table['cutout_y_size'] / table['spatial_pixel_scale']).value # pix\n pscl = table['spatial_pixel_scale'].to(u.deg 
/ u.pix)\n\n # Do not rotate if column is missing.\n if 'cutout_pa' not in table.colnames:\n apply_rotation = False\n\n # Sub-directory, relative to working directory.\n path = '{0}_cutouts'.format(image_label)\n if not os.path.exists(path):\n os.mkdir(path)\n\n cutcls = partial(Cutout2D, data, wcs=wcs, mode='partial')\n\n for position, x_pix, y_pix, pix_scl, row in zip(c, x, y, pscl, table):\n\n if apply_rotation:\n pix_rot = row['cutout_pa'].to(u.degree).value\n\n cutout_wcs = WCS(naxis=2)\n cutout_wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n cutout_wcs.wcs.crval = [position.ra.deg, position.dec.deg]\n cutout_wcs.wcs.crpix = [(x_pix - 1) * 0.5, (y_pix - 1) * 0.5]\n\n try:\n cutout_wcs.wcs.cd = wcs.wcs.cd\n cutout_wcs.rotateCD(-pix_rot)\n except AttributeError:\n cutout_wcs.wcs.cdelt = wcs.wcs.cdelt\n cutout_wcs.wcs.crota = [0, -pix_rot]\n\n cutout_hdr = cutout_wcs.to_header()\n\n try:\n cutout_arr = reproject_interp(\n (data, wcs), cutout_hdr,\n shape_out=(math.floor(y_pix + math.copysign(0.5, y_pix)),\n math.floor(x_pix + math.copysign(0.5, x_pix))),\n order=2)\n except Exception:\n if verbose:\n log.info('reproject failed: '\n 'Skipping {0}'.format(row['id']))\n continue\n\n cutout_arr = cutout_arr[0] # Ignore footprint\n cutout_hdr['OBJ_ROT'] = (pix_rot, 'Cutout rotation in degrees')\n\n else:\n try:\n cutout = cutcls(position, size=(y_pix, x_pix))\n except NoConvergence:\n if verbose:\n log.info('WCS solution did not converge: '\n 'Skipping {0}'.format(row['id']))\n continue\n except NoOverlapError:\n if verbose:\n log.info('Cutout is not on image: '\n 'Skipping {0}'.format(row['id']))\n continue\n else:\n cutout_hdr = cutout.wcs.to_header()\n cutout_arr = cutout.data\n\n if np.array_equiv(cutout_arr, 0):\n if verbose:\n log.info('No data in cutout: Skipping {0}'.format(row['id']))\n continue\n\n fname = os.path.join(\n path, '{0}_{1}_cutout.fits'.format(row['id'], image_label))\n\n # Construct FITS HDU.\n hdu = fits.PrimaryHDU(cutout_arr)\n hdu.header.update(cutout_hdr)\n hdu.header['OBJ_RA'] = (position.ra.deg, 'Cutout object RA in deg')\n hdu.header['OBJ_DEC'] = (position.dec.deg, 'Cutout object DEC in deg')\n\n hdu.writeto(fname, clobber=clobber)\n\n if verbose:\n log.info('Wrote {0}'.format(fname))", "def get_cuts(data, args, verbose):\n\n if args['experiment']['cut_finding'] == CutFinding.features:\n\n values = (data.xs == True).T\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.binning:\n\n values, names = binning(xs=data.xs,\n range_answers=args['cut_finding']['range_answers'],\n n_bins=args['cut_finding']['n_bins'])\n return Cuts(values=values, names=names)\n\n if args['experiment']['cut_finding'] == CutFinding.Kernighan_Lin:\n\n values = kernighan_lin(A=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.kmodes:\n\n values = find_kmodes_cuts(xs=data.xs,\n max_nb_clusters=args['cut_finding']['max_nb_clusters'])\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.Fiduccia_Mattheyses:\n\n values = fid_mat(xs=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == 
CutFinding.linear:\n\n values, equations = linear_cuts(xs=data.xs,\n equations=args['cut_finding']['equations'],\n verbose=verbose)\n\n return Cuts(values=values, equations=equations)\n\n raise ValueError('Wrong name for a cut finding function')", "def cutwire(self, irc, msg, args, channel, cutWire):\n channel = ircutils.toLower(channel)\n try:\n if not self.bombs[channel].active:\n return\n if not ircutils.nickEqual(self.bombs[channel].victim, msg.nick):\n irc.reply('You can\\'t cut the wire on someone else\\'s bomb!')\n return\n self.bombs[channel].cutwire(irc, cutWire)\n except KeyError:\n pass\n irc.noReply()", "def fx(x, y):\n # Check bounds.\n x[ x < 0 ] = 0.\n y[ y < 0 ] = 0.\n\n x[ x > img.shape[1]-1 ] = img.shape[1]-1\n y[ y > img.shape[0]-1 ] = img.shape[0]-1\n\n return ggmix[ (y.round().astype(int), x.round().astype(int)) ]", "def cut_and_splice_clips(movie_info, \n clip_window, \n clip_window_origin,\n peak_thresh=0.00, \n divider_clip=None):\n\n\n clip_list_arr, clips_to_delete = generate_clips(movie_info, clip_window, \n clip_window_origin, peak_thresh, divider_clip)\n\n output_file = movie_info['output_file']\n list_file = write_list_file(output_file, clip_list_arr)\n splice_clips(list_file, output_file)\n move_clips_to_folder(clips_to_delete, movie_info['output_file'])", "def quality_cut(df): \n\n quality_cut_components_columns = [\n 'glitch_time_cut',\n 'maintenance_cut',\n 'reset_cut',\n 'offset_ion_cut',\n 'chi2_heat_cut',\n 'chi2_ion_cut',\n ]\n \n truth_array = pd.Series(data=True, index=df.index)\n for col in quality_cut_components_columns:\n truth_array = truth_array & df[col]\n \n df['quality_cut'] = truth_array\n\n return None", "def CutEdge(self, *args):\n return _BRepAlgo.BRepAlgo_Loop_CutEdge(self, *args)", "def _optimize_split_mixture(self, y, responsibility, component_index):\n U, S, V = _svd(self.covariance[component_index], self.covariance_type)\n\n split_mean = self.mean[component_index] \\\n + np.vstack([+V[0], -V[0]]) * S[0]**0.5\n\n # Responsibilities are initialized by allocating the data points to \n # the closest of the two means.\n distance = np.sum((y[:, :, None] - split_mean.T)**2, axis=1).T\n\n N, D = y.shape\n split_responsibility = np.zeros((2, N))\n split_responsibility[np.argmin(distance, axis=0), np.arange(N)] = 1.0\n\n # Calculate the child covariance matrices.\n split_covariance = _estimate_covariance_matrix(\n y, split_responsibility, split_mean,\n self.covariance_type, self.covariance_regularization)\n\n split_effective_membership = np.sum(split_responsibility, axis=1) \n split_weight = split_effective_membership.T \\\n / np.sum(split_effective_membership)\n\n # Integrate the split components with the existing mixture.\n parent_weight = self.weight[component_index]\n parent_responsibility = responsibility[component_index]\n\n mixture = self.__class__(\n threshold=self.threshold,\n covariance_type=self.covariance_type,\n max_em_iterations=self.max_em_iterations,\n covariance_regularization=self.covariance_regularization)\n\n # Initialize it.\n mixture.set_parameters(mean=split_mean, weight=split_weight,\n covariance=split_covariance)\n\n # Run E-M on the partial mixture.\n R, meta = mixture._expectation_maximization(\n y, parent_responsibility=responsibility[component_index])\n\n if self.weight.size > 1:\n # Integrate the partial mixture with the full mixture.\n weight = np.hstack([self.weight, \n [parent_weight * mixture.weight[1]]])\n weight[component_index] = parent_weight * mixture.weight[0]\n\n mean = np.vstack([self.mean, 
[mixture.mean[1]]])\n mean[component_index] = mixture.mean[0]\n\n covariance = np.vstack([self.covariance, [mixture.covariance[1]]])\n covariance[component_index] = mixture.covariance[0]\n\n responsibility = np.vstack([responsibility,\n [parent_responsibility * R[1]]])\n responsibility[component_index] = parent_responsibility * R[0]\n\n mixture.set_parameters(\n mean=mean, covariance=covariance, weight=weight)\n\n R, meta = mixture._expectation_maximization(\n y, responsibility=responsibility)\n\n # Store the mixture.\n slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))\n self._proposed_mixtures.append(mixture)\n self._mixture_predictors.append([\n mixture.weight.size,\n np.sum(np.log(mixture.weight)),\n meta[\"log_likelihood\"],\n slogdet,\n -meta[\"log_likelihood\"] + (D+2)/2.0 * slogdet\n ])\n # TODO: Remove predictors that we don't use.\n #self._slogs.append(np.linalg.det(mixture.covariance))\n\n return (len(self._proposed_mixtures) - 1, R, meta)\n\n # Run\n\n kwds = dict(\n threshold=self._threshold,\n max_em_iterations=self._max_em_iterations,\n covariance_type=self._covariance_type,\n covariance_regularization=self._covariance_regularization)\n\n # Run E-M on the split mixture, keeping all else fixed.\n #(dict(mean=mu, covariance=cov, weight=weight), responsibility, meta, dl)\n params, R, meta, dl = _expectation_maximization(y, split_mean, split_covariance,\n split_weight, responsibility=split_responsibility,\n parent_responsibility=parent_responsibility,\n **kwds)\n\n\n if self.weight.size > 1:\n\n # Integrate the child mixtures back.\n weight = np.hstack([self.weight, [parent_weight * params[\"weight\"][1]]])\n weight[component_index] = parent_weight * params[\"weight\"][0]\n\n mean = np.vstack([self.mean, [params[\"mean\"][1]]])\n mean[component_index] = params[\"mean\"][0]\n\n covariance = np.vstack([self.covariance, [params[\"covariance\"][1]]])\n covariance[component_index] = params[\"covariance\"][0]\n\n responsibility = np.vstack([responsibility, \n [parent_responsibility * R[1]]])\n responsibility[component_index] \\\n = parent_responsibility * R[0]\n\n return _expectation_maximization(y, mean, covariance, weight,\n responsibility=responsibility, **kwds)\n\n\n else:\n return (params, R, meta, dl)", "def cosh(x):\n raise NotImplementedError", "def make_chopper(\n frequency: sc.Variable,\n position: sc.Variable,\n phase: sc.Variable = None,\n cutout_angles_center: sc.Variable = None,\n cutout_angles_width: sc.Variable = None,\n cutout_angles_begin: sc.Variable = None,\n cutout_angles_end: sc.Variable = None,\n kind: str = None,\n) -> sc.Dataset:\n data = {\"frequency\": frequency, \"position\": position}\n if phase is not None:\n data[\"phase\"] = phase\n if cutout_angles_center is not None:\n data[\"cutout_angles_center\"] = cutout_angles_center\n if cutout_angles_width is not None:\n data[\"cutout_angles_width\"] = cutout_angles_width\n if cutout_angles_begin is not None:\n data[\"cutout_angles_begin\"] = cutout_angles_begin\n if cutout_angles_end is not None:\n data[\"cutout_angles_end\"] = cutout_angles_end\n if kind is not None:\n data[\"kind\"] = kind\n chopper = sc.Dataset(data=data)\n\n # Sanitize input parameters\n if (None not in [cutout_angles_begin, cutout_angles_end]) or (\n None not in [cutout_angles_center, cutout_angles_width]\n ):\n widths = utils.cutout_angles_width(chopper)\n if (sc.min(widths) < sc.scalar(0.0, unit=widths.unit)).value:\n raise ValueError(\"Negative window width found in chopper cutout angles.\")\n if not 
sc.allsorted(utils.cutout_angles_begin(chopper), dim=widths.dim):\n raise ValueError(\"Chopper begin cutout angles are not monotonic.\")\n if not sc.allsorted(utils.cutout_angles_end(chopper), dim=widths.dim):\n raise ValueError(\"Chopper end cutout angles are not monotonic.\")\n\n return chopper", "def getPrescaleFromCut(cut):\n sign = -1 if cut<0 else 1\n ucut = abs(cut)\n return (sign*0xFFFFFF ) / float( 0x1000000 - ucut )", "def cut_bytes(lines=[],nfrom=None,nto=None,complement=0):\n return cut_by(\"-b\",lines,nfrom,nto,complement)", "def onAddCutToolClicked(self, event):\n i_cube = self.cube_choice.GetSelection()\n i_dimension = self.cut_dimension_choice.GetSelection()\n\n if i_dimension <= 0:\n dlg_func.openWarningBox(_(u'CUT'), _(u'Cut dimension not selected'))\n else:\n value = self.cut_value_textCtrl.GetValue()\n if not value.strip():\n dlg_func.openWarningBox(_(u'CUT'), _(u'Cut value not specified'))\n else:\n cube = self._OLAP_server.getCubes()[i_cube]\n dimension = cube.getDimensions()[i_dimension - 1]\n row = (dimension.getLabel(), dimension.getName(), value)\n self.appendListCtrlRow(listctrl=self.cut_listCtrl, row=row)\n\n # After adding, clear the controls\n self.cut_dimension_choice.SetSelection(0)\n self.cut_value_textCtrl.SetValue(u'')\n\n event.Skip()", "def reduce_recipe(self):\n\n self.recipe.reduce(self.crafting, self.crafting_stride)", "def cut_eval(self, hits, *args):\n end = self.start_offset + self.train_window + self.predict_window\n return self.cut(hits, self.start_offset, end) + args", "def cutting_characters(character, image_2cut):\n\n preparing = []\n m = len(character)\n image_2cut = image_2cut.copy()\n\n for n in character:\n\n # The information is extracted from the the tupla n in character list.\n # For more information about this coordinates check the Bounding Rectangle function resources\n ulc_X = n[0]\n ulc_Y = n[1]\n\n width = n[2]\n height = n[3]\n\n #There is asigned new name to the above information and is constructed the rectangle.\n start_x = int(ulc_X)\n start_y = int(ulc_Y)\n\n width_new = int(width)\n height_new = int(height)\n\n\n final_x = start_x + width_new\n final_y = start_y + height_new\n\n # A width and height outter value is placed that allow a prudential margin of the principal content.\n width_outer = 25\n height_outer = 45\n\n\n #Then the rectangle is constructed with these outter width and heigt and the x and y coordinate are displaced too.\n x_outer = int(ulc_X) - 4\n y_outer = int(ulc_Y) - 6\n\n outer_xf = x_outer + width_outer\n outer_yf = y_outer + height_outer\n\n # Both rectangles are cutted by image_2cut\n\n rec_char_outer = image_2cut[y_outer:outer_yf, x_outer:outer_xf]\n\n rec_char_inter = image_2cut[start_y:final_y, start_x: final_x]\n\n # Imperfections are corrected and filling with white color by filling_white\n\n prep = filling_white(rec_char_outer, rec_char_inter)\n\n prep, _= resizing(prep, prep, 15)\n\n preparing.append(prep)\n\n return preparing", "def cutflow(self, *names):\n for cut in names:\n if not isinstance(cut, str) or cut not in self._names:\n raise ValueError(\n \"All arguments must be strings that refer to the names of existing selections\"\n )\n\n masksonecut, maskscutflow = [], []\n for i, cut in enumerate(names):\n mask1 = self.any(cut)\n mask2 = self.all(*(names[: i + 1]))\n masksonecut.append(mask1)\n maskscutflow.append(mask2)\n\n if not self.delayed_mode:\n nevonecut = [len(self._data)]\n nevcutflow = [len(self._data)]\n nevonecut.extend(numpy.sum(masksonecut, axis=1))\n 
nevcutflow.extend(numpy.sum(maskscutflow, axis=1))\n\n else:\n nevonecut = [dask_awkward.count(self._data, axis=0)]\n nevcutflow = [dask_awkward.count(self._data, axis=0)]\n nevonecut.extend([dask_awkward.sum(mask1) for mask1 in masksonecut])\n nevcutflow.extend([dask_awkward.sum(mask2) for mask2 in maskscutflow])\n\n return Cutflow(\n names, nevonecut, nevcutflow, masksonecut, maskscutflow, self.delayed_mode\n )", "def cut_neck(template, r0, c0, r1, c1):\n # rr, cc, _ = line_aa(r0, c0, r1, c1)\n rr, cc = line(r0, c0, r1, c1)\n template[rr, cc] = 0\n return template", "def __reduce__(self):\n return ImageNetDownsample, (self.cutout,)", "def cut_characters(lines=[],nfrom=None,nto=None,complement=0):\n return cut_by(\"-c\",lines,nfrom,nto,complement)", "def prepro(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()", "def prepro(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()", "def cut_wavs(src, tgt, start, end):\n existed = os.path.exists(tgt)\n cmd = (\"sox\", \"--ignore-length\", src, \"-c 1 -r 16000 -b 16\", tgt, \"trim\", str(start), str(end - start))\n print u\" \".join(cmd)\n os.system(u\" \".join(cmd))\n return existed", "def heat_chi2_cut(stream, df):\n heat_chi2_threshold = quality_parameters[stream]['heat_chi2_threshold']\n chi2_heat = df['chi2_heat']\n energy_adu_heat = df['energy_adu_heat']\n \n # chi2_threshold = heat_chi2_threshold * ( 1 + (energy_adu_heat/2e3)**2 )\n chi2_threshold = heat_chi2_threshold_function(\n heat_chi2_threshold,\n energy_adu_heat\n )\n \n df['chi2_heat_cut'] = (chi2_heat < chi2_threshold)\n \n return None", "def _sample_binary_mixtures(model, steps, dtype=np.uint):\n mixture_size = model.parameters['fixed_mixture_size']\n \n if not model.is_correlated_mixture and mixture_size is None:\n # use simple monte carlo algorithm\n prob_s = model.substrate_probabilities\n \n for _ in range(int(steps)):\n # choose a mixture vector according to substrate probabilities\n yield (np.random.random(model.Ns) < prob_s).astype(dtype)\n\n elif mixture_size is None:\n # go through all mixtures and don't keep the size constant\n\n # use metropolis algorithm\n hi = model.commonness\n Jij = model.correlations\n \n # start with a random concentration vector \n c = np.random.randint(0, 2, model.Ns).astype(dtype)\n E_last = -np.dot(np.dot(Jij, c) + hi, c)\n \n for _ in range(int(steps)):\n i = random.randrange(model.Ns)\n c[i] = 1 - c[i] #< switch the entry\n Ei = -np.dot(np.dot(Jij, c) + hi, c)\n if Ei < E_last or random.random() < np.exp(E_last - Ei):\n # accept the new state\n E_last = Ei\n else:\n # reject the new state and revert to the last one\n c[i] = 1 - c[i]\n \n yield c\n \n elif mixture_size == 0:\n # special case which is not covered by the iteration below\n c_zero = np.zeros(model.Ns, dtype)\n for _ in range(model._sample_steps):\n yield c_zero\n\n elif mixture_size == model.Ns:\n # special case which is not covered by the iteration below\n c_ones = np.ones(model.Ns, dtype)\n for _ in range(steps):\n yield c_ones\n \n else:\n # go through mixtures with keeping their size constant\n\n # use 
metropolis algorithm\n hi = model.commonness\n Jij = model.correlations\n\n # create random concentration vector with fixed substrate count\n c = np.r_[np.ones(mixture_size, dtype),\n np.zeros(model.Ns - mixture_size, dtype)]\n np.random.shuffle(c)\n E_last = -np.dot(np.dot(Jij, c) + hi, c)\n \n for _ in range(int(steps)):\n # find the next mixture by swapping two items\n i0 = random.choice(np.flatnonzero(c == 0)) #< find 0\n i1 = random.choice(np.flatnonzero(c)) #< find 1\n c[i0], c[i1] = 1, 0 #< swap entries\n Ei = -np.dot(np.dot(Jij, c) + hi, c)\n if Ei < E_last or random.random() < np.exp(E_last - Ei):\n # accept the new state\n E_last = Ei\n else:\n # reject the new state and revert to the last one\n c[i0], c[i1] = 0, 1\n \n yield c", "def cut_image_psf(self, x, y, r_cut):\n image_cutted = self.image[x - r_cut:x + r_cut + 1, y - r_cut:y + r_cut + 1]\n return image_cutted", "def noiseReduction(self):\n pass", "def __call__(self, img: Image):\n if self.K <= 1:\n return self.transform(img)\n else:\n return [self.transform(img) for _ in range(self.K)]", "def mycut(value, arg):\r\n return value.replace(arg, '')", "def ShowOneContourCutBKG(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-100\n YMAX=100\n \n figname='contourCutBKG_{}_{}.pdf'.format(all_filt[index],index)\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-10:y0+10,:]=0\n reduc_image=full_image[y0+YMIN:y0+YMAX,x0:spec_index_max]/all_expo[index]\n \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n cs=plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image ,50, colors='white', linewidth=.001,origin='lower') \n \n \n cbar = plt.colorbar(cs) \n \n for 
line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX*0.8,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def sharpen_bands(self):\n for label in self.labels:\n self.sharp_bands[label] = self.bands[label] - self.gauss_bands[\n label]", "def categoryFinder(bassSize):\n from string import Template\n catCollapse = {}\n sliceCount = {}\n theImportantSlices = []\n skippedThings = 0\n corpusSize = 0\n binnedThings = 0\n supersettedThings = 0\n #Load the pickled slices that have not been bass-normalized into types\n theSlices = pickle.load( open ('1122MajModeSliceDictwSDB.pkl', 'rb') )\n for i, slicey in enumerate(theSlices):\n if slicey == ['start'] or slicey == ['end']:\n continue\n #keep count of the total number of slices before reduction\n corpusSize += 1\n if theSlices[i+1] == ['end']:\n continue\n #First, deal with singletons of bass motion 0\n if len(slicey['voicing_type']) == 1 and theSlices[i]['bassMIDI'] - theSlices[i+1]['bassMIDI'] == 0:\n skippedThings += 1\n continue\n #Next, only look at cases where |bass motion| > bassSize\n if abs(theSlices[i+1]['bassMIDI'] - theSlices[i]['bassMIDI']) > bassSize:\n secondSlicePCs = []\n theKey = theSlices[i+1]['key']\n theTonic = str(theKey).split(' ')[0]\n theKeyPC = pitch.Pitch(theTonic).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in theSlices[i+1]['pcset']]\n for n in keyTransPCs:\n secondSlicePCs.append(n)\n firstSlicePCs = []\n theKey = theSlices[i]['key']\n theTonic = str(theKey).split(' ')[0]\n theKeyPC = pitch.Pitch(theTonic).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in theSlices[i]['pcset']]\n for m in keyTransPCs:\n firstSlicePCs.append(m)\n #make sure second thing is superset of first thing\n continueIfZero = 0\n #even one note wrong means no!\n for n in firstSlicePCs:\n if n not in secondSlicePCs:\n continueIfZero += 1\n break\n #If it passes bass motion and superset test, skip it\n if continueIfZero == 0:\n skippedThings += 1\n continue\n #if the slice is still around, it's \"important\" \n theImportantSlices.append(slicey)\n #Now, from the important ones, find voicing probs\n for slicey in theImportantSlices:\n theKey = slicey['key']\n theTonic = str(theKey).split(' ')[0]\n theKeyPC = pitch.Pitch(theTonic).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in slicey['pcset']]\n #rightChord = chord.Chord(sorted(keyTransPCs))\n slicey_label = (sorted(keyTransPCs),slicey['bassSD'])\n try:\n sliceCount[str(slicey_label)] += 1\n except KeyError:\n sliceCount[str(slicey_label)] = 1\n sliceProbs = getProbsFromFreqs(sliceCount)\n #Now make a list of the really important slices\n theReallyImportantSlices = []\n skipNext = 0\n #OK, now go again, looking for non-superset bass leaps\n for i, slicey in enumerate(theImportantSlices):\n if i == len(theImportantSlices) - 1:\n break\n if skipNext == 1:\n skipNext = 0\n continue\n #First, if there's no bass leap, just go on and add it like a normal slice\n if 
abs(theImportantSlices[i+1]['bassMIDI'] - theImportantSlices[i]['bassMIDI']) <= bassSize:\n theKeyPC = pitch.Pitch(str(slicey['key']).split(' ')[0]).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in slicey['pcset']]\n theReallyImportantSlices.append((sorted(keyTransPCs),slicey['bassSD']))\n continue\n #Next, only look at cases where |bass motion| > bassSize\n if abs(theImportantSlices[i+1]['bassMIDI'] - theImportantSlices[i]['bassMIDI']) > bassSize:\n combinedSlices = []\n theKeyPC = pitch.Pitch(str(slicey['key']).split(' ')[0]).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in slicey['pcset']]\n for n in keyTransPCs:\n combinedSlices.append(n)\n theKeyPC = pitch.Pitch(str(theImportantSlices[i+1]['key']).split(' ')[0]).pitchClass\n nextkeyTransPCs = [(n - theKeyPC)%12 for n in theImportantSlices[i+1]['pcset']]\n for m in nextkeyTransPCs:\n if m in combinedSlices:\n continue\n combinedSlices.append(m)\n sortedSlice = sorted(combinedSlices)\n #Pick whichever bass is literally lower in pitch, and use its SD for combo\n slicey_bass = slicey['bassMIDI']\n nextslice_bass = theImportantSlices[i+1]['bassMIDI']\n if slicey_bass <= nextslice_bass:\n bassSD = slicey['bassSD']\n if nextslice_bass < slicey_bass:\n bassSD = theImportantSlices[i+1]['bassSD']\n sortedSlice_type = (sortedSlice,bassSD)\n #If the combination never occurs, don't combine and move on\n try:\n testProb = sliceProbs[str(sortedSlice_type)]\n except KeyError:\n theKeyPC = pitch.Pitch(str(slicey['key']).split(' ')[0]).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in slicey['pcset']]\n theReallyImportantSlices.append((sorted(keyTransPCs),slicey['bassSD']))\n continue\n #Deal with singletons, which always have higher p\n #If both are singletons, move on:\n if len(slicey['pcset']) == 1 and len(theImportantSlices[i+1]['pcset']) == 1:\n continue\n #If the first is a singleton and second more probable than comb., move on\n elif len(slicey['pcset']) == 1 and len(theImportantSlices[i+1]['pcset']) > 1:\n if testProb < sliceProbs[str((sorted(nextkeyTransPCs),theImportantSlices[i+1]['bassSD']))]:\n continue\n #If the second is a singleton and first more probable than comb., move on\n elif len(theImportantSlices[i+1]['pcset']) == 1 and len(slicey['pcset']) > 1:\n if testProb < sliceProbs[str((sorted(keyTransPCs),slicey['bassSD']))]:\n continue\n #Otherwise, if p(comb) is less than either by themselves, move on\n elif testProb < sliceProbs[str((sorted(keyTransPCs),slicey['bassSD']))] or testProb < sliceProbs[str((sorted(nextkeyTransPCs),theImportantSlices[i+1]['bassSD']))]:\n continue\n #Once we rule out those cases, we know we want to combine.\n theReallyImportantSlices.append(sortedSlice_type)\n skipNext = 1\n binnedThings += 1\n #Tally up theReallyImportantSlices to get new sliceProbs\n #Now use sliceProbs to check the most common superset for each non-singleton slice\n sliceCount = {}\n for i, slicey in enumerate(theReallyImportantSlices):\n #if i > 10:\n # break\n if slicey == ['start'] or slicey == ['end'] or i == len(theReallyImportantSlices) - 1:\n continue\n if len(slicey[0]) == 1:\n continue\n slicey_prob = sliceProbs[str(slicey)]\n bestSupersetProb = slicey_prob\n bestSuperset = slicey\n #Find superset entries in sliceProbs with higher prob\n for key, probvalue in sliceProbs.iteritems():\n if probvalue < bestSupersetProb:\n continue\n #something funny here... 
what exactly does iteritems() do?\n keything = key.split('], ')[0]\n keyparts = keything.strip('([')\n if len(keyparts) == 1:\n listofPCs = [int(n) for n in keyparts]\n else:\n pclist = keyparts.split(', ')\n listofPCs = [int(n) for n in pclist]\n continueIfZero = 0\n #even one note wrong means no! For now, allow NEW bass note?\n for n in slicey[0]:\n if n not in listofPCs:\n continueIfZero += 1\n break\n if continueIfZero == 0:\n supersettedThings += 1\n bestSuperset = key\n bestSupersetProb = probvalue\n break\n #MESSED THIS UP\n if bestSuperset != str(slicey):\n #print bestSuperset, slicey\n try:\n catCollapse[str(bestSuperset)][str(slicey)] += 1\n except KeyError:\n try:\n catCollapse[str(bestSuperset)][str(slicey)] = 1\n except KeyError:\n catCollapse[str(bestSuperset)] = {}\n catCollapse[str(bestSuperset)][str(slicey)] = 1\n try:\n sliceCount[str((bestSuperset,bestSupersetProb))] += 1\n except KeyError:\n sliceCount[str((bestSuperset,bestSupersetProb))] = 1\n sorted_slicecount = sorted(sliceCount.iteritems(), key=operator.itemgetter(1), reverse=True)\n #export the probs as a csv file\n csvName = 'pcset superset tallies.csv'\n x = csv.writer(open(csvName, 'wb'))\n for pair in sorted_slicecount:\n x.writerow([pair[0], pair[1]]) \n #print \"supersetted things\",supersettedThings\n #now put the bigramTally in some kind of csv table\n \"\"\"\n cols = set()\n for row in catCollapse:\n for col in catCollapse[row]:\n cols.add(col)\n fieldnames = ['rowlabel'] + list(cols)\n #populate row labels\n for row in catCollapse:\n catCollapse[row]['rowlabel'] = row\n #write the CSV\n file = open('whatsincategories1122.csv', 'wb')\n #write the column headers\n #first, use plain CSV writer to write the field list\n lw = csv.writer(file)\n lw.writerow(fieldnames)\n #now write the body of the table\n #use a different CSV writer object\n dw = csv.DictWriter(file, fieldnames)\n for row in catCollapse:\n dw.writerow(catCollapse[row])\n \"\"\"", "def mix(self, well, volume=\"50:microliter\", speed=\"100:microliter/second\",\n repetitions=10):\n if isinstance(well, Well) or isinstance(well, str):\n well = WellGroup([well])\n for w in well.wells:\n opts = {\n \"well\": w,\n \"volume\": volume,\n \"speed\": speed,\n \"repetitions\": repetitions\n }\n self.pipette([{\"mix\": [opts]}])", "def cut_inefficient(image,box,margin=0,bg=0,dtype=None):\n (r0,c0,r1,c1) = box\n r0 -= margin; c0 -= margin; r1 += margin; c1 += margin\n if dtype is None: dtype = image.dtype\n result = interpolation.shift(image,(-r0,-c0),output=dtype,order=0,cval=bg)\n return result[:(r1-r0),:(c1-c0)]", "def fun_ned_cut(self, reg_x_len, n_size, block_index, n_loop):\n input_indices_ub = self.tik_instance.Tensor(self.dtype_indices, (8,),\n name=\"input_indices_ub\",\n scope=tik.scope_ubuf)\n self.tik_instance.data_move(input_indices_ub[0],\n self.input_indices_gm[0], 0, 1, 1, 0, 0)\n reg_start = self.tik_instance.Scalar(dtype=\"int32\")\n reg_start.set_as(input_indices_ub[0])\n reg_burst = 3200\n if self.dtype_x in (\"float32\", \"int32\"):\n reg_dtype_size = 8\n if reg_x_len % 25600 == 0:\n num_move = reg_x_len // 25600\n data_tail = 25600\n else:\n num_move = (reg_x_len // 25600) + 1\n data_tail = reg_x_len - (reg_x_len // 25600) * 25600\n tail_burst = data_tail // 8\n else:\n reg_dtype_size = 16\n if reg_x_len % 51200 == 0:\n num_move = reg_x_len // 51200\n data_tail = 51200\n else:\n num_move = (reg_x_len // 51200) + 1\n data_tail = reg_x_len - (reg_x_len // 51200) * 51200\n tail_burst = data_tail // 16\n\n data_input_ub = 
self.tik_instance.Tensor(self.dtype_x,\n (reg_burst, reg_dtype_size),\n name=\"data_input_ub\",\n scope=tik.scope_ubuf)\n\n with self.tik_instance.for_range(0, n_loop) as n_index:\n with self.tik_instance.if_scope(\n block_index * n_size + n_index != reg_start):\n with self.tik_instance.for_range(0, num_move) as move_index:\n with self.tik_instance.if_scope(move_index != num_move - 1):\n self.tik_instance.data_move(\n data_input_ub[0], self.input_x_gm[\n (block_index * n_size + n_index) * reg_x_len +\n move_index * reg_burst * reg_dtype_size], 0, 1,\n reg_burst, 0, 0)\n self.tik_instance.data_move(\n self.output_y_gm[\n (block_index * n_size + n_index) * reg_x_len +\n move_index * reg_burst * reg_dtype_size],\n data_input_ub[0], 0, 1, reg_burst, 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n data_input_ub[0], self.input_x_gm[\n (block_index * n_size + n_index) * reg_x_len +\n move_index * reg_burst * reg_dtype_size], 0, 1,\n tail_burst, 0, 0)\n self.tik_instance.data_move(\n self.output_y_gm[\n (block_index * n_size + n_index) * reg_x_len +\n move_index * reg_burst * reg_dtype_size],\n data_input_ub[0], 0, 1, tail_burst, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(0, num_move) as move_index:\n with self.tik_instance.if_scope(move_index != num_move - 1):\n self.tik_instance.data_move(\n data_input_ub[0],\n self.input_v_gm[move_index * reg_burst *\n reg_dtype_size], 0, 1, reg_burst, 0,\n 0)\n self.tik_instance.data_move(\n self.output_y_gm[\n (block_index * n_size + n_index) * reg_x_len +\n move_index * reg_burst * reg_dtype_size],\n data_input_ub[0], 0, 1, reg_burst, 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n data_input_ub[0],\n self.input_v_gm[move_index * reg_burst *\n reg_dtype_size], 0, 1, tail_burst,\n 0, 0)\n self.tik_instance.data_move(\n self.output_y_gm[\n (block_index * n_size + n_index) * reg_x_len +\n move_index * reg_burst * reg_dtype_size],\n data_input_ub[0], 0, 1, tail_burst, 0, 0)", "def get_cross_correlation_subpixel_offset(self, satLimit=16e3,\n cutoutSize=21):\n # TODO: Test if rough pixel-level alignment is required\n pass\n\n # # Test if a quick WCS integer pixel alignment is possible.\n # if self.image1.has_wcs and self.image2.has_wcs:\n # # Compute the integer pixel offsets using WCS\n # dx, dy = self.get_wcs_integer_pixel_offset()\n # else:\n # # Compute the integer pixel offsets using cross-correlation\n # dx, dy = self.get_cross_correlation_integer_pixel_offset()\n #\n # # Shift image2 array to approximately match image1\n # shiftedImage2 = self.image2.shift(-dx, -dy)\n\n # Compute a combined image and extract stars from that combined image\n combinedImage = 0.5*(self.image1 + self.image2)\n\n xStars, yStars = combinedImage.get_sources(\n satLimit = satLimit,\n crowdLimit = np.sqrt(2)*cutoutSize,\n edgeLimit = cutoutSize + 1\n )\n\n # Grab the list of star cutouts from image one\n starCutouts1 = self.image1.extract_star_cutouts(xStars, yStars,\n cutoutSize = cutoutSize)\n\n # Grab the list of star cutouts from shifted image two\n starCutouts2 = self.image2.extract_star_cutouts(xStars, yStars,\n cutoutSize = cutoutSize)\n\n # Cull any bad cutouts from the cutout list\n starCutouts1, starCutouts2 = self._parse_star_cutouts(\n starCutouts1,\n starCutouts2\n )\n\n\n # Build the square mosaics of cutouts\n cutoutMosaic1 = self._build_star_cutout_mosaic(starCutouts1)\n cutoutMosaic2 = self._build_star_cutout_mosaic(starCutouts2)\n\n #\n # TODO: remove this code block if 
possible\n #\n # Construct a NEW ImagePair instance from these two mosaics\n mosaicPair = ImagePairOffsetGetter(\n ReducedScience(cutoutMosaic1),\n ReducedScience(cutoutMosaic2)\n )\n\n # Replace any suprious values with local median values\n array1, array2 = mosaicPair._replace_negatives_and_nans_with_medians()\n\n # Do an array flipped convolution, which is a correlation.\n corrImage = signal.fftconvolve(\n array2,\n array1[::-1, ::-1],\n mode='same'\n )\n\n # Fix any suprious pixel values\n corrImage = ImagePairOffsetGetter._fix_bad_correlation_image_pixels(corrImage)\n\n # Grab the subpixel precision offsets from the cross correlation image\n dx, dy = ImagePairOffsetGetter._extract_subpixel_offset_from_correlation_image(corrImage)\n\n # # Add the integer and subpixel offsets and return them to the user\n # dx += dx1\n # dy += dy1\n\n return dx, dy", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def crop (*args, **kwargs):\n return compute('crop', inputs=list(args), args=kwargs)", "def run_cumulative_pipeline_damage(self):\n\t\t\"\"\" PWP1 = brittle\n\t\t\tPWP2 = ductile \"\"\"\n\n\t\trt = [100, 250, 500, 1000, 2500, 5000, 10000]\n\t\t# rt = [100]\n\n\t\tfor rt_val in rt:\n\t\t\tprint('\\tmc_pipe_dmg: cumulative rt_{}' .format(rt_val))\n\t\t\t# --- reading in damage results from above analysis\n\t\t\teq_damage_results_csv = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'pipe_DS_eq_{}yr_{}.csv' \n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val, retrofit_key))\n\t\t\ttsu_damage_results_csv = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'pipe_DS_tsu_{}yr_{}.csv'\n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val, retrofit_key))\n\t\t\teq_df = pd.read_csv(eq_damage_results_csv)\n\t\t\ttsu_df = pd.read_csv(tsu_damage_results_csv)\n\n\t\t\teq_df.set_index('guid', inplace=True)\n\t\t\ttsu_df.set_index('guid', inplace=True)\n\n\t\t\tcolumn_keys = list(eq_df.columns)\n\n\t\t\tcum_df = np.logical_or(eq_df.values, tsu_df.values).astype(int)\n\t\t\tcum_df = pd.DataFrame(cum_df, index=eq_df.index, columns=column_keys)\n\t\t\t\n\n\t\t\tresult_name = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t 'pipe_DS_cumulative_{}yr_{}.csv' \n\t\t\t\t\t\t\t\t\t\t.format(rt_val, retrofit_key))\n\n\t\t\tcum_df.to_csv(result_name, index=True)", "def compress(self, tensor):", "def continuumsub(self, *args, **kwargs):\n return _image.image_continuumsub(self, *args, **kwargs)", "def cutting(PAN, box_th= None): #---- cut outlier region\r\n if box_th is None: box_th = PAN.box_th; # cutting threshold\r\n src = PAN.dest; \r\n rows= src.shape[0]; cols= src.shape[1]; # get un-cut image size\r\n #--- find top boundary ---\r\n top = 0; flag= 0;\r\n while flag==0:\r\n inactive= 
np.sum(np.sum(src[top,:,:], axis=1)>0);\r\n if inactive> box_th*cols or top==rows: flag=1;\r\n else: top+=1;\r\n #--- find bottom boundary ---\r\n bottom= rows-1; flag=0;\r\n while flag==0:\r\n inactive= np.sum(np.sum(src[bottom,:,:], axis=1)>0);\r\n if inactive> box_th*cols or bottom==0: flag=1;\r\n else: bottom -=1;\r\n #--- find left boundary ---\r\n rowa= bottom- top;\r\n left = 0; flag=0;\r\n while flag==0:\r\n inactive= np.sum(np.sum(src[top:bottom,left,:], axis=1)>0);\r\n if inactive> box_th*rowa or left==cols: flag=1;\r\n else: left+=1;\r\n #--- find right boundary ---\r\n right= cols-1; flag=0;\r\n while flag==0:\r\n inactive= np.sum(np.sum(src[top:bottom,right,:], axis=1)>0);\r\n if inactive> box_th*rowa or right== 0: flag=1;\r\n else: right -=1;\r\n PAN.result= PAN.dest[top:bottom, left:right, :];", "def cut_384(img):\n if len(img.shape) > 2:\n ret = img[:, 50:434, 60:444]\n else:\n ret = img[50:434, 60:444]\n return ret" ]
[ "0.7247407", "0.6177565", "0.60966194", "0.60773325", "0.6003204", "0.5993062", "0.59929186", "0.59459716", "0.58568937", "0.5730637", "0.5683153", "0.5636909", "0.5531246", "0.550214", "0.54731303", "0.5415715", "0.54061943", "0.5387309", "0.5360789", "0.5356992", "0.53179294", "0.53029317", "0.53008044", "0.530025", "0.52945894", "0.52939576", "0.52755255", "0.52749026", "0.5267408", "0.5232525", "0.52243507", "0.5220865", "0.52003706", "0.517846", "0.5172129", "0.515978", "0.5146179", "0.51365614", "0.51241046", "0.51165587", "0.5116498", "0.50705415", "0.5058788", "0.5058788", "0.5044145", "0.50286293", "0.49904647", "0.4976054", "0.49747738", "0.4958855", "0.4952334", "0.4941462", "0.49395743", "0.49322674", "0.49247396", "0.49198928", "0.4904229", "0.4887961", "0.48815662", "0.4876394", "0.48674554", "0.48640034", "0.48610842", "0.48338133", "0.4833151", "0.48281753", "0.48155573", "0.4810167", "0.4810152", "0.4809361", "0.48088184", "0.48066923", "0.480547", "0.47975683", "0.47852358", "0.47849262", "0.4778457", "0.4778457", "0.47679245", "0.4758619", "0.47568023", "0.47565305", "0.47484106", "0.4747261", "0.4745991", "0.47390082", "0.47283438", "0.47237548", "0.47233048", "0.47167993", "0.4693667", "0.46927643", "0.4691177", "0.4691177", "0.46910983", "0.46859643", "0.4667142", "0.46650937", "0.46646175", "0.46628842" ]
0.51037717
41
Constructor Class for MixCriterion.
def __init__(self, criterion: Callable): self.criterion = criterion
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.condition = None", "def __init__(self, **kwargs: Any):\n self.multiclass_te_co = 3\n self.top_intersections = 5\n self.max_intersection_depth = 3\n self.subsample = 10000\n self.random_state = 42\n self.feats_imp = None\n self.ascending_by_cardinality = False\n\n self.max_bin_count = 10\n self.sparse_ohe = \"auto\"\n\n for k in kwargs:\n self.__dict__[k] = kwargs[k]", "def __init__(self, **kwargs):\n\n super().__init__(**kwargs)\n self.criterion = self._initLoss()", "def __init__(self, **kwargs):\n\n super().__init__(**kwargs)\n self.criterion = self._initLoss()", "def __init__(self, **kwargs):\n self._params = dict(\n score_func=ParameterDefinition([chi2, f_classif, mutual_info_classif]),\n k=None,\n )\n self.__k = None\n self.__select_k_best = SelectKB()", "def __init__(self, exclusion):\n self.exclusion = exclusion", "def __init__(self, type: int, filter: int):\n ...", "def __init__(self, count):\n assert count >= 0\n self.is_proportion = count < 1.0\n self.cutoff = count", "def __init__(self, *args):\n _snap.TPredicate_swiginit(self, _snap.new_TPredicate(*args))", "def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'tolerance'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)", "def __init__(self, *args, **kwargs):\n self.whitelist = set(kwargs.pop('whitelist', []))\n self.blacklist = set(kwargs.pop('blacklist', []))\n\n super(MyTestRunner, self).__init__(*args, **kwargs)", "def __init__(self, factor=None, op=None, factors=None):\n if factors is not None:\n self.factors = factors\n else:\n self.factors = factor\n self.op = op", "def __init__(self, \n cutoff_frequency, \n order, \n filter_type=\"maximally_flat\"):\n self.cutoff_freq = cutoff_frequency\n self.order = order\n self.filter_type = filter_type\n \n #TODO: Initialise filter based on maximally flat prototype", "def __init__(self, required={}, excluded={}, exact_match=True, DEBUG=False):\n self.logger = get_logger(name=\"Filter\", DEBUG=DEBUG)\n self.required = None\n self.excluded = None\n if isinstance(required, dict):\n self.required = required\n else:\n self.required = {}\n self.logger.error(msg=\"Required is not a dictionary!\")\n if isinstance(excluded, dict):\n self.excluded = excluded\n else:\n self.excluded = {}\n self.logger.error(msg=\"Excluded is not a dictionary!\")\n self.exact_match = exact_match", "def __init__(\n\t\tself,\n\t\tthreshold_input: Optional[float] = 0.5,\n\t\tthreshold_target: Optional[float] = 0.5,\n\t\tbeta: float = 1.0,\n\t\tdim: Optional[int] = -1,\n\t\treduce_fn: Optional[Callable] = torch.mean,\n\t):\n\t\tsuper().__init__()\n\t\tself.beta = beta\n\t\tself.threshold_input = threshold_input\n\t\tself.threshold_target = threshold_target\n\t\tself.reduce_fn = reduce_fn\n\n\t\tself.recall = Recall(\n\t\t\tthreshold_input=None,\n\t\t\tthreshold_target=None,\n\t\t\tdim=dim,\n\t\t\treduce_fn=None,\n\t\t)\n\t\tself.precision = Precision(\n\t\t\tthreshold_input=None,\n\t\t\tthreshold_target=None,\n\t\t\tdim=dim,\n\t\t\treduce_fn=None,\n\t\t)", "def __init__(self, min_cut=0.1, max_cut=0.9):\n self._min_cut = min_cut\n self._max_cut = max_cut\n self._stopwords = set(stopwords.words('english') + list(punctuation))", "def __init__(self, examples, filter_pred=None, **kwargs):\n if filter_pred is not None:\n make_list = isinstance(examples, list)\n examples = filter(filter_pred, examples)\n 
if make_list:\n examples = list(examples)\n self.examples = examples", "def __init__(self, *args, **kwargs):\n # skip\n self.skip_whitelist = set(kwargs.pop('skip_whitelist', []))\n self.skip_blacklist = set(kwargs.pop('skip_blacklist', []))\n # ignore\n self.ignore_whitelist = set(kwargs.pop('ignore_whitelist', []))\n self.ignore_blacklist = set(kwargs.pop('ignore_blacklist', []))\n\n\n super(MyTestRunner, self).__init__(*args, **kwargs)", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, use_masking: bool=True,\n use_weighted_masking: bool=False):\n assert check_argument_types()\n super().__init__()\n\n assert (use_masking != use_weighted_masking) or not use_masking\n self.use_masking = use_masking\n self.use_weighted_masking = use_weighted_masking\n\n # define criterions\n reduction = \"none\" if self.use_weighted_masking else \"mean\"\n self.mse_criterion = nn.MSELoss(reduction=reduction)\n self.duration_criterion = DurationPredictorLoss(reduction=reduction)", "def __init__(self, *, required=False, AND=_Null, OR=_Null, XOR=_Null):\n self._required = required\n self._and = AND\n self._or = OR\n self._xor = XOR", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterStealLock, self).__init__(*args, **kwargs)\n\n # Get the comparison sense flag.\n self.sense = self.get_boolean('sense', default=True)\n logger.debug('sense = {0}'.format(self.sense))\n\n # Get the filter parameters.\n self.steallock = (\n self.context.tokens['StealLock'] == '1')\n logger.debug('steallock = {0}'.format(self.steallock))", "def __init__(self, **kwargs):\n\n super().__init__(compute_hjorth_mobility, **kwargs)", "def __init__(self, qubit: int):\n super().__init__([qubit])", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterBreakUnlock, self).__init__(*args, **kwargs)\n\n # Get the comparison sense flag.\n self.sense = self.get_boolean('sense', default=True)\n logger.debug('sense = {0}'.format(self.sense))\n\n # Get the filter parameters.\n self.breakunlock = (\n self.context.tokens['BreakUnlock'] == '1')\n logger.debug('breakunlock = {0}'.format(self.breakunlock))", "def __init__(self, *args):\n _snap.TPredicateNode_swiginit(self, _snap.new_TPredicateNode(*args))", "def __init__(self, **kwargs):\n raise NotImplementedError", "def __init__(self, **kwds):\n raise NotImplementedError", "def __init__(self, **kwargs):\n\n args = {\n 'nobs': None, # Number of observations\n 'npred': None, # Number of predictors\n 'nrelpred': None, # Number of relevant predictors\n 'relpos': None, # Position of relevant predictor components\n 'gamma': None, # Decay factor of eigenvalue of predictor\n 'rsq': None, # Coefficient of determination\n 'sim_type': None, # Type of simulation: univariate, bivariate, multivariate\n }\n for key, value in args.items():\n setattr(self, key, value)\n\n for key, value in kwargs.items():\n setattr(self, key, value)", "def __init__(self, *args, **kwargs):\n cls = TestUninformedWordGameSearch\n super(cls, self).__init__(*args, **kwargs) # pylint:disable=W0142\n self.search_function = search_function", "def __init__(self):\n self.label = \"Categorical Membership\"\n self.description = \"Create fuzzy memberships for categorical data by first reclassification to integers and then division by an appropriate value\"\n self.canRunInBackground = False\n self.category = \"Fuzzy Logic\\\\Fuzzy Membership\"", "def __init__(self, min_cut=0.1, max_cut=0.9):\n 
self._min_cut = min_cut\n self._max_cut = max_cut\n self._stopwords = set(stopwords.words('english') + list(punctuation))", "def __init__(self, matcher, minimum=1):\n self.matcher = matcher\n assert minimum >= 1, 'Minimum must be greater than or equal to 1'\n self.minimum = minimum", "def __init__(self, *args, **kwargs):\n # Initializing the test & training set\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']", "def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order", "def __init__(self, a, b, c):\r\n self.a = a\r\n self.b = b\r\n self.c = c", "def __init__(self):\n self.label = \"Categorical & Reclass\"\n self.description = \"Create fuzzy memberships for categorical data by first reclassification to integers and then division by an appropriate value.\"\n self.canRunInBackground = False\n self.category = \"Fuzzy Logic\\\\Fuzzy Membership\"", "def __init__(self, *args, **kwargs):\n\n kwargs.setdefault('unique', True)\n\n super().__init__(*args, **kwargs)", "def __init__(self, *args, **kwargs):\n\n kwargs.setdefault('unique', True)\n\n super().__init__(*args, **kwargs)", "def __init__(self, input_length, output_length, output, relative_to=None,\n\t\tvariant=None, **kwargs):\n\t\tsuper().__init__(**kwargs)\n\n\t\tif variant is None:\n\t\t\tvariant = set()\n\t\telif isinstance(variant, str):\n\t\t\tvariant = {variant}\n\t\telif isinstance(variant, (list, tuple, set)):\n\t\t\tvariant = set(variant)\n\t\telse:\n\t\t\traise ValueError('Unexpected or unsupport CTC variant type: {}'\n\t\t\t\t.format(variant))\n\n\t\tvariant.discard(None)\n\t\tfor x in variant:\n\t\t\tif x not in Ctc.KNOWN_VARIANTS:\n\t\t\t\tlogger.warning('Ignoring an unknown variant to the CTC loss '\n\t\t\t\t\t'function: %s', x)\n\n\t\tself.variant = variant\n\n\t\tself.input_length = input_length\n\t\tself.output_length = output_length\n\t\tself.output = output\n\t\tself.relative_to = relative_to", "def __init__(self, jobject=None, options=None):\n if jobject is None:\n classname = \"weka.filters.MultiFilter\"\n jobject = MultiFilter.new_instance(classname)\n self.enforce_type(jobject, \"weka.filters.MultiFilter\")\n super(MultiFilter, self).__init__(jobject=jobject, options=options)", "def _make_subset(cls, name, data, **kwargs):\r\n return cls(name, data, **kwargs)", "def __init__(self, quantifier: Quantifier, count: typing.Optional[int] = None):\n self.quantifier = quantifier\n\n if self.quantifier == Quantifier.N:\n if count is None:\n raise ValueError(\"'count' must not be None when quantifier is N\")\n\n if count < 0:\n raise ValueError(\"'count' must be >= 0 but was '{count}'\")\n self.count = count\n else:\n self.count = None", "def __init__(self, *generators, **kwargs):\n\n accepted_keywords = ['return_class_label', 'shuffle']\n for kw in kwargs:\n if kw not in accepted_keywords:\n log.warning(\"MixGenerator: Keyword {} not accepted.\".format(kw))\n kwargs.pop(kw)\n pass\n pass\n self.generators = generators\n\n # Keyword arguments\n self.return_class_label = kwargs.get('return_class_label', True)\n self.shuffle = kwargs.get('shuffle', True)\n return", "def __init__(self, label, expression):\n super(AggregateBooleanCount, self).__init__(label)\n self.expression = expression", "def __init__(self, given_data_augmentation, ntrials=4):\n super().__init__()\n self.augment = 
given_data_augmentation\n self.ntrials = ntrials\n self.max_criterion = torch.nn.CrossEntropyLoss(reduction='none')", "def __init__(self, condition: typing.Callable[..., bool]):\n super().__init__()\n self.condition = condition", "def __init__(\r\n self, use_masking=True, use_weighted_masking=False):\r\n super().__init__()\r\n assert (use_masking != use_weighted_masking) or not use_masking\r\n self.use_masking = use_masking\r\n self.use_weighted_masking = use_weighted_masking\r\n\r\n # define criterions\r\n reduction = \"none\" if self.use_weighted_masking else \"mean\"\r\n self.mse_criterion = torch.nn.MSELoss(reduction=reduction)", "def __init__(self, quantity=identity, cut=Count()):\n if not isinstance(cut, Container):\n raise TypeError(\"cut ({0}) must be a Container\".format(cut))\n self.entries = 0.0\n self.quantity = serializable(identity(quantity) if isinstance(quantity, str) else quantity)\n self.cut = cut\n super(Select, self).__init__()\n self.specialize()", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._query_type_ = 'campaign'\n \n self._irl_artifacts_ = IntervalReportingLoader(query_type='campaign')\n self._irl_totals_ = IntervalReportingLoader(query_type='campaign_total')", "def __init__(self, **kwargs):\n super(LogisticRegression, self).__init__()\n self.C = kwargs.pop(\"C\", 100)\n self.clf = _LogisticRegression(C=self.C, **kwargs)", "def __init__(self, ignore_measures: bool = False):\n super().__init__()\n self._ignore_measures = ignore_measures", "def __init__(self, source, attributes=ALLOWED_ATTRIBUTES,\n strip_disallowed_elements=False, strip_html_comments=True,\n **kwargs):\n self.attr_filter = attribute_filter_factory(attributes)\n\n self.strip_disallowed_elements = strip_disallowed_elements\n self.strip_html_comments = strip_html_comments\n\n return super(BleachSanitizerFilter, self).__init__(source, **kwargs)", "def __init__(self, **kwargs):\n super(Gather, self).__init__(**kwargs)", "def __init__(self, rules):\n self.rules = set(rules)\n self.products = []", "def __init__(self, **kwargs):\n self._kw = kwargs.pop('kw', None)\n super(Mocker, self).__init__(**kwargs)", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n \"\"\" Use _query_names_ to store a single query name \"\"\"\n self._query_names_ = 'report_donor_dollar_breakdown' # embed the query name in the class itself\n self._query_type_ = kwargs['query_type']", "def __init__(self, *args):\n self.args = args\n self.matchers = []\n for a in args:\n if a is _:\n a = lambda k: True\n elif isinstance(a, basestring):\n a = a.__eq__\n elif isinstance(a, (list, tuple, set)):\n a = (lambda ary: (lambda k: k in ary))(a)\n elif hasattr(a, 'search'):\n a = a.search\n else:\n a = str(a).__eq__\n self.matchers.append(a)", "def __init__(self, *args):\n _snap.TAtomicPredicate_swiginit(self, _snap.new_TAtomicPredicate(*args))", "def __init__(self,\n var: str,\n latitude: float,\n longitude: float,\n miles: Union[int, float]):\n assert isinstance(var, str)\n assert isinstance(latitude, float)\n assert isinstance(longitude, float)\n assert miles is None or isinstance(miles, (int, float))\n\n super().__init__(\n filter_type='overlaps_mile_radius',\n filter_variable=var,\n filter_value=dict(\n latitude=latitude,\n longitude=longitude,\n miles=miles))", "def __init__(self, *args, **kwargs):\r\n super(UniqueCourseTest, self).__init__(*args, **kwargs)", "def __init__(self,params:configargparse.Namespace):\n super(StatsCalculator,self).__init__()\n self.ignore_label = 
params.text_pad\n self.char_list = params.char_list \n self.criterion = CrossEntropyLoss(ignore_index=self.ignore_label,reduction=\"mean\")\n self.ctc = CTCLoss(zero_infinity=True)", "def __init__(self, *, client=None, verbose=False, **kwargs):\n super().__init__(client=client, verbose=verbose, **kwargs)\n\n self.datatype = \"cupy\"\n\n # Make any potential model args available and catch any potential\n # ValueErrors before distributed training begins.\n self._set_internal_model(MNB(**kwargs))", "def __init__(self, subset):\n if subset not in ('background', 'evaluation'):\n raise(ValueError, 'subset must be one of (background, evaluation)')\n self.subset = subset\n\n self.df = pd.DataFrame(self.index_subset(self.subset))\n\n # Index of dataframe has direct correspondence to item in dataset\n self.df = self.df.assign(id=self.df.index.values)\n\n # Convert arbitrary class names of dataset to ordered 0-(num_speakers - 1) integers\n self.unique_characters = sorted(self.df['class_name'].unique())\n self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}\n self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))\n\n # Create dicts\n self.datasetid_to_filepath = self.df.to_dict()['filepath']\n self.datasetid_to_class_id = self.df.to_dict()['class_id']\n\n # Setup transforms\n self.transform = transforms.Compose([\n transforms.CenterCrop(224),\n transforms.Resize(84),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])", "def __init__(self, **kwargs):\n\n self.tests = OrderedDict()\n\n if 'passed' in kwargs:\n self.mark_all_passed(kwargs['passed'])\n if 'failed' in kwargs:\n self.mark_all_failed(kwargs['failed'])\n if 'flaked' in kwargs:\n self.mark_all_passed(kwargs['flaked'], flaky=True)", "def __init__(self):\n super().__init__()\n self.mapping = {}\n self.values = set()\n self.type = 'Categorical'\n self.dimensionality = 1\n self.distType = 'Discrete'\n self.isFloat = False", "def __init__(self, **kwargs):\n pass", "def __init__(self, brand: str, number_of_items: int, condition_name: str):\n self.__brand = brand\n self.__number_of_items = number_of_items\n self.__condition_name = condition_name", "def __init__(self,\n threshold=-15,\n ratio=8.0,\n postgain=-9.0,\n smooth=0.5,\n limit=-30):\n self.threshold = threshold\n self.ratio = ratio\n self.smooth = smooth\n self.power = 0\n cgain = self.cf(0)\n self.postgain = postgain - cgain\n self.limit = limit", "def __init__(self, *args, **kwargs):\n raise NotImplementedError", "def __init__(self, *args, **kwargs):\n self.classes = [0,1] # (default to 0/1; replace during training)\n self.theta = np.array([]) # placeholder value before training\n\n if len(args) or len(kwargs): # if we were given optional arguments,\n self.train(*args,**kwargs) # just pass them through to \"train\"", "def __init__(self, opts, httpConfig):\n\n Query.__init__(self, opts, httpConfig)\n parentId, self.opts = self.parseOptsList(opts, [\"grade_level\", \"framework\"])\n\n # Math\n web.debug(parentId)\n if parentId:\n if parentId.find(\"ordering\") != -1:\n self.query = '{\"urn:lri:property_type:id\":%s,\"shape\":{\"urn:lri:property_type:path_step\":{\"urn:lri:property_type:competency_in_path\":{}}}}' % parentId\n else:\n self.query = '{\"urn:lri:property_type:contained_by\":%s}' % parentId\n else:\n self.query = '{\"urn:lri:property_type:types\":\"urn:ccss:entity_type:domain\"}'", "def __init__(self, docs, **kargs): # [todo]\n self.cohort = 
kargs.get('cohort', None)\n self.nDoc = len(docs)\n\n return", "def __init__(self, **kwargs):\n base.Layer.__init__(self, **kwargs)\n self._group = self.spec['group']\n self._conv_args = dict(self.spec)\n self._conv_args['name'] = self.spec['name'] + '_sub'\n del self._conv_args['group']\n self._bottom_sub = [base.Blob() for _ in range(self._group)]\n self._top_sub = [base.Blob() for _ in range(self._group)]\n self._conv_layers = None\n self._blocksize = 0\n self._num_kernels = self.spec['num_kernels']\n # create the convolution layers\n self._conv_layers = [\n convolution.ConvolutionLayer(**self._conv_args)\n for i in range(self._group)]\n self._param = sum((layer.param() for layer in self._conv_layers), [])\n return", "def __init__(self, **kwargs):\n self.__kwargs = kwargs", "def __init__(self, reduce=True, one_side=False):\n self.reduce = reduce\n self.one_side = one_side", "def __init__(self, quantiles, mask=None):\n self.quantiles = torch.tensor(quantiles).float()\n self.n_quantiles = len(quantiles)\n self.mask = mask\n if self.mask:\n self.mask = np.float32(mask)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.conditions = all_conditions(self.source)\n if self.log:\n print(\"Found conditions\",\n [astor.to_source(cond).strip() \n for cond in self.conditions])", "def __init__(self):\r\n self.filter_p_number = 3 # First one with enough data for statistics\r\n self.prfs_d = extract_settings_elvis()\r\n\r\n ccds = True\r\n filtered = False\r\n scamp = False\r\n\r\n input_df = read_csv('cats/cat_clean_ssos.csv', index_col=0)\r\n filt_cat = self.gets_filtered_catalog() # Gets data from filtered\r\n\r\n if ccds:\r\n cats_d = self.extract_cats()\r\n self.extract_stats_ccds(cats_d, input_df, filt_cat)\r\n elif filtered:\r\n self.extract_stats_filt(filt_cat, input_df)\r\n elif scamp:\r\n pass\r\n # self.extract_stats_scamp(input_df)\r\n else:\r\n pass", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterPropList, self).__init__(*args, **kwargs)\n\n # Construct the regular expression tag evaluators.\n nameregextag = self.thistag.find('PropNameRegex')\n if nameregextag != None:\n self.nameregex = RegexTag(nameregextag)\n else:\n self.nameregex = None\n\n valueregextag = self.thistag.find('PropValueRegex')\n if valueregextag != None:\n self.valueregex = RegexTag(valueregextag)\n else:\n self.valueregex = None\n\n # Make sure that at least one regular expression is specified.\n if self.nameregex == None and self.valueregex == None:\n raise ValueError('Required tag missing: '\\\n 'PropNameRegex or PropValueRegex')\n\n # Get the \"look for the first match\" flag.\n self.matchfirst = self.get_boolean('matchFirst')\n logger.debug('matchfirst = {0}'.format(self.matchfirst))\n\n # Get the path name.\n self.path = self.context.tokens['Path']\n logger.debug('path = {0}'.format(self.path))", "def __init__(self, image, filter_name, cutoff, order = 0):\r\n self.image = image\r\n if filter_name == 'ideal_l':\r\n self.filter = self.get_ideal_low_pass_filter\r\n elif filter_name == 'ideal_h':\r\n self.filter = self.get_ideal_high_pass_filter\r\n elif filter_name == 'butterworth_l':\r\n self.filter = self.get_butterworth_low_pass_filter\r\n elif filter_name == 'butterworth_h':\r\n self.filter = self.get_butterworth_high_pass_filter\r\n elif filter_name == 'gaussian_l':\r\n self.filter = self.get_gaussian_low_pass_filter\r\n elif filter_name == 'gaussian_h':\r\n self.filter = self.get_gaussian_high_pass_filter\r\n\r\n self.cutoff = cutoff\r\n 
self.order = order\r\n self.filter_name = filter_name", "def __init__(self, *args):\n super().__init__(*args)\n\n self.output_dir = os.path.join(self.config.results_dir, \"cowinner\")\n self.merged_dir = os.path.join(self.output_dir, \"merged\")", "def __init__(self, predicate, behaviour, alternative_behaviour=None):\n self._predicate = predicate\n self._behaviour = behaviour\n self._alternative_behaviour = alternative_behaviour or _default # makes testing easier", "def __init__(\n self, threshold: Optional[float] = None,\n normalizer: Optional[Callable] = None,\n raise_error: Optional[bool] = False\n ):\n super(MajorityVote, self).__init__(raise_error=raise_error)\n self.threshold = threshold\n self.normalizer = normalizer", "def __init__(__self__, *,\n criterion_type: str,\n metric_name: str,\n name: str,\n operator: str,\n threshold: float,\n time_aggregation: str,\n dimensions: Optional[Sequence['outputs.MetricDimensionResponse']] = None,\n metric_namespace: Optional[str] = None,\n skip_metric_validation: Optional[bool] = None):\n pulumi.set(__self__, \"criterion_type\", 'StaticThresholdCriterion')\n pulumi.set(__self__, \"metric_name\", metric_name)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"operator\", operator)\n pulumi.set(__self__, \"threshold\", threshold)\n pulumi.set(__self__, \"time_aggregation\", time_aggregation)\n if dimensions is not None:\n pulumi.set(__self__, \"dimensions\", dimensions)\n if metric_namespace is not None:\n pulumi.set(__self__, \"metric_namespace\", metric_namespace)\n if skip_metric_validation is not None:\n pulumi.set(__self__, \"skip_metric_validation\", skip_metric_validation)", "def __init__(self, n=3, confidence_score=0.7, language=\"en\", **kwargs):\n super().__init__(**kwargs)\n self.n = n\n self.confidence_score = confidence_score\n self.language = language", "def __init__(self, *args, **kwargs):\n super(TurntableCrawler, self).__init__(*args, **kwargs)\n\n parts = self.var(\"name\").split(\"_\")\n\n # Add the job var once job names on disk match job code names in shotgun\n self.setVar('assetName', parts[1], True)\n self.setVar('step', parts[2], True)\n self.setVar('variant', parts[3], True)\n self.setVar('pass', parts[4], True)\n self.setVar('renderName', '{}-{}-{}'.format(\n self.var('assetName'),\n self.var('variant'),\n self.var('pass')\n ),\n True\n )", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def __init__(self):\n self.name = \"Schaffer\"\n objectives = [o_sh_1, o_sh_2]\n decisions = [Decision(-10 ** 5, 10 ** 5)]\n Model.__init__(self, objectives, None, decisions)", "def __init__(self, ratio=0.3, p=1.7, reduce=True):\n assert ratio > 0 and ratio <= 1, \"ratio should be in range [0, 1]\"\n assert p > 1, \"p should be >1\"\n self.ratio = ratio\n self.p = p\n self.reduce = reduce", "def __init__(self, batch_size_per_image: int, positive_fraction: float,\n min_neg: int = 0, pool_size: float = 10):\n super().__init__(pool_size=pool_size)\n self.min_neg = min_neg\n self.batch_size_per_image = batch_size_per_image\n self.positive_fraction = positive_fraction", "def __init__(self,\r\n broadcast_threshold=None,\r\n multicast_threshold=None,\r\n unknown_unicast_threshold=None):\r\n\r\n # Initialize members of the class\r\n self.broadcast_threshold = broadcast_threshold\r\n self.multicast_threshold = multicast_threshold\r\n self.unknown_unicast_threshold = unknown_unicast_threshold", "def __init__(self, **kwargs: Any):", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 
'Bernoulli'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, image, filter_name, cutoff, order = 0):\n self.filter_name = filter_name\n self.image = image\n if filter_name == 'ideal_l':\n self.filter = self.get_ideal_low_pass_filter\n elif filter_name == 'ideal_h':\n self.filter = self.get_ideal_high_pass_filter\n elif filter_name == 'butterworth_l':\n self.filter = self.get_butterworth_low_pass_filter\n elif filter_name == 'butterworth_h':\n self.filter = self.get_butterworth_high_pass_filter\n elif filter_name == 'gaussian_l':\n self.filter = self.get_gaussian_low_pass_filter\n elif filter_name == 'gaussian_h':\n self.filter = self.get_gaussian_high_pass_filter\n\n self.cutoff = cutoff\n self.order = order", "def __init__(self):\n super().__init__()\n self.mu = 0.0\n self.type = 'Poisson'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self):\n\n #call super class's __init__ method\n super(TRiseSampler, self).__init__(name=\"trise\", observed=False)", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set" ]
[ "0.6095481", "0.60818535", "0.60206497", "0.60206497", "0.5973565", "0.5949284", "0.5945663", "0.57841676", "0.5744737", "0.5706614", "0.56997925", "0.5673495", "0.5635301", "0.5579832", "0.5573683", "0.55335826", "0.55186737", "0.5517108", "0.5516155", "0.5516155", "0.5516155", "0.55140173", "0.55136585", "0.55045277", "0.54949796", "0.5488207", "0.5487145", "0.5485577", "0.5484564", "0.5482968", "0.5479545", "0.5475137", "0.547416", "0.546987", "0.5467954", "0.54630446", "0.54593974", "0.54511124", "0.5446092", "0.5443251", "0.5443251", "0.54417986", "0.54384106", "0.54372823", "0.54354954", "0.54353994", "0.54174775", "0.54149604", "0.54142123", "0.54049677", "0.54031515", "0.54007167", "0.53938407", "0.53850436", "0.538036", "0.5379419", "0.537869", "0.53731155", "0.5371435", "0.53674066", "0.535866", "0.535598", "0.5355542", "0.5354874", "0.53540975", "0.5352482", "0.5351696", "0.5341347", "0.53363466", "0.53356504", "0.5327475", "0.5326204", "0.5317738", "0.5317128", "0.5314276", "0.5311753", "0.5305899", "0.5297301", "0.52960783", "0.5295791", "0.529568", "0.5286086", "0.5286039", "0.52851915", "0.52821493", "0.5280045", "0.5276388", "0.5274827", "0.52747905", "0.5273351", "0.5270746", "0.52695566", "0.5264042", "0.5263904", "0.52617115", "0.52612185", "0.5260486", "0.52547103", "0.52506125", "0.5249856" ]
0.6339344
0
Method to calculate loss.
def __call__(self, preds: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    if isinstance(targets, (list, tuple)):
        target_a, target_b, lam = targets
        loss = lam * self.criterion(preds, target_a) + (1 - lam) * self.criterion(preds, target_b)
    else:
        loss = self.criterion(preds, targets)
    return loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_loss(self):", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def calculate_loss(self, X, y):\n probs = self.predict(X)\n\n num_examples = X.shape[0]\n\n sub = np.subtract(probs, y)\n abs_sum = np.abs(sub)\n sm = np.sum(abs_sum)\n loss = 1 - sm / num_examples\n print(\"Current loss: [ \" + str(\"{:6.5f}\").format(loss) + \" ]\")\n return loss", "def calc_loss(self, x: np.ndarray, y: np.ndarray) -> float:\n return self.descent.calc_loss(x, y)", "def compute_loss(self, *args, **kwargs):\n raise NotImplementedError", "def loss(self):\n return self._loss", "def compute_loss(self, obs, returns):", "def loss_op(self):\n return self.loss", "def get_loss(self):\n return self.loss / self.cnt", "def loss_(self, batch):\n raise NotImplementedError", "def loss(self):\n if not self.run:\n self._run()\n return self.model_loss", "def get_loss(self):\n raise NotImplementedError", "def _get_loss(self):\n raise NotImplementedError", "def loss(self) -> KernelLoss:\n return self._loss", "def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss", "def loss(self, **kwargs):\n pass", "def compute_loss(self, x, gt):\n loss = sum([torch.mean((out - gt)**2) for out in self.forward(x)])\n return loss", "def loss(self):\n return la.norm(self.resids) / self.normX", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def calculate_training_loss(self):\n self.network.train()\n self.training_average_loss = self.calculate_average_loss(self.training_dataloader)", "def _compute_loss(self):\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n state = torch.FloatTensor(state)\n next_state = torch.FloatTensor(next_state)\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(done)\n\n q_values = self.dqn(state)\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n\n next_q_values = self.target_dqn(next_state)\n next_q_value = next_q_values.max(1)[0]\n target = reward + self.discount_factor * next_q_value * (1 - done)\n\n # loss = F.smooth_l1_loss(q_value, target.detach())\n loss = F.mse_loss(q_value, target.detach())\n\n return loss", "def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def loss(self):\n return self._get(\"loss\")", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n 
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def calculate_loss(self, a, label):\n if self.loss == 'mse':\n diff = a - label\n err = np.square(diff).mean(axis=0).mean()\n elif self.loss == 'ce':\n return sum(-np.log2(a[label > 0]))\n else:\n raise ValueError('loss function not implemented')\n return err", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def loss_function(self, train_head, train_tail, train_relation, train_head_corrupted, train_tail_corrupted):\n\n # train_head = tf.nn.l2_normalize(train_head, 1)\n # train_tail = tf.nn.l2_normalize(train_tail, 1)\n # train_head_corrupted = tf.nn.l2_normalize(train_head_corrupted, 1)\n # train_tail_corrupted = tf.nn.l2_normalize(train_tail_corrupted, 1)\n\n # loss = tf.reduce_mean(\n # tf.maximum(self.dict_paras['margin']\n # + self.distance(tf.add(train_head, train_relation), train_tail)\n # - self.distance(tf.add(train_head_corrupted, train_relation), train_tail_corrupted), 0.))\n\n loss = tf.reduce_mean(self.distance(tf.add(train_head, train_relation), train_tail))\n\n return loss", "def build_loss(self):\n if self.mode != \"encode\":\n total_loss = tf.losses.get_total_loss()\n tf.summary.scalar(\"losses/total\", total_loss)\n\n self.total_loss = total_loss", "def calc_loss(self, guess: List[float], answer: List[float]) -> float:\n #print(\"Guess: %s Answer: %s\" % (guess, answer))\n return self.tested_network.loss_function.func(guess, answer)", "def loss(self, y: torch.Tensor, state: AlgorithmState) -> torch.Tensor:\n\n raise NotImplementedError()", "def _compute_loss(self, predictions, targets, **params):\n pass", "def calculate_validation_loss(self):\n self.network.train()\n self.validation_average_loss = self.calculate_average_loss(self.validation_dataloader)", "def loss(self, X, y):\n pass", "def get_loss(self, Loss, results, inputs, device):\n return", "def calculate_loss(self, pred, gold, smoothing=False):\n gold = gold.contiguous().view(-1)\n if smoothing:\n epsilon = 0.1\n n_class = pred.size(1)\n one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)\n one_hot = one_hot * (1 - epsilon) + \\\n (1 - one_hot) * epsilon / (n_class - 1)\n\n log_prb = F.log_softmax(pred, dim=1)\n # create non-padding mask with torch.ne()\n non_pad_mask = gold.ne(self.constants.PAD)\n loss = -(one_hot * log_prb).sum(dim=1)\n # losses are averaged later\n loss = loss.masked_select(non_pad_mask).sum()\n else:\n loss = F.cross_entropy(\n pred, gold, ignore_index=self.constants.PAD, reduction='sum')\n return loss", "def calc_loss(self, codes, encodings):\n return tf.reduce_mean((tf.stop_gradient(encodings) - codes) ** 2)", "def loss(self, x, y):\n\n return self.loss_fn(x, y)", "def calc_loss(self, codes, encodings):\n return tf.reduce_mean((encodings - tf.stop_gradient(codes)) ** 2)", "def loss(self, forward, rating):\n return self.loss_fn(forward, rating.float().view(-1))", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = 
tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def compute_loss(self, x, label):\n # Forward propagation\n y_hat = self.forward_pass(x)\n return -np.log(y_hat[label])", "def eval_loss(self, input_dataset, target_dataset):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tprediction = self.network.forward(input_dataset)\n\t\tloss = self._loss_layer.forward(prediction, target_dataset)\n\t\t\n\t\treturn loss\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def loss(self, x, y):\n return x", "def calculate_total_loss(self, train_x, train_y):\n return np.sum([self.calculate_error(x, y)\n for x, y in zip(train_x, train_y)])", "def loss(self, x, y):\n raise NotImplementedError", "def calc_loss(predictions, labels):\n return np.mean(np.square(predictions - labels))", "def _loss(self):\n\n cross_entropy = tf.reduce_mean(-tf.log(self.probability + epsilon) * self.y)\n self.loss = cross_entropy\n\n self.accuracy = tf.reduce_mean(\n tf.cast(tf.equal(tf.argmax(self.y, 1), self.prediction), tf.float32))", "def calculate_loss(self, interaction):\n\n if self.restore_user_e is not None or self.restore_entity_e is not None:\n self.restore_user_e, self.restore_entity_e = None, None\n\n user = interaction[self.USER_ID]\n pos_item = interaction[self.ITEM_ID]\n neg_item = interaction[self.NEG_ITEM_ID]\n\n user_all_embeddings, entity_all_embeddings, cor_loss = self.forward()\n u_embeddings = user_all_embeddings[user]\n pos_embeddings = entity_all_embeddings[pos_item]\n neg_embeddings = entity_all_embeddings[neg_item]\n\n pos_scores = torch.mul(u_embeddings, pos_embeddings).sum(dim=1)\n neg_scores = torch.mul(u_embeddings, neg_embeddings).sum(dim=1)\n mf_loss = self.mf_loss(pos_scores, neg_scores)\n 
reg_loss = self.reg_loss(u_embeddings, pos_embeddings, neg_embeddings)\n cor_loss = self.sim_decay * cor_loss\n loss = mf_loss + self.reg_weight * reg_loss + cor_loss\n return loss", "def _calc_loss(self, p_act_output:torch.Tensor, p_pred_output:torch.Tensor) -> float:\r\n\r\n return self._loss_fct(p_act_output, p_pred_output)", "def profit_loss(self) -> float:\n return self.net_worth / self.initial_net_worth", "def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def _calc_loss(self, fvs, labels, w, b):\n\n loss = 0.5 * self.lda * (np.linalg.norm(w) ** 2)\n tmp = sum(map(lambda x, y: (x - y) ** 2, fvs.dot(w) + b, labels))\n loss += tmp / fvs.shape[0]\n\n return loss", "def loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:\n raise NotImplementedError", "def loss(self):\n return 'mse'", "def getLoss(self, x_test, t_test):\n x_t = Variable(x_test, requires_grad=False)\n #Feed inputes into neural network\n t_pred = self.model(x_t)\n #Now lets compute out loss\n loss = self.loss_fn(t_pred, t_test)\n return loss", "def calc_loss(self, outputs, labels):\n information_loss = self.bottleneck.buffer_capacity.mean() # Taking the mean is equivalent of scaling with 1/K\n cross_entropy = F.cross_entropy(outputs, target=labels)\n total = cross_entropy + self.beta * information_loss\n self.ce_loss.append(cross_entropy.cpu().detach().numpy())\n self.info_loss.append(information_loss.cpu().detach().numpy())\n self.total_loss.append(total.cpu().detach().numpy())\n return total", "def loss_weights(self):\n return None", "def calculate_loss(self, train_x, train_y):\n self.log.info(\"Calculating average categorical crossentropy loss...\")\n\n num_words = np.sum([len(y) for y in train_y])\n return self.calculate_total_loss(train_x, train_y)/float(num_words)", "def loss(self, x):\n return self._svi.evaluate_loss(*x)", "def computeLoss(self):\n return sum(np.arccosh(-minkowskiArrayDot(self.examples, self.centroid)) ** 2)[0] / np.shape(self.examples)[0]", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def calculate_loss(self, output, batch):\n\n detailed_loss = {}\n for loss_func_key, this_loss_func, weight in self.loss_funcs:\n this_loss = this_loss_func(output, batch) * weight\n detailed_loss[loss_func_key] = this_loss\n loss = sum(detailed_loss.values())\n return loss, detailed_loss", "def loss(A, Y):\n return A - Y", "def loss_function(self, x, fwd_rtn):\n px_zs = 
fwd_rtn[\"px_zs\"]\n qz_x = fwd_rtn[\"qz_x\"]\n px_zss = fwd_rtn[\"px_zss\"]\n qz_xs = fwd_rtn[\"qz_xs\"]\n\n kl = self.calc_kl(qz_x)\n kl_separate = self.calc_kl(qz_xs)\n ll = self.calc_ll(x, px_zs)\n ll_separate = self.calc_ll(x, px_zss)\n\n total = kl + kl_separate - ll - ll_separate\n losses = {\"loss\": total, \"kl\": kl, \"ll\": ll, \"ll_separate\": ll_separate, \"kl_separate\": kl_separate}\n\n return losses", "def loss(self, X, labels):\n features = self.get_conv_features(X)\n loss = blah\n return loss", "def compute_loss(self, x, y):\n\n self.batch_size = x.shape[0]\n self.x = x\n self.y = y\n self.soft = self.softmax(x) + 10**(-11)\n out = np.zeros(self.batch_size)\n for i in range(self.batch_size):\n out[i] = -(y[i] @ np.log(self.soft[i]))\n\n return out", "def loss_fn(self, lbl, y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. * self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss", "def compute_loss(self, sample):\n observations_batch, actions_batch, return_batch, masks_batch, \\\n old_action_log_probs_batch, adv_targ = sample\n\n assert old_action_log_probs_batch.shape == (self.mini_batch_size, 1)\n assert adv_targ.shape == (self.mini_batch_size, 1)\n assert return_batch.shape == (self.mini_batch_size, 1)\n\n values, action_log_probs, dist_entropy = self.evaluate_actions(\n observations_batch, actions_batch)\n\n assert values.shape == (self.mini_batch_size, 1)\n assert action_log_probs.shape == (self.mini_batch_size, 1)\n assert values.requires_grad\n assert action_log_probs.requires_grad\n assert dist_entropy.requires_grad\n\n # [TODO] Implement policy loss\n ratio = torch.exp(action_log_probs - old_action_log_probs_batch)\n surr1 = ratio * adv_targ\n surr2 = torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv_targ\n policy_loss = -torch.min(surr1, surr2).mean()\n\n # [TODO] Implement value loss\n value_loss = F.mse_loss(return_batch, values)\n\n # This is the total loss\n loss = policy_loss + self.config.value_loss_weight * value_loss - self.config.entropy_loss_weight * dist_entropy\n\n return loss, policy_loss, value_loss, dist_entropy", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def compute_loss(y, tx, w):\n # ***************************************************\n # INSERT YOUR CODE HERE\n # TODO: compute loss by MSE / MAE\n # ***************************************************\n \n # vector e\n e = compute_e(y, tx, w)\n N = compute_N(e)\n L_MSE = np.dot(np.matrix.transpose(e), e)\n L_MSE = L_MSE / (2 * N)\n \n return L_MSE", "def _get_loss_weight(self) -> torch.Tensor:\n n_pos: torch.Tensor = 0.0\n n_neg: torch.Tensor = 0.0\n\n for _, ground_truth in self.train_loader:\n n_poss_curr = ground_truth.sum()\n n_pos += n_poss_curr\n n_neg += ground_truth.numel() - n_poss_curr\n\n eps = torch.finfo(n_pos.dtype).eps\n return n_neg / (n_pos + eps)", "def loss_fn(self, targets, outputs, model):", "def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model predictions:\n\n # Step 1: Use the model to calculate 
predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 2 ***\"\n return nn.SquareLoss(self.run(x), y)", "def ss_loss_(self, batch):\n raise NotImplementedError", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def loss_fn(gr_truth, pred):\n return 100 * dice_loss(pred, gr_truth) + softmax_weighted_loss(pred, gr_truth)", "def loss(self, X, labels):\n features = self.get_conv_feats(X)\n loss = blah\n return loss", "def get_current_loss(self):\n return sum(self.recent_loss_array)/sum(self.recent_loss_bs_array)", "def calc_loss(X, Y, model):\n Z = predict(X, model)\n return -(Y * np.log(Z)).sum() / len(Y)", "def loss(self, y_pred=None, y_true=None):\n ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. / self.sigma_y ** 2) * ((y_pred - y_true) ** 2)\n return -ll.sum(dim=0)", "def get_loss_fn(self):\n raise NotImplementedError()", "def compute_loss(y, tx, w):\n e = y - tx.dot(w)\n return calculate_mse(e)", "def add_loss(self):\n \n raise RuntimeError(\"Must define function add_loss(self)\")", "def envisaged_loss(self):\n loss = round(\n self.calcul_buy_nb_action() * self.stop_loss - self.investment_price(),\n 2,\n )\n percent_loss = round(loss * 100 / self.capital, 2)\n return loss, percent_loss", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE ***\"\n #make your predictions using run\n #compute loss nn.squareloss\n y_pred = self.run(x)\n return nn.SquareLoss(y_pred,y)", "def calculate_loss(self, output, target, redmode = 'mean'):\n\n loss = F.cross_entropy(output, target, reduction = redmode)\n return loss", "def compute_loss(self, w0, w, data_type):\n\t\tloss = 0\n\t\tnum_data = 0\n\t\tfor x_dict, y in dl.data_gen(self.data_file, \n\t\t\t\t\t\t\t\t\tself.int_minmax, \n\t\t\t\t\t\t\t\t\tself.category_dicts, \n\t\t\t\t\t\t\t\t\tdata_type):\n\t\t\tdot_prod = self.compute_w_x_dot(w0, w, x_dict)\n\t\t\tloss += -y*dot_prod+self.log_sum_exp(dot_prod)\n\t\t\tnum_data += 1\n\t\treturn loss/num_data", "def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = 
tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)", "def _compute_loss(self, state, action, reward, next_state, done):\n state = torch.FloatTensor(state)\n q_values = self.dqn(state)\n q_value = q_values[action]\n\n next_state = torch.FloatTensor(next_state)\n next_q_values = self.dqn(next_state)\n next_q_value = next_q_values.max()\n\n if done:\n target = reward\n else:\n target = reward + self.discount_factor * next_q_value\n\n loss = (q_value - target).pow(2).mean()\n\n return loss", "def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])", "def loss(self, X, Y, lmd):\n P, _ = self.forward(X)\n loss = np.mean(-np.log(np.einsum('ij,ji->i', Y.T, P)))\n\n reg = 0 # Regularization term\n for w in self.W:\n reg += np.sum(np.square(w))\n\n reg *= lmd\n\n cost = loss + reg\n\n return cost", "def loss(returns, predicted_output):\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n raise NotImplementedError", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 3 ***\"\n return nn.SoftmaxLoss(self.run(x), y)", "def calculate_loss(self, output, batch, training_context, last_activation=None):\n if self._model_loss_key is None:\n return output\n else:\n return output[self._model_loss_key]" ]
[ "0.8619422", "0.8097837", "0.8058278", "0.8058278", "0.8057582", "0.80512774", "0.80417025", "0.80136496", "0.80118436", "0.80016685", "0.7976173", "0.7873951", "0.78684396", "0.7850407", "0.7773674", "0.7736192", "0.773443", "0.7730767", "0.7681916", "0.7677627", "0.7675766", "0.7662065", "0.7653196", "0.7621862", "0.7611532", "0.761112", "0.7555543", "0.75226074", "0.7477361", "0.7473552", "0.7472013", "0.74704885", "0.7464285", "0.74621093", "0.7421485", "0.7420882", "0.74200225", "0.74127907", "0.7407927", "0.73636425", "0.73597133", "0.73586243", "0.73550475", "0.73482054", "0.7330139", "0.7330139", "0.7320653", "0.7317735", "0.7316036", "0.7304385", "0.7300571", "0.72864354", "0.72693497", "0.7268929", "0.7255327", "0.72548795", "0.725052", "0.7233793", "0.721951", "0.7191195", "0.71851736", "0.7178396", "0.7168526", "0.71685225", "0.7168521", "0.7162738", "0.71489406", "0.71294653", "0.71252525", "0.7124965", "0.71208733", "0.71172035", "0.71058017", "0.71013933", "0.7101357", "0.7087044", "0.7080167", "0.7061966", "0.7057592", "0.705274", "0.70523953", "0.70318055", "0.7028441", "0.70280254", "0.70276594", "0.7026173", "0.70121104", "0.6997055", "0.6992906", "0.69898266", "0.6983388", "0.69831795", "0.6978563", "0.6974388", "0.69702053", "0.696822", "0.69500905", "0.6936445", "0.69343024", "0.6933748", "0.69336486" ]
0.0
-1
Constructor for CustomCollate class.
def __init__(self, mixer: Callable, alpha: float = 1.0):
    self.alpha = alpha
    self.aug = mixer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\n super(AudioDataLoader, self).__init__(*args, **kwargs)\n self.collate_fn = _collate_fn", "def __init__(self, **kwargs):\n super(Converter, self).__init__()\n self._specfile = kwargs.get(\"specfile\", None)\n self._parsed = False\n self._columns = []\n self._offsets = []\n self._fixed_with_encoding = Encodings.default_fixedwidth_enc()\n self._included_header = False\n self._delimited_encoding = Encodings.default_delimited_enc()\n self.encoder_spec()", "def __init__(self, *args, **kwargs):\n super(AscatL2Image, self).__init__(*args, **kwargs)", "def __init__(\n self,\n *args,\n mapper_key: typing.Optional[str] = None,\n transform: typing.Optional[typing.Callable] = None,\n **kwargs,\n ):\n super(CustomColumn, self).__init__(*args, **kwargs)\n self.mapper_key = mapper_key\n self.transform = transform", "def __init__(self) -> None:\n\n super().__init__(255, 255, 255, 255)", "def __init__(self, *args, **kwargs):\n super(CoconutSplitter, self).__init__(*args, **kwargs)\n self._compile = self._coconut_compile", "def __init__( self, input_charset=_default_charset, output_charset=None ):\n _Charset.__init__( self, input_charset )\n\n if output_charset is not None:\n output_charset = output_charset.lower()\n master_charset = ALIASES.get( output_charset, output_charset )\n self.output_charset = output_charset\n self.output_codec = CODEC_MAP.get( master_charset, self.input_codec )", "def __init__(self, *args, **kwargs):\n _gdi_.DCTextColourChanger_swiginit(self,_gdi_.new_DCTextColourChanger(*args, **kwargs))", "def __init__(self, *args):\n _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_swiginit(self, _itkQuadEdgeCellTraitsInfoPython.new_itkMapContainerULLQEMPF3GQEULLULLBBT(*args))", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def __init__(self, *args):\n this = _libsbml.new_FbcToCobraConverter(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, **kwds):\n raise NotImplementedError", "def __init__(self, raw, style_cls):\n super(Base, self).__init__()\n self.raw = raw\n self.style_cls = style_cls", "def __init__(self, source, *args, **kwargs):\n super(self.__class__, self).__init__()", "def __init__(self, *args):\n _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_swiginit(self, _itkQuadEdgeCellTraitsInfoPython.new_itkMapContainerULLQEMPF2GQEULLULLBBT(*args))", "def __init__(self, **kwargs):\n raise NotImplementedError", "def __init__(self, encoding=None, object_hook=None):\n self.encoding = encoding\n self.object_hook = object_hook", "def __init__(self, **kwargs):\n super(Transform, self).__init__('transforms')\n\n # Import validators\n # -----------------\n from plotly.validators.heatmapgl import (transform as v_transform)\n\n # Initialize validators\n # ---------------------\n\n # Populate data dict with properties\n # ----------------------------------\n\n # Process unknown kwargs\n # ----------------------\n self._process_kwargs(**kwargs)", "def __init__(self, *args, **kwargs):\n super(ColumnShiftTable, self).__init__(*args, **kwargs)\n # Override default template\n if hasattr(self, \"template_name\"):\n self.template_name = self.shifter_template\n else:\n self.template = self.shifter_template", "def __init__ (self):\n pass", "def __init__(self, *args, **kwargs):\n super(Dummy, self).__init__()\n \n self.affine = np.eye(4, dtype = np.float32)\n self._update_glaffine()\n \n self.vertices = np.random.random( (10,3)).astype(np.float32) * 
10\n\n self.colors = np.array( [[255,255,0,255],\n [255,255,0,255],\n [0,255,0,255],\n [0,255,0,255]], dtype = np.ubyte )\n \n self.indices = np.array( [[0,1], [1,2], [5,6], [8,9]] , dtype = np.uint32).ravel()\n self.vertices = self.vertices[self.indices,:]\n self.indices = np.array( range(len(self.indices)), dtype = np.uint32)\n self.colors = self.colors.repeat(2, axis = 0)\n self.colors_ptr = self.colors.ctypes.data\n \n self.vertices_ptr = self.vertices.ctypes.data\n self.indices_ptr = self.indices.ctypes.data\n self.indices_nr = self.indices.size\n self.mode = GL_LINES\n self.type = GL_UNSIGNED_INT", "def __init__(self):\n self.bib_database = BibDatabase()\n #: Callback function to process BibTeX entries after parsing, for example to create a list from a string with\n #: multiple values. By default all BibTeX values are treated as simple strings. Default: `None`.\n self.customization = None\n\n #: Ignore non-standard BibTeX types (`book`, `article`, etc). Default: `True`.\n self.ignore_nonstandard_types = True\n\n #: Sanitise BibTeX field names, for example change `url` to `link` etc. Field names are always converted to\n #: lowercase names. Default: `True`.\n self.homogenise_fields = True\n\n # On some sample data files, the character encoding detection simply\n # hangs We are going to default to utf8, and mandate it.\n self.encoding = 'utf8'\n\n # pre-defined set of key changes\n self.alt_dict = {\n 'keyw': 'keyword',\n 'keywords': 'keyword',\n 'authors': 'author',\n 'editors': 'editor',\n 'url': 'link',\n 'urls': 'link',\n 'links': 'link',\n 'subjects': 'subject'\n }\n\n self.replace_all_re = re.compile(r'((?P<pre>\"?)\\s*(#|^)\\s*(?P<id>[^\\d\\W]\\w*)\\s*(#|$)\\s*(?P<post>\"?))', re.UNICODE)", "def __init__(self):\n self.metadata = {}\n self.geometry = {'array': None, \n 'geom': None, \n 'wkt': None}", "def __init__(self, *args, **kwargs):\n raise NotImplementedError", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, *args):\n this = _libsbml.new_CobraToFbcConverter(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, **kwargs):\n super(VeryCleverBeamsplitter, self).__init__(**kwargs)\n self.shader_source = IL_SHADER_SOURCE\n self.centre = [0.5, 0.5]\n self.blazing_function = np.linspace(0,1,32)\n self.zernike_coefficients = np.zeros(12)", "def __init__(self, **attrs):\n \n self.color_id = None\n self.name = None\n self.rgb = None\n self.is_trans = None\n self.external_names = {}\n self.external_ids = {}\n \n super().__init__(**attrs)", "def __init__(self, *args):\n _snap.TLFlt_swiginit(self, _snap.new_TLFlt(*args))", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.cnf = None", "def __init__(self):\n _hypre.HypreILU_swiginit(self, _hypre.new_HypreILU())", "def __init__(self, *args, **kwargs):\n\n self.mandatory_attributes = {'keywords': 
[], 'rules': [], 'desc': \"\",}\n models.AssetCollection.__init__(self, *args, **kwargs)\n self.set_gear_vars()", "def __init__(self, *args, **kwargs):\n super(self.__class__, self).__init__(*args, **kwargs)", "def init(self) -> None:", "def __init__(self, *args):\n this = _libsbml.new_ConversionProperties(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n\t\tsuper().__init__()\n\t\t\n\t\t# Typically a list of data here\n\t\t# Typically a dict of header keys and values here", "def __init__(self):\n super().__init__(\"ccx\", 3, [])", "def __init__(self, channels):\n super(PositionalEncodingPermute3D, self).__init__()\n self.penc = PositionalEncoding3D(channels)", "def initialize(self, cwrap):\n pass", "def _init(self):\n raise NotImplementedError", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def __init__(self):\n super(BlipDocument, self).__init__()\n self.annotations = Annotations()\n self.rotation = 0", "def __init__(self, **kwargs):\n\t\tself.__c_version = c_singlezone(**kwargs)", "def __init__(self, *args):\n _snap.TBiConVisitor_swiginit(self, _snap.new_TBiConVisitor(*args))", "def _init(self):", "def __init__(self, sourcedata=None, metadata=None):\n SourceHook.__init__(self, sourcedata=sourcedata, metadata=metadata)", "def __init__(self, **kwargs):\n super(ForwardLayersBase, self).__init__()\n pass", "def initialize(self, **kwargs):", "def __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)", "def __init__(self, *args, **kwargs):\n if len(args) > 0: self._init_from_ascii(*args)\n if len(kwargs) > 0: self._init_from_keywords(**kwargs)\n discdate = ephem.Date(self.jd - 2415020.0)\n self.ra_str = str(ephem.hours(self.ra * ephem.pi/180))\n if self.ra < 150.0:\n self.ra_str = '0' + self.ra_str\n self.dec_str = str(ephem.degrees(self.dec * ephem.pi/180))\n if abs(self.dec) < 10.0:\n self.dec_str = self.dec_str[0] + '0' + self.dec_str[1:]\n if self.dec > 0:\n self.dec_str = '+' + self.dec_str\n self.date_str = discdate.datetime().strftime(\"%d %b %Y\")\n self.name = \"CSS{}:{}{}\".format(\n discdate.datetime().strftime(\"%y%m%d\"),\n self.ra_str.replace(':','')[:6],\n self.dec_str.replace(':','')[:7])", "def __init__(self, parent, layer):\n pass", "def __init__(self, transforms):\n self.transforms = transforms", "def __init__(self, line: int, col: int) -> None:\n self._contents=\"\"\n super().__init__(self._contents)\n self._line=line\n self._col=col", "def __init__(self, *args, **kwargs):\n super(AbsLoopinData, self).__init__(\n # All set outside\n ('linl_lis', LinlLis()),\n ('linh', Byte()),\n *args, **kwargs\n )", "def __init__(self, *args):\n _gdi_.DCClipper_swiginit(self,_gdi_.new_DCClipper(*args))", "def __init__(self, resource_path=None):\n\n (\n self.tokenized_document,\n self.stack,\n self.source_provider,\n self.__parse_properties,\n ) = (None, None, None, None)\n\n if not resource_path:\n resource_path = os.path.join(os.path.split(__file__)[0], \"resources\")\n InlineHelper.initialize(resource_path)", "def __init__(self, **kwargs):\n super().__init__()", "def __init__(self, *args):\n _snap.TStrFltFltTr_swiginit(self, _snap.new_TStrFltFltTr(*args))", "def __init__(self, *args):\n this = _libsbml.new_CompFlatteningConverter(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n # Create an 8-byte initialization vector", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, 
*args):\n _XCAFDoc.XCAFDoc_ShapeMapTool_swiginit(self,_XCAFDoc.new_XCAFDoc_ShapeMapTool(*args))", "def __init__(self, *args):\n _snap.TStrAscFltKd_swiginit(self, _snap.new_TStrAscFltKd(*args))", "def __init__(self) -> None:\n super().__init__()", "def __init__(self) -> None:\n super().__init__()", "def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractComplexParameterType.__init__(self, value_class='VectorValue', **kwc)\n\n self._gen_template_attrs()", "def __init__ (self) :", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n super().__init__(\"\")", "def __init__(self, # noqa: R0913\r\n unified_encoder,\r\n mlp_non_seq_cat_list,\r\n mlp_non_seq_cont,\r\n decoder):\r\n super().__init__()\r\n\r\n self.unified_encoder = unified_encoder\r\n self.mlp_non_seq_cat_list = mlp_non_seq_cat_list\r\n self.mlp_non_seq_cont = mlp_non_seq_cont\r\n self.decoder = decoder", "def __init__():", "def __init__(self, coder):\n self.coder = coder", "def __init__(self, base: str, **kwargs) -> None:\n if base.startswith('\\\\'):\n base = base[1:-1]\n path = '\\\\' + base + '\\\\'\n super().__init__(path, EmptyQuery(), VisualAttributes(\"CA\"), sourcesystem_cd=base, **kwargs)\n self._base = base", "def __init__(self):\n super(LinearAggregationLayer, self).__init__()", "def __init__(self, column_pos, row_pos, type='1'):\n\t\tself.column_pos = column_pos\n\t\tself.row_pos = row_pos\n\t\tself.terrain_type = type", "def __init__(self, *args):\n\n BaseDataTypes.__init__(self, *args)\n self.const_field_table_dummy_dict = {'$key1':'value1','$key2':'value2'}\n self.const_field_table_dummy_dict_encoded = '\\x00\\x00\\x00\\x22\\x05$key2S\\x00\\x00\\x00\\x06value2\\x05$key1S\\x00\\x00\\x00\\x06value1'", "def __init__(self) -> None:\n self.hebrew_alphabet = None # list of hebrew characters\n self.augmenter = Augmenter()", "def __init__(self):\n\t\tsuper().__init__()", "def __init__(self):\n\t\tsuper().__init__()", "def __init__(self):\n super(Flattener, self).__init__()", "def __init__(self, data=None, codec=None):\n self.data = data\n self.codec = codec", "def _init(self):\n pass", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self):\n self.data = None\n self.compiled = None", "def __init__(self, data_location=None):\n super(FormatterMediator, self).__init__()\n self._data_location = data_location\n self._language_identifier = self.DEFAULT_LANGUAGE_IDENTIFIER\n self._lcid = self.DEFAULT_LCID\n self._winevt_database_reader = None", "def __init__(self, lzw_min_code_sz, col_table_sz):\n self.code_table = dict()\n clear_code = 1<<lzw_min_code_sz\n eoi_code = clear_code + 1\n self.code_table[clear_code] = [CLEARCODEVAL]\n self.code_table[eoi_code] = [EOICODEVAL]\n for color in range(col_table_sz):\n self.code_table[color] = [color]", "def __init__(self, *args, **kwargs):\n # Impose default formatter\n super().__init__(*args, **kwargs)\n formatter = axistools.Formatter('auto')\n self.xaxis.set_major_formatter(formatter)\n self.yaxis.set_major_formatter(formatter)\n self.xaxis.isDefault_majfmt = True\n self.yaxis.isDefault_majfmt = True\n # Custom attributes\n self._datex_rotated = False # whether manual rotation has been applied\n self._dualy_arg = None # for scaling units on opposite side of ax\n self._dualx_arg = None\n self._dualy_cache = None # prevent excess 
_dualy_overrides calls\n self._dualx_cache = None" ]
[ "0.64296454", "0.6141736", "0.59631157", "0.59013855", "0.5901021", "0.5887784", "0.5874038", "0.58196646", "0.5807004", "0.57944214", "0.5786476", "0.57478714", "0.5736771", "0.57238066", "0.57140934", "0.5711623", "0.5707364", "0.56902254", "0.5678276", "0.5651362", "0.5638153", "0.55982614", "0.55937165", "0.55826104", "0.55578834", "0.55578834", "0.55578834", "0.55578834", "0.55578834", "0.55578834", "0.55578834", "0.55578834", "0.55578834", "0.5557289", "0.5551445", "0.55343425", "0.5528411", "0.552732", "0.5526213", "0.55210066", "0.5517907", "0.55064774", "0.5504544", "0.5500201", "0.5500028", "0.54950947", "0.5494954", "0.54937184", "0.5490799", "0.5485589", "0.5484042", "0.54771304", "0.5474181", "0.5473826", "0.5466583", "0.54574335", "0.5455661", "0.54509", "0.5449702", "0.5448303", "0.5446989", "0.54429895", "0.5441628", "0.54394084", "0.54385364", "0.5431", "0.5427914", "0.5425544", "0.5425276", "0.5425276", "0.5425276", "0.54229385", "0.54183906", "0.5412391", "0.5412391", "0.5411859", "0.5411124", "0.54105264", "0.54105264", "0.54105264", "0.54105264", "0.5408498", "0.5408199", "0.5407227", "0.54038155", "0.54015887", "0.5396617", "0.5395542", "0.53945404", "0.53938854", "0.5392157", "0.5392157", "0.53897583", "0.5387667", "0.5387599", "0.5387277", "0.5387277", "0.5384222", "0.5382396", "0.53768975", "0.5372132" ]
0.0
-1
Method to create collate_fn for dataloader.
def get_collate_fn(mixer_name: str, alpha: float) -> Callable:
    fn = cutmix if mixer_name == "cutmix" else mixup
    collate_fn = CustomCollate(alpha=alpha, mixer=fn)
    return collate_fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_collate(self, cfg: ConfigType) -> Callable:\n try:\n with FUNCTIONS.switch_scope_and_registry(self.scope) as registry:\n collate_fn = registry.get(cfg.test_dataloader.collate_fn)\n except AttributeError:\n collate_fn = pseudo_collate\n return collate_fn # type: ignore", "def view(\n self,\n collate_fn: Union[callable, str] = \"batch_of_g_and_y\",\n *args,\n **kwargs\n ):\n # provide default collate function\n if isinstance(collate_fn, str):\n collate_fn = getattr(self, collate_fn)\n\n return torch.utils.data.DataLoader(\n dataset=self,\n collate_fn=collate_fn,\n *args,\n **kwargs,\n )", "def collate_fn(self, *args):\n return TupleMiniBatch(default_collate(*args))", "def build_collate_fn(\n cls, args: argparse.Namespace, train: bool\n ) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:\n raise NotImplementedError", "def __init__(self, *args, **kwargs):\n super(AudioDataLoader, self).__init__(*args, **kwargs)\n self.collate_fn = _collate_fn", "def collate_fn(self, image_column_names: Optional[List] = None, per_gpu_batch_size: Optional[int] = None) -> Dict:\n fn = {}\n if self.requires_column_info:\n return NotImplementedError(\n f\"requires_column_info={self.requires_column_info} not implemented for OVD tasks.\"\n )\n\n fn.update(\n {\n self.image_key: PadCollator(pad_val=0),\n self.prompt_key: ListCollator(),\n self.image_meta_key: ListCollator(),\n }\n )\n return fn", "def get_transform_fn():", "def collate_fn(list_samples):\n data = dict(outputs=None) # compliant with DataManager <collate_fn>\n data[\"inputs\"] = torch.stack([torch.from_numpy(sample[0]) for sample in list_samples], dim=0).float()\n data[\"labels\"] = torch.stack([torch.tensor(sample[1]) for sample in list_samples], dim=0).squeeze().float()\n return DataItem(**data)", "def get_collate_for_dataset(\n dataset: Union[Dataset, ConcatDataset], ensure_collate_fn_are_the_same: bool = True\n) -> Callable:\n collate_fn = default_collate\n\n if hasattr(dataset, \"get_collate_fn\"):\n return dataset.get_collate_fn()\n elif isinstance(dataset, ConcatDataset):\n collate_fns = [get_collate_for_dataset(ds) for ds in dataset.datasets]\n collate_fn = collate_fns[0]\n\n if ensure_collate_fn_are_the_same:\n for other_collate_fn in collate_fns[1:]:\n if type(other_collate_fn) != type(collate_fn):\n raise ValueError(\n f\"Detected ConcatDataset consist of datasets with different collate functions: {type(collate_fn)} and {type(other_collate_fn)}.\"\n )\n\n if isinstance(collate_fn, functools.partial):\n if not _partial_functions_equal(collate_fn, other_collate_fn):\n raise ValueError(\n f\"Detected ConcatDataset consist of datasets with different collate functions: {collate_fn} and {type(other_collate_fn)}.\"\n )\n elif collate_fn != other_collate_fn:\n raise ValueError(\n f\"Detected ConcatDataset consist of datasets with different collate functions: {collate_fn} and {other_collate_fn}.\"\n )\n\n collate_fn = collate_fns[0]\n\n return collate_fn", "def custom_collate_fn(data):\n features, labels = zip(*data)\n return pack_sequence(features, enforce_sorted=False), torch.tensor(labels)", "def create_loader(dataset: Dataset, cfg: trainer_configs.BaseDatasetConfig, batch_size: int, *,\r\n collate_fn: Optional[Callable[[List[Any]], Any]] = None) -> DataLoader:\r\n # return DataLoader(\r\n # dataset, batch_size=batch_size, num_workers=cfg.num_workers,\r\n # drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r\n return DataLoader(\r\n dataset, batch_size=batch_size, shuffle=cfg.shuffle, 
num_workers=cfg.num_workers,\r\n drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r", "def collate_fn(batch):\n\n flattened_batch = []\n for data in batch:\n num_examples = len(data['image'])\n for i in range(num_examples):\n flattened_batch.append({\n k: v[i] for k, v in data.items()\n })\n\n return default_collate(flattened_batch)", "def collate_fn(data):\n\toutput = dict()\n\n\tfor name in ['answer_ID','query_ID']:\n\t\toutput[name] = [ _[name] for _ in data]\n\n\n\tfor name in ['query_len','answer_len']:\n\t\ttemp = [ _[name] for _ in data]\t \n\t\toutput[name] = torch.stack(temp, dim=0) \n\t\n\t#deal with source and target\n\tfor name in ['answer','query']:\n\t\tlength = output['{0}_len'.format(name)]\n\t\tl = length.max().item()\n\n\t\tfor i in range(len(data)):\n\t\t\tif(l-length[i].item()>0):\n\t\t\t\tdata[i][name] = torch.cat([data[i][name],torch.zeros(l-length[i].item(),dtype=torch.long)],dim=-1)\n\n\t\ttemp = [ _[name] for _ in data]\n\t\t\n\t\toutput[name] = torch.stack(temp, dim=0).long()\n\t\t\n\n\treturn output", "def collate_fn(batch):\n metadata = []\n for el in batch:\n metadata.append(el[\"metadata\"])\n del el[\"metadata\"]\n\n batch = default_collate(batch)\n\n batch[\"metadata\"] = metadata\n\n return batch", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def collate_fn(data):\n # Sort a data list by caption length\n images, captions, cap_mask, vision_mask, labels, vision_labels = zip(*data)\n\n images = torch.stack(images, 0)\n labels = torch.stack(labels, 0)\n vision_labels = torch.stack(vision_labels, 0).long()\n targets = torch.stack(captions, 0).long()\n cap_mask = torch.stack(cap_mask,0).long()\n vision_mask = torch.stack(vision_mask,0).long()\n\n return images, targets, cap_mask, vision_mask, labels, vision_labels", "def collate_fn(batch):\n pad_index = 1 # the <PAD> index in vocabulary\n src_list = [sample[0] for sample in batch] # list of each language sentences\n trg_list = [sample[1] for sample in batch]\n\n def padding(sentence_list):\n \"\"\"padding each sentence to the right\"\"\"\n max_len = max([sentence.size(0) for sentence in sentence_list])\n pad_sen = [sen.tolist() + [pad_index] * max(0, max_len - len(sen))\n for sen in sentence_list]\n return torch.LongTensor(pad_sen).transpose(0, 1) # shape of (T, B)\n\n return padding(src_list), padding(trg_list)", "def collate_fn(data):\n # sort a data list by caption length\n data.sort(key=lambda x: len(x[1]), reverse=True)\n zipped_data = list(zip(*data))\n # align_tensor = len(tokenized_caption) * len(whole_caption)\n images, captions, ids, img_ids, = zipped_data\n images = torch.stack(images, 0)\n targets = torch.zeros(len(captions), len(captions[0])).long()\n lengths = [len(cap) for cap in captions]\n for i, cap in enumerate(captions):\n end = len(cap)\n targets[i, :end] = cap[:end]\n return images, targets, lengths, ids", "def make_transform_fn(self, ):\n return self._transform_fn", "def collate_fn(data):\n # Sort by conversation length (descending order) to use 'pack_padded_sequence'\n data.sort(key=lambda x: x[1], reverse=True)\n\n # Separate\n sentences, conversation_length, sentence_length = zip(*data)\n\n # return sentences, conversation_length, sentence_length.tolist()\n return sentences, conversation_length, sentence_length", "def collate_fn(self, batch):\n # Sort a 
data list by caption length (descending order).\n #sample.sort(key=lambda x: len(x[1]), reverse=True)\n images, words = [b.get('image') for b in batch], [b.get('word') for b in batch]\n \n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0)\n \n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(word) for word in words]\n targets = torch.zeros(sum(lengths)).long()\n lengths = torch.tensor(lengths)\n for j, word in enumerate(words):\n start = sum(lengths[:j])\n end = lengths[j]\n targets[start:start+end] = torch.tensor([self.ds.char_dict.get(letter) for letter in word]).long()\n \n if self.device == 'cpu':\n dev = torch.device('cpu')\n else:\n dev = torch.device('cuda')\n return images.to(dev), targets.to(dev), lengths.to(dev)", "def collate_batch(self) -> Dict[str, Any]:\n pass", "def collate_fn_bert(data):\n # sort a data list by caption length\n data.sort(key=lambda x: x[4].shape[1], reverse=True)\n zipped_data = list(zip(*data))\n whole_length_max = zipped_data[4][0].shape[1]\n # align_tensor = len(tokenized_caption) * len(whole_caption)\n images, captions, ids, img_ids, align_tensors = zipped_data\n images = torch.stack(images, 0)\n lengths = [len(cap) for cap in captions]\n length_max = max(lengths)\n lengths_whole = [align.shape[1] for align in align_tensors]\n targets = torch.zeros(len(captions), length_max).long()\n targets_aligns = torch.zeros(len(captions), length_max, whole_length_max).to(torch.float32)\n for i, tup in enumerate(zip(captions, align_tensors)):\n cap, align_tensor = tup\n end = len(cap)\n tokenized_l = align_tensor.shape[0]\n whole_l = align_tensor.shape[1]\n #import ipdb; ipdb.set_trace()\n targets[i, :end] = cap[:end]\n targets_aligns[i, :tokenized_l, :whole_l]\n return images, targets, lengths, ids, targets_aligns, lengths_whole", "def build_label_transform():\n\n return NALabelEncoder()", "def collate_fn(data):\n # Sort a data list by caption length\n data.sort(key=lambda x: len(x[1]), reverse=True)\n images, captions, bboxes, depends, ids, img_ids = zip(*data)\n\n # Merge images (convert tuple of 3D tensor to 4D tensor)\n images = torch.stack(images, 0)\n bboxes = torch.stack(bboxes, 0)\n\n # Merget captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = lengths[i]\n targets[i, :end] = cap[:end]\n\n return images, targets, bboxes, depends, lengths, ids", "def collate_fn(data, device=default_device):\n # batch.sort(key=lambda x: len(x[1]), reverse=True)\n has_mask_tensor = True if data[0][-1] is not None else False\n input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor = zip(*data)\n\n input_tensor, input_lengths = padSequence(input_tensor)\n target_tensor, target_lengths = padSequence(target_tensor)\n bs_tensor = torch.as_tensor(bs_tensor, dtype=torch.float, device=device)\n db_tensor = torch.as_tensor(db_tensor, dtype=torch.float, device=device)\n mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if has_mask_tensor else None\n # mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if mask_tensor[0] and mask_tensor[0] != [] else None\n\n # data = input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor\n # if torch.cuda.is_available():\n # data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n return input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, 
mask_tensor # tensors [batch_size, *]", "def createTransformFunc(self):\n raise NotImplementedError()", "def collate_fn(batch):\n sentence1 = [item[0] for item in batch]\n sentence2 = [item[1] for item in batch]\n label = [item[2] for item in batch]\n label = torch.tensor(label)\n return sentence1, sentence2, label", "def collate_fn(batch):\r\n transposed = zip(*batch)\r\n lbd = lambda batch:torch.cat([torch.from_numpy(b).long() for b in batch])\r\n return [lbd(samples) for samples in transposed]", "def collate_fn(data):\n\n # Sort a data list by tweet length (descending order).\n # data.sort(key=lambda x: len(x[1]), reverse=True)\n texts_, targets_, relations = zip(*data)\n\n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(text) for text in texts_]\n texts = torch.zeros(len(texts_), max(lengths)).long()\n for i, text in enumerate(texts_):\n end = lengths[i]\n texts[i, :end] = text[:end]\n\n lengths_targets = [len(text) for text in targets_]\n targets = torch.zeros(len(targets_), max(lengths_targets)).long()\n for i, text in enumerate(targets_):\n end = lengths_targets[i]\n targets[i, :end] = text[:end]\n return targets, lengths, texts, torch.tensor(relations).view(-1)", "def collate_fn(data):\r\n # Sort a data list by caption length\r\n data.sort(key=lambda x: len(x[1]), reverse=True)\r\n\r\n images, captions, ids, img_ids = zip(*data)\r\n\r\n # Merge images (convert tuple of 3D tensor to 4D tensor)\r\n images = torch.stack(images, 0)\r\n\r\n # Merget captions (convert tuple of 1D tensor to 2D tensor)\r\n lengths = torch.LongTensor([len(cap) for cap in captions])\r\n targets = torch.zeros(len(captions), max(lengths)).long()\r\n for i, cap in enumerate(captions):\r\n end = lengths[i]\r\n targets[i, :end] = cap[:end]\r\n\r\n return images, targets, lengths, ids", "def get_loader(\n data_source: Iterable[dict],\n open_fn: Callable,\n dict_transform: Callable = None,\n sampler=None,\n collate_fn: Callable = default_collate_fn,\n batch_size: int = 32,\n num_workers: int = 4,\n shuffle: bool = False,\n drop_last: bool = False,\n):\n from catalyst.data.dataset import ListDataset\n\n dataset = ListDataset(\n list_data=data_source, open_fn=open_fn, dict_transform=dict_transform,\n )\n loader = torch.utils.data.DataLoader(\n dataset=dataset,\n sampler=sampler,\n collate_fn=collate_fn,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=shuffle,\n pin_memory=torch.cuda.is_available(),\n drop_last=drop_last,\n )\n return loader", "def collate_fn(batch):\n # Unzip the batch\n imgs,qs, answers = list(zip(*batch))\n\n # concatenate the vectors\n imgs = torch.stack(imgs)\n \n #concatenate the labels\n q = torch.stack(qs)\n a = torch.stack(answers)\n \n return imgs, q, a", "def collate_fn(batch):\n text = [item[0] for item in batch]\n audio = [item[1] for item in batch]\n\n text_lengths = [len(x) for x in text]\n audio_lengths = [len(x) for x in audio]\n\n max_text = max(text_lengths)\n max_audio = max(audio_lengths)\n\n text_batch = np.stack(pad_text(x, max_text) for x in text)\n audio_batch = np.stack(pad_spectrogram(x, max_audio) for x in audio)\n\n return (torch.LongTensor(text_batch),\n torch.FloatTensor(audio_batch).permute(1, 0, 2),\n text_lengths, audio_lengths)", "def collate_fn(data):\r\n\r\n # sort data by caption length\r\n data.sort(key=lambda x: len(x[1]), reverse=True)\r\n images, captions = zip(*data)\r\n\r\n # Merge image tensors (stack)\r\n images = torch.stack(images, 0)\r\n\r\n # Merge captions\r\n caption_lengths = [len(caption) for caption in 
captions]\r\n\r\n # zero-matrix num_captions x caption_max_length\r\n padded_captions = torch.zeros(len(captions), max(caption_lengths)).long()\r\n\r\n # fill the zero-matrix with captions. the remaining zeros are padding\r\n for idx, caption in enumerate(captions):\r\n end = caption_lengths[idx]\r\n padded_captions[idx, :end] = caption[:end]\r\n return images, padded_captions, caption_lengths", "def collate_fn(batch):\n file = [item[\"file\"] for item in batch]\n wave = torch.cat([item[\"wave\"] for item in batch], dim=0)\n return {\"file\": file, \"wave\": wave}", "def collate_fn(batch):\n file = [item[\"file\"] for item in batch]\n wave = torch.cat([item[\"wave\"] for item in batch], dim=0)\n return {\"file\": file, \"wave\": wave}", "def __init__(self, index='ndt', doc_type='sentence', dataset_path=None,\n dataset_fn=None, lang=None, sections=None, fields=None,\n normalize_func=normalize):\n super(NDTDataset, self).__init__(index=index, doc_type=doc_type, dataset_path=dataset_path,\n dataset_fn=dataset_fn, normalize_func=normalize_func)\n\n self.archive_fn = NDT_ARCHIVE_URL\n self.field_indices = None\n self.fields = CONLL_U_FIELDS\n\n if fields:\n self.fields = fields\n self.field_indices = [CONLL_U_FIELDS.index(f) for f in fields]\n\n self.sections = sections\n self.lang = lang", "def create_str_to_initialiser_converter(self):\n str_to_initialiser_converter = {\"glorot_normal\": initializers.glorot_normal, \"glorot_uniform\": initializers.glorot_uniform,\n \"xavier_normal\": initializers.glorot_normal, \"xavier_uniform\": initializers.glorot_uniform,\n \"xavier\": initializers.glorot_uniform,\n \"he_normal\": initializers.he_normal, \"he_uniform\": initializers.he_uniform,\n \"identity\": initializers.identity, \"lecun_normal\": initializers.lecun_normal,\n \"lecun_uniform\": initializers.lecun_uniform, \"truncated_normal\": initializers.TruncatedNormal,\n \"variance_scaling\": initializers.VarianceScaling, \"default\": initializers.glorot_uniform}\n return str_to_initialiser_converter", "def collater(self, samples):\r\n raise NotImplementedError", "def __init__(self):\n self.key_to_fn = {\n BytesSequenceToEncodingBiLSTM.LAYER_NAME: BytesSequenceToEncodingBiLSTM,\n Global1dPooling.LAYER_NAME: Global1dPooling,\n CategoricalEmbeddingToEncodingBiLSTM.LAYER_NAME: CategoricalEmbeddingToEncodingBiLSTM,\n CategoricalEmbeddingWithHashBuckets.LAYER_NAME: CategoricalEmbeddingWithHashBuckets,\n CategoricalEmbeddingWithIndices.LAYER_NAME: CategoricalEmbeddingWithIndices,\n CategoricalEmbeddingWithVocabularyFile.LAYER_NAME: CategoricalEmbeddingWithVocabularyFile,\n CategoricalEmbeddingWithVocabularyFileAndDropout.LAYER_NAME: CategoricalEmbeddingWithVocabularyFileAndDropout,\n CategoricalIndicatorWithVocabularyFile.LAYER_NAME: CategoricalIndicatorWithVocabularyFile,\n TFNativeOpLayer.LAYER_NAME: TFNativeOpLayer,\n StringMultiLabelProcessor.LAYER_NAME: StringMultiLabelProcessor\n }", "def add_convertor(metadata):\n\n metadata[\"convertor\"] = make_convertor(metadata[\"name\"],\n metadata[\"datatype\"])", "def make_convertor(name, dtype):\n\n # The spaces may be important in the strings, but don't think\n # they are for my use case, so remove them.\n #\n if dtype == 'char':\n if name.endswith('_flag'):\n return convert_to_bool\n else:\n return lambda v: v.strip()\n elif dtype == 'int':\n return convert_to_int\n elif dtype == 'double':\n return convert_to_float\n elif dtype == 'boolean':\n return convert_to_bool\n else:\n raise ValueError(dtype)", "def collater(self, samples):\r\n return collate(\r\n 
samples, self.src_dict, self.tgt_dict,\r\n left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,\r\n max_sent_len=self.max_sent_len,\r\n mask_other_sents=self.mask_other_sents\r\n )", "def collate_fn(data):\n images, idxs, captions = zip(*data)\n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0)\n batch_size = images.shape[0]\n # p\n nums = []\n for idx in idxs:\n num = [0] * num_concept\n for id in idx:\n num[id[1]] = 1\n nums.append(num)\n concepts = torch.FloatTensor(nums)\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = lengths[i]\n targets[i, :end] = cap[:end]\n return images,concepts, targets", "def collate_fn(batch):\n all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels = map(torch.stack, zip(*batch))\n max_len = max(all_lens).item()\n all_input_ids = all_input_ids[:, :max_len]\n all_attention_mask = all_attention_mask[:, :max_len]\n all_token_type_ids = all_token_type_ids[:, :max_len]\n return all_input_ids, all_attention_mask, all_token_type_ids, all_labels", "def construct_dataset_name(self, *args):\n raise NotImplementedError", "def collate_fn(batch):\n # From\n # https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Object-Detection/blob/43fd8be9e82b351619a467373d211ee5bf73cef8/datasets.py#L60\n\n images = list()\n boxes = list()\n labels = list()\n\n for b in batch:\n if b[0] is not None:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n\n if len(images) > 0:\n images = torch.stack(images, dim=0)\n\n return images, boxes, labels", "def test_custom_collate() -> None:\n metadata = PatientMetadata(patient_id='42')\n foo = \"foo\"\n d1 = {foo: 1, SAMPLE_METADATA_FIELD: \"something\"}\n d2 = {foo: 2, SAMPLE_METADATA_FIELD: metadata}\n result = collate_with_metadata([d1, d2])\n assert foo in result\n assert SAMPLE_METADATA_FIELD in result\n assert isinstance(result[SAMPLE_METADATA_FIELD], list)\n assert result[SAMPLE_METADATA_FIELD] == [\"something\", metadata]\n assert isinstance(result[foo], torch.Tensor)\n assert result[foo].tolist() == [1, 2]", "def list_data_collate(batch: Sequence):\n elem = batch[0]\n data = [i for k in batch for i in k] if isinstance(elem, list) else batch\n key = None\n try:\n if config.USE_META_DICT:\n data = pickle_operations(data) # bc 0.9.0\n if isinstance(elem, Mapping):\n ret = {}\n for k in elem:\n key = k\n data_for_batch = [d[key] for d in data]\n ret[key] = collate_meta_tensor(data_for_batch)\n else:\n ret = collate_meta_tensor(data)\n return ret\n except RuntimeError as re:\n re_str = str(re)\n if \"equal size\" in re_str:\n if key is not None:\n re_str += f\"\\nCollate error on the key '{key}' of dictionary data.\"\n re_str += (\n \"\\n\\nMONAI hint: if your transforms intentionally create images of different shapes, creating your \"\n + \"`DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem (check its \"\n + \"documentation).\"\n )\n _ = dev_collate(data)\n raise RuntimeError(re_str) from re\n except TypeError as re:\n re_str = str(re)\n if \"numpy\" in re_str and \"Tensor\" in re_str:\n if key is not None:\n re_str += f\"\\nCollate error on the key '{key}' of dictionary data.\"\n re_str += (\n \"\\n\\nMONAI hint: if your transforms intentionally create mixtures of torch Tensor and numpy ndarray, \"\n + \"creating your `DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem \"\n + \"(check its 
documentation).\"\n )\n _ = dev_collate(data)\n raise TypeError(re_str) from re", "def init():\n return _libsbml.SBMLFunctionDefinitionConverter_init()", "def SBMLFunctionDefinitionConverter_init():\n return _libsbml.SBMLFunctionDefinitionConverter_init()", "def __init__(self, loader, tok_fn, return_intent_labels=True, to_lower_case=True):\n super().__init__()\n self.loader = loader\n for attr in [\"raw_texts\", \"raw_labels\", \"ood_labels\"]:\n setattr(self, attr, getattr(self.loader, attr))\n self.n_ood = sum(self.ood_labels)\n self.n_indomain = len(self) - self.n_ood\n\n if to_lower_case:\n self.raw_texts = [t.lower() for t in self.raw_texts]\n self.tokenized_texts = [tok_fn(t) for t in self.raw_texts]\n self.vectorized_texts = None\n self.return_intent_labels = return_intent_labels\n self.label_vocab, self.vectorized_labels, self.label_cnts = self.vectorize_labels()\n self.encoder = None", "def init_func(unicore_fuzz, uc):\n pass", "def clevr_collate_fn(data):\n\tdata = sorted(data, key=lambda x: len(x[1]), reverse=True)\n\timg, q, len_q, a, f, idx = list(zip(*data))\n\tq = torch.nn.utils.rnn.pad_sequence(q, batch_first=True)\n\treturn torch.stack(img), q, list(len_q), torch.stack(a), list(f), list(idx)", "def init():\n return _libsbml.FbcToCobraConverter_init()", "def FbcToCobraConverter_init():\n return _libsbml.FbcToCobraConverter_init()", "def get_native_batch_from_loader(loader: DataLoader, batch_index: int = 0):\n dataset = loader.dataset\n collate_fn = loader.collate_fn\n return collate_fn([dataset[batch_index]])", "def dataset_initializer_hook(self):\n iterator = self._dataset.make_initializable_iterator()\n # pylint: disable=protected-access\n hook = estimator_lib._DatasetInitializerHook(iterator)\n self._iterator = iterator\n return hook", "def build_model_fn(self):", "def apply(self, transform_func):\n #input_shapes = transform_func.input_shapes\n #input_types = transform_func.input_types\n #data_shapes = transform_func.data_shapes\n #data_types = transform_func.data_types\n #assert input_shapes == self._data_shapes\n #assert input_types = self._data_types\n ret_gen = transform_func(self.generator)\n ret = type(self).from_generator_func(ret_gen)\n if self.name is not None:\n ret.name = self.name\n #ret.data_shapes = data_shapes\n #ret.data_types = data_types\n return ret", "def __init__(\n self,\n *args,\n mapper_key: typing.Optional[str] = None,\n transform: typing.Optional[typing.Callable] = None,\n **kwargs,\n ):\n super(CustomColumn, self).__init__(*args, **kwargs)\n self.mapper_key = mapper_key\n self.transform = transform", "def collate_fn(data: list):\n def pad_tensor(inp):\n assert type(inp[0]) == torch.Tensor\n it = iter(inp)\n t = next(it)\n max_shape = list(t.shape)\n while True:\n try:\n t = next(it)\n for i in range(len(max_shape)):\n max_shape[i] = int(max(max_shape[i], t.shape[i]))\n except StopIteration:\n break\n max_shape = np.array(max_shape)\n\n padded_ts = []\n for t in inp:\n pad_pattern = np.zeros(2 * len(max_shape), dtype=np.int64)\n pad_pattern[::-2] = max_shape - np.array(t.shape)\n pad_pattern = tuple(pad_pattern.tolist())\n padded_ts.append(F.pad(t, pad_pattern, 'constant', 0))\n\n return padded_ts\n\n def stack(inp):\n if type(inp[0]) == list:\n ret = []\n for vs in zip(*inp):\n ret.append(stack(vs))\n elif type(inp[0]) == dict:\n ret = {}\n for kvs in zip(*[x.items() for x in inp]):\n ks, vs = zip(*kvs)\n for k in ks:\n assert k == ks[0], \"Key value mismatch.\"\n ret[k] = stack(vs)\n elif type(inp[0]) == torch.Tensor:\n new_t = 
pad_tensor(inp)\n ret = torch.stack(new_t, 0)\n elif type(inp[0]) == np.ndarray:\n new_t = pad_tensor([torch.from_numpy(x) for x in inp])\n ret = torch.stack(new_t, 0)\n elif type(inp[0]) == str:\n ret = inp\n else:\n raise ValueError('Cannot handle type {}'.format(type(inp[0])))\n return ret\n\n ret = stack(data)\n\n # compute CPU-intensive matrix K1, K2 here to leverage multi-processing nature of dataloader\n # if 'Gs' in ret and 'Hs' in ret and :\n # try:\n # G1_gt, G2_gt = ret['Gs']\n # H1_gt, H2_gt = ret['Hs']\n # sparse_dtype = np.float32\n # K1G = [kronecker_sparse(x, y).astype(sparse_dtype) for x, y in zip(G2_gt, G1_gt)] # 1 as source graph, 2 as target graph\n # K1H = [kronecker_sparse(x, y).astype(sparse_dtype) for x, y in zip(H2_gt, H1_gt)]\n # K1G = CSRMatrix3d(K1G)\n # K1H = CSRMatrix3d(K1H).transpose()\n #\n # ret['Ks'] = K1G, K1H #, K1G.transpose(keep_type=True), K1H.transpose(keep_type=True)\n # except ValueError:\n # pass\n\n return ret", "def build_pfunc(cls, representation):\n if ut.is_str(representation):\n try:\n func = eval(representation)\n except:\n bf = 'cls.build_pfunc('\n af = ')'\n st = ut.parse_enclose_with_counter(representation , before = bf, after = af)\n func = eval(st)\n \n elif ut.is_dico(representation):\n name_func = representation['name_func']\n func = eval(name_func)(**representation)\n \n else:\n raise SystemError(\"build_custom_func can build a function from an \"\n \"object of tye {0}\".format(cls.__class__))\n \n return func", "def _transform(func_name):\n\n def wrapped(self, *args, **kwargs):\n replacement_string = _query_super(func_name)(self, *args, **kwargs)\n to_string = []\n char_counter = 0\n for index in range(0, len(self._raw_string)):\n if index in self._code_indexes:\n to_string.append(self._raw_string[index])\n elif index in self._char_indexes:\n to_string.append(replacement_string[char_counter])\n char_counter += 1\n return ANSIString(\n \"\".join(to_string),\n decoded=True,\n code_indexes=self._code_indexes,\n char_indexes=self._char_indexes,\n clean_string=replacement_string,\n )\n\n return wrapped", "def init():\n return _libsbml.FbcV1ToV2Converter_init()", "def __init__(self, df, cat_features, enc_type, handle_na=False ):\n self.df = df\n self.cat_features = cat_features\n self.enc_type = enc_type\n self.label_encoder = dict()\n self.binary_encoder = dict()\n self.ohe = None\n self.handle_na = handle_na\n \n if self.handle_na:\n for cat in self.cat_features:\n self.df.loc[:,cat] = self.df.loc[:,cat].astype('str').fillna('-9999999')\n self.output_df = self.df.copy(deep=True)", "def collater(self, samples):\n\n return dual_collate(\n samples, pad_idx=self.d1.src_dict.pad(), eos_idx=self.d1.src_dict.eos(),\n left_pad_source=self.d1.left_pad_source, left_pad_target=self.d1.left_pad_target,\n input_feeding=self.d1.input_feeding,\n )\n\n #prev_output_tokens doesn't match!\n #id doesn't match\n #both of these keys are lengths 248 for both dictionaries\n #length only captures the first dimension of a multidimensional tensor\n #248 is likely the batch size here\n #error occurs because of the sorting by descending source length in the collate method\n #may be possible to fix by replace the sort_order line with: sort_order = torch.LongTensor(range(len(id)))\n #also it seems like there's more keys in c1 and c2 than we explicitly account for here \n #also fix DualSourceSequenceGenerator.generate\n\n indexes = [sample['id'] for sample in samples]\n\n c1 = self.d1.collater([self.d1[index] for index in indexes])\n c2 = 
self.d2.collater([self.d2[index] for index in indexes])\n\n # c1 = self.d1.collater([self.d1[sample['id']] for sample in samples])\n # c2 = self.d2.collater([self.d2[sample['id']] for sample in samples])\n\n net_input1 = c1['net_input']; net_input2 = c2['net_input']\n net_input = {}\n for key in net_input1.keys():\n if 'src_' in key:\n net_input[key+'1'] = net_input1[key]\n elif key == 'prev_output_tokens':\n net_input[key] = net_input1[key]\n # elif key == 'ntokens':\n # net_input[key] = net_input1[key]\n else:\n raise AssertionError\n for key in net_input2.keys():\n if 'src_' in key:\n net_input[key+'2'] = net_input2[key]\n elif key == 'prev_output_tokens':\n if self.dual_decoder:\n net_input[key+'_extra'] = net_input2[key]\n else:\n # net_input[key] = net_input2[key]\n pass\n # err = \"NET_INPUT ASSERTION: \"+str(len(indexes))+\";\\n\"\n # err += str(len(net_input[key])) + \"\\t\" + str(net_input[key]) + \"\\n\"\n # err += str(len(net_input2[key])) + \"\\t\" + str(net_input2[key]) + \"\\n\"\n # assert False, err\n # if not net_input[key] == net_input2[key]:\n # print(\"NET_INPUT ASSERTION:\")\n # print(net_input[key])\n # print(net_input2[key])\n # raise AssertionError\n else:\n raise AssertionError\n\n c = {'net_input': net_input}\n for key in c1.keys():\n if key == 'target':\n c[key] = c1[key]\n elif key == 'ntokens':\n c[key] = c1[key]\n elif key == 'id' or key == 'nsentences':\n c[key] = c1[key]\n else:\n assert key == 'net_input',key\n for key in c2.keys():\n if key == 'target':\n c[key] = c2[key]\n elif key == 'ntokens':\n if 'target' not in samples[0]:\n c[key] += c2[key] # source tokens\n elif self.dual_decoder:\n c[key+'_extra'] = c2[key] # target tokens for decoder 2\n else:\n assert c[key] == c2[key], \"NTOKENS:\\n\"+str(c[key])+\"\\n\"+str(c2[key]) # target tokens for decoder\n elif key == 'id':\n # set1 = set(c[key])\n # set2 = set(c2[key])\n # assert set1 == set2\n assert False, \"ID: lengths: \"+str(len(indexes))+\"; \"+str(len(c[key]))+\", \"+str(len(c2[key]))+\"\\n\"+str(c[key][:10])+\"...\\n\"+str(c2[key][:10])+\"...\\n\" \n assert c[key] == c2[key], \"ID:\\n\"+str(c[key])+\"\\n\"+str(c2[key])\n elif key == 'nsentences':\n assert c[key] == c2[key], \"NSENT:\\n\"+str(c[key])+\"\\n\"+str(c2[key])\n else:\n assert key == 'net_input',key\n return c\n\n\n\n net_input1['src_tokens1'] = net_input1.pop('src_tokens') \n net_input1['src_lengths1'] = net_input1.pop('src_lengths')\n net_input1['src_tokens2'] = net_input2['src_tokens'] \n net_input1['src_lengths2'] = net_input2['src_lengths']\n\n if self.dual_decoder:\n net_input1['prev_output_tokens_extra'] = net_input2['prev_output_tokens']\n c1['target_extra'] = c2['target']\n c1['ntokens_extra'] = c2['ntokens']\n if 'target' not in samples[0]:\n #ntokens and ntokens_extra represent the total number of source tokens\n c1['ntokens'] = c1['ntokens'] + c2['ntokens']\n if 'ntokens_extra' in c1:\n c1['ntokens_extra'] = c1['ntokens']\n #else ntokens is the total number of target tokens\n return c1", "def init():\n return _libsbml.CompFlatteningConverter_init()", "def __init__(self, setfunc, column, role, convertfunc):\n super(SetDataArgs, self).__init__()\n self.setfunc = setfunc\n self.column = column\n self.role = role\n self.convertfunc = convertfunc", "def CobraToFbcConverter_init():\n return _libsbml.CobraToFbcConverter_init()", "def collate_fn(\n self,\n batch: List[\n Tuple[\n np.ndarray,\n np.ndarray,\n np.ndarray,\n np.ndarray,\n int,\n int,\n bool,\n bool,\n Optional[np.ndarray],\n Optional[np.ndarray],\n ]\n ],\n ) -> 
Union[\n Tuple[Tensor, Tensor, Tensor, Tensor, Any, Any, Any, Any],\n Tuple[Tensor, Tensor, Tensor, Tensor, Any, Any, Any, Any, Any, Any],\n ]:\n if not self.use_audio:\n inp_ids, segment_ids, inp_mask, st_mask, n_preceding, query_ids, is_first, is_last = zip(*batch)\n return (\n pad_sequence([torch.tensor(x) for x in inp_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in segment_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in inp_mask], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in st_mask], batch_first=True, padding_value=0),\n n_preceding,\n query_ids,\n is_first,\n is_last,\n )\n (\n inp_ids,\n segment_ids,\n inp_mask,\n st_mask,\n n_preceding,\n query_ids,\n is_first,\n is_last,\n features,\n features_length,\n ) = zip(*batch)\n return (\n pad_sequence([torch.tensor(x) for x in inp_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in segment_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in inp_mask], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in st_mask], batch_first=True, padding_value=0),\n n_preceding,\n query_ids,\n is_first,\n is_last,\n pad_sequence([torch.tensor(x) for x in features], batch_first=True, padding_value=0).float(),\n torch.tensor(features_length, dtype=torch.long),\n )", "def init():\n return _libsbml.CobraToFbcConverter_init()", "def CompFlatteningConverter_init():\n return _libsbml.CompFlatteningConverter_init()", "def FbcV1ToV2Converter_init():\n return _libsbml.FbcV1ToV2Converter_init()", "def _get_data_preprocessing_fns(self):\n # Create new functions with partial positional arguments assigned\n process_path_fn = \\\n partial(data_preprocessing.process_path,\n one_hot=self.ONE_HOT,\n num_classes=self._NUM_CLASSES,\n class_names=self._CLASS_NAMES)\n process_img_path_fn = data_preprocessing.process_img_path\n convert_format_fn = \\\n partial(data_preprocessing.convert_format,\n grayscale_in=self._GRAYSCALE_IN,\n grayscale_out=self._GRAYSCALE_OUT)\n random_rotate_fn = \\\n partial(data_preprocessing.random_rotate,\n stddev=self._ROTATE_STDDEV)\n random_zoom_fn = \\\n partial(data_preprocessing.random_zoom,\n max_percent=self._ZOOM_MAX_PERCENT,\n stddev=self._ZOOM_STDDEV,\n img_height=self._HEIGHT,\n img_width=self._WIDTH)\n resize_fn = \\\n partial(data_preprocessing.resize,\n height=self._HEIGHT,\n width=self._WIDTH)\n\n funcs = edict({'process_path': process_path_fn,\n 'process_img_path': process_img_path_fn,\n 'convert_format': convert_format_fn,\n 'random_rotate': random_rotate_fn,\n 'random_zoom': random_zoom_fn,\n 'resize': resize_fn})\n\n return funcs", "def create_vectorizer(ds):\n vectorize_layer = TextVectorization(\n standardize=clean_text,\n split=\"whitespace\",\n max_tokens=MAX_WORDS - 1,\n output_mode=\"int\",\n output_sequence_length=MAX_LEN,\n )\n vectorize_layer.adapt(ds.map(lambda text, label: text))\n return vectorize_layer", "def __init__(self, *args):\n _snap.TStrAscFltKd_swiginit(self, _snap.new_TStrAscFltKd(*args))", "def init():\n return _libsbml.FbcV2ToV1Converter_init()", "def FbcV2ToV1Converter_init():\n return _libsbml.FbcV2ToV1Converter_init()", "def collate_fn_bow(data, vocab_size):\n labels = torch.zeros(len(data), dtype=torch.long)\n ood_labels = torch.zeros(len(data), dtype=torch.long)\n rows, cols = [], []\n values = []\n for idx, (numerical_sent, label, is_ood) in enumerate(data):\n labels[idx] = label\n ood_labels[idx] = is_ood\n for 
num, cnt in zip(*np.unique(numerical_sent, return_counts=True)):\n rows.append(idx)\n cols.append(num)\n values.append(cnt)\n indices = np.vstack((rows, cols))\n\n i = torch.LongTensor(indices)\n v = torch.FloatTensor(values)\n batch = torch.sparse.FloatTensor(i, v, torch.Size((len(data), vocab_size)))\n return batch, labels, ood_labels", "def generate_c_source(self):\n return template_elfling_source % (self.generate_c_data_block(), ELFLING_WORK, ELFLING_OUTPUT, ELFLING_UNCOMPRESSED, len(self.__contexts), ELFLING_WORK, self.get_input_offset(), ELFLING_OUTPUT, self.get_uncompressed_size(), ELFLING_UNCOMPRESSED)", "def __call__(\n self,\n method: str,\n callable_: Callable[[pd.DataFrame], Union[pd.DataFrame, pd.Series]],\n normalizable: bool,\n **_ignore\n ):\n return MetaDataSetFeaturizerViaLambda(\n method=method, callable_=callable_, normalizable=normalizable\n )", "def build_dataloader(cfg, augmentor=None, mode='train', dataset=None, rank=None,\n dataset_class=VolumeDataset, dataset_options={}, cf=collate_fn_train):\n assert mode in ['train', 'val', 'test']\n print('Mode: ', mode)\n\n if mode == 'train':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH\n elif mode == 'val':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH * 4\n else:\n cf = collate_fn_test # update the collate function\n batch_size = cfg.INFERENCE.SAMPLES_PER_BATCH * cfg.SYSTEM.NUM_GPUS\n\n if dataset is None: # no pre-defined dataset instance\n if cfg.MODEL.TARGET_OPT_MULTISEG_SPLIT is not None:\n dataset_class = VolumeDatasetMultiSeg\n dataset = get_dataset(cfg, augmentor, mode, rank, dataset_class, dataset_options)\n\n sampler = None\n num_workers = cfg.SYSTEM.NUM_CPUS\n if cfg.SYSTEM.DISTRIBUTED:\n num_workers = cfg.SYSTEM.NUM_CPUS // cfg.SYSTEM.NUM_GPUS\n if cfg.DATASET.DISTRIBUTED == False:\n sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n\n # In PyTorch, each worker will create a copy of the Dataset, so if the data\n # is preload the data, the memory usage should increase a lot.\n # https://discuss.pytorch.org/t/define-iterator-on-dataloader-is-very-slow/52238/2\n img_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=False, collate_fn=cf,\n sampler=sampler, num_workers=num_workers, pin_memory=True)\n\n return img_loader", "def init_loaders(self, *args, **kwargs):\n\n # Convert the data to Dataset\n dataset_dict = self.init_datasets(*args, **kwargs)\n\n # If the Dataset implements collate_fn, that is used. 
Otherwise, default_collate is used\n if hasattr(dataset_dict[\"train\"], \"collate_fn\") and callable(\n getattr(dataset_dict[\"train\"], \"collate_fn\")\n ):\n collate_fn = dataset_dict[\"train\"].collate_fn\n else:\n collate_fn = default_collate\n\n # If 'iters_per_epoch' is defined, then a fixed number of random sample batches from the training set\n # are drawn per epoch.\n # Otherwise, an epoch is defined by a full run through all of the data in the dataloader.\n #\n if self.config_dict.get(\"iters_per_epoch\") is not None:\n num_samples = (\n self.config_dict[\"iters_per_epoch\"] * self.config_dict[\"batch_size\"]\n )\n loaders_dict = {}\n for key in dataset_dict.keys():\n if key == \"train\":\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_sampler=BatchSampler(\n RandomSampler(\n dataset_dict[key],\n replacement=True,\n num_samples=num_samples,\n ),\n batch_size=self.config_dict[\"batch_size\"],\n drop_last=False,\n ),\n collate_fn=collate_fn,\n )\n else:\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n else:\n loaders_dict = {\n key: DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n for key in data_dict.keys()\n }\n\n return loaders_dict", "def __init__(self, column_name=COLUMN_WITH_JSONS_DEFAULT, output_explore_file_name=EXPLORE_LKML_OUT_DEFAULT,\n output_view_file_name=OUTPUT_VIEW_ML_OUT_DEFAULT, sql_table_name=TABLE_WITH_JSON_COLUMN_DEFAULT,\n table_alias=TABLE_ALIAS_DEFAULT, handle_null_values_in_sql=HANDLE_NULL_VALUES_IN_SQL_DEFAULT,\n primary_key=None, sql_dialect=SQL_DIALECT_DEFAULT):\n self.output_explore_file_name = output_explore_file_name or EXPLORE_LKML_OUT_DEFAULT\n self.output_view_file_name = output_view_file_name or OUTPUT_VIEW_ML_OUT_DEFAULT\n self.column_name = column_name or COLUMN_WITH_JSONS_DEFAULT\n self.sql_table_name = sql_table_name or TABLE_WITH_JSON_COLUMN_DEFAULT\n self.table_alias = get_formatted_var_name(table_alias or TABLE_ALIAS_DEFAULT)\n self.handle_null_values_in_sql = handle_null_values_in_sql or HANDLE_NULL_VALUES_IN_SQL_DEFAULT\n self.sql_dialect = sql_dialect or SQL_DIALECT_DEFAULT\n\n if self.sql_dialect.lower() not in supported_dialects:\n raise ValueError(\"SQL Dialect {} not supported. 
Dialects available: {}\".format(self.sql_dialect, \", \".join(supported_dialects) ))\n\n self.generator = Generator(column_name=self.column_name,\n table_alias=self.table_alias,\n handle_null_values_in_sql=self.handle_null_values_in_sql,\n sql_dialect=self.sql_dialect,\n primary_key=primary_key)\n\n self.sql_writer = SQLWriter(self.sql_table_name, self.table_alias, self.sql_dialect)\n self.looker_writer = LookerWriter(self.output_explore_file_name, self.output_view_file_name,\n self.sql_table_name, self.table_alias)", "def _cmplx_factory_ ( cmplxt , re , im ) :\n return cmplxt ( re , im )", "def mk_collation_from_prevstate(shard_chain, state, coinbase):\n # state = state or shard_chain.state\n collation = Collation(CollationHeader())\n collation.header.shard_id = shard_chain.shard_id\n collation.header.prev_state_root = state.trie.root_hash\n collation.header.coinbase = coinbase\n collation.transactions = []\n return collation", "def initializer(self):\n return self.pretransformed_input.initializer", "def Init(*args, **kwargs):\n return _gdi_.EncodingConverter_Init(*args, **kwargs)", "def convert(self):\n\t\tself.make_func_dict() #sets self.func_dict\n\t\tself.make_main_function() #sets self.main\n\t\tself.remove_lambda_nesting()\n\t\tself.replace_self_with_func_names()\n\t\tself.make_func_declarations() #sets self.cpp_declarations\n\t\tself.make_func_bodies() #sets self.cpp_func_bodies\t\t\n\t\tself.make_cpp_func_bodies()\n\t\tlines = []\n\t\tlines.append('#include \"lithp.hpp\"')\n\t\tfor name, signature in self.cpp_declarations.iteritems():\n\t\t\tlines.append(signature + ';')\n\n\t\tfor name, signature in self.cpp_declarations.iteritems():\n\t\t\tif name == 'main': continue\n\t\t\tlines.append(signature + '{')\n\t\t\tlines.append(' return ' + self.cpp_func_bodies[name] + ';\\n}')\n\t\tlines.append(\n\"\"\"\nint main(){\n %s;\n return 0;\n}\n\"\"\" % self.cpp_func_bodies['main'])\n\t\tself.converted = '\\n'.join(lines)\t\t\n\t\treturn self.converted", "def regular_collate_fn(data):\n\timg, box, q, a = list(zip(*data))\n\tq = torch.nn.utils.rnn.pad_sequence(q, batch_first=True)\n\treturn torch.stack(img), torch.stack(box), q, torch.stack(a).long()", "def __init__(self, recipes, decode_param_from=None, custom_handlers=None):\n\n if not recipes or not isinstance(recipes, list):\n logger.error('Unsupported _functions type! 
Something went wrong!')\n\n # Get required functions\n self.functions = [] # {func: func_obj, func_params: (params), fields=[]}\n\n for _func in recipes:\n # Check the syntax of provided function\n\n # Case: handler_name\n if match(r'(^[a-zA-Z0-9_-]{3,20}$)', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>XyZ<field>AbC\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512}<field>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>XyZ<field>AbC<rfield>YzX\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512}<field>.{1,512}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>XyZ\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<field>AbC\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<field>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>AbC<rfield>XXX\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<field>AbC<rfield>XXX\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<field>.{1,512}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<rfield>ABCD\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: Syntax error\n else:\n logger.error('Syntax Error. Function: %s' % _func)\n logger.error(\n 'The example syntax of registry handler function shuld be: \\n\"-rh function_name<param>param1<param>param2<field>field_name_to_process<rfield>output_field_name\" (<param>,<field> and <rfield> are optional and depends on given function)\\nUse -prh for more details')\n exit(ERR_PROVIDER_INCORRECT_FUNCTION_SYNTAX)\n\n\n _func_name = ''\n _func_params = None\n _func_fields = None\n _func_output_fields = None\n\n # Get function, parameter(s) and fields (if specified)\n # Get _func_name\n _func_name, separator, _ = _func.partition('<')\n _func_name = _func_name.lower()\n\n if '<rfield>' in _func:\n _func, _, _func_output_fields = _func.partition('<rfield>')\n _func_output_fields = _func_output_fields.split(';')\n map(str.strip, _func_output_fields)\n\n if '<field>' in _func:\n _func, _, _func_fields = _func.partition('<field>')\n _func_fields = _func_fields.split(';')\n map(str.strip, _func_fields)\n\n if '<param>' in _func:\n _func, _, _func_params = _func.partition('<param>')\n _func_params = _func_params.split(';')\n map(str.strip, _func_params)\n\n if decode_param_from:\n if decode_param_from.lower() == 'base64':\n _func_params = list(map(base64.b64decode, _func_params))\n _func_params = list(map(bytes.decode, _func_params))\n else:\n logger.error('Unable to create a registry handler: \"%s\"\\n'\n 'Function: \"%s\"\\n'\n 'Unsupported param encoding: \"%s\"' %\n (_func_name, _func, decode_param_from))\n return None\n\n _func_params = tuple(_func_params)\n\n try:\n if not custom_handlers:\n func_class = getattr(handlers, _func_name)\n else:\n try:\n func_class = getattr(handlers, _func_name)\n except AttributeError:\n func_class = getattr(custom_handlers, _func_name)\n\n func_obj = getattr(func_class, _func_name)\n\n # if _func_output_fields is None:\n # _func_output_fields = _func_fields\n # pass\n\n self.functions.append({'func': func_obj, 'func_params': _func_params, 'func_fields': _func_fields,\n 'result_fields': _func_output_fields})\n\n except Exception as msg:\n logger.warning('Unable to get function object for: %s. 
Error: %s' % (_func_name, msg))\n logger.error('Unsupported Registry Handler: \"%s\"' % _func_name)\n\n self.default_fields = [registry_provider.registry_value.attributes.value_content]", "def collate_fn(self, batch):\n images, boxes, categories = [], [], []\n\n for b in batch:\n images.append(b['img'])\n boxes.append(b['box'])\n categories.append(b['category'])\n\n images = torch.stack(images, dim=0)\n\n # tensor (N, 3, 300, 300), 3 lists of N tensors each\n return {\n 'imgs': images,\n 'boxes': boxes,\n 'categories': categories\n }", "def collator(self, batch):\n\n # Retrieve data from batch\n ids = [item[\"ids\"] for item in batch]\n label = [item[\"label\"] for item in batch]\n\n # Sort the list\n ids, label = map(\n list,\n zip(\n *sorted(\n zip(ids, label), key=lambda _tuple: len(_tuple[0]), reverse=True,\n )\n ),\n )\n\n max_len = len(ids[0])\n\n # Initialize seq len list\n text_lengths = []\n new_ids = []\n for id in ids:\n\n _len = len(id)\n pad_len = max_len - _len\n\n if pad_len < 0:\n id = id[:max_len]\n else:\n id = np.pad(\n id, (0, pad_len), \"constant\", constant_values=self.pad_id\n ).tolist()\n\n new_ids.append(id)\n\n text_lengths.append(_len if _len < max_len else max_len)\n\n label = torch.tensor(label)\n text_lengths = torch.tensor(text_lengths)\n text = np.stack(new_ids)\n text = torch.from_numpy(text)\n\n return {\"label\": label, \"text_lengths\": text_lengths, \"text\": text}", "def __init__(self, *args):\n _snap.TStrFltFltTr_swiginit(self, _snap.new_TStrFltFltTr(*args))", "def __init__(self, columns_to_trans='all', trans_flag=True):\n self.columns_to_trans = columns_to_trans\n self.trans_flag = trans_flag", "def __call__(self, f: Callable[..., int]) -> BaseNLPLabelingFunction:\n if self._lf_cls is None:\n raise NotImplementedError(\"_lf_cls must be defined\")\n name = self.name or f.__name__\n return self._lf_cls(\n name=name,\n f=f,\n resources=self.resources,\n pre=self.pre,\n text_field=self.text_field,\n doc_field=self.doc_field,\n language=self.language,\n disable=self.disable,\n memoize=self.memoize,\n memoize_key=self.memoize_key,\n gpu=self.gpu,\n )", "def init_transform(source_path=None, template_path=None, **kwargs):\r\n kwargs.setdefault('adjust_for_shapefile', False)\r\n kwargs.setdefault('clean_whitespace_field_names', ())\r\n kwargs.setdefault('dissolve_field_names')\r\n kwargs.setdefault('extract_where_sql')\r\n kwargs.setdefault('field_name_change_map', {})\r\n kwargs.setdefault('insert_dataset_paths', ())\r\n kwargs.setdefault('insert_dicts_kwargs', ())\r\n kwargs.setdefault('insert_iters_kwargs', ())\r\n kwargs.setdefault('unique_id_field_names', ())\r\n kwargs.setdefault('xy_tolerance')\r\n import arcetl\r\n # Init.\r\n try:\r\n if source_path:\r\n etl = arcetl.ArcETL('Extract from ' + os.path.basename(source_path))\r\n etl.extract(source_path, extract_where_sql=kwargs['extract_where_sql'])\r\n else:\r\n etl = arcetl.ArcETL('Init from ' + os.path.basename(template_path))\r\n etl.init_schema(template_path)\r\n rename_fields(etl, kwargs['field_name_change_map'])\r\n # Insert features.\r\n for func, arg in ((insert_features_from_paths, 'insert_dataset_paths'),\r\n (insert_features_from_dicts, 'insert_dicts_kwargs'),\r\n (insert_features_from_iters, 'insert_iters_kwargs')):\r\n func(etl, kwargs[arg])\r\n # Alter attributes.\r\n clean_whitespace(etl, kwargs['clean_whitespace_field_names'])\r\n # Combine features.\r\n if kwargs['dissolve_field_names'] is not None:\r\n etl.transform(arcetl.features.dissolve,\r\n 
dissolve_field_names=kwargs['dissolve_field_names'],\r\n tolerance=kwargs['xy_tolerance'])\r\n # Finalize attributes.\r\n update_attributes_by_unique_ids(etl, kwargs['unique_id_field_names'])\r\n if kwargs['adjust_for_shapefile']:\r\n etl.transform(arcetl.combo.adjust_for_shapefile)\r\n except:\r\n etl.close()\r\n raise\r\n return etl" ]
[ "0.72403246", "0.6569447", "0.6355525", "0.6254008", "0.6132644", "0.59491956", "0.58509344", "0.58122337", "0.57363015", "0.57224447", "0.5562954", "0.55518824", "0.55403864", "0.5461661", "0.54195124", "0.54195124", "0.53915864", "0.53042173", "0.526494", "0.52282083", "0.5184555", "0.518289", "0.5173001", "0.51387495", "0.51309776", "0.51299584", "0.51109487", "0.50816363", "0.5073432", "0.5042116", "0.50302905", "0.5012312", "0.49991915", "0.4951458", "0.49217", "0.48887545", "0.48806292", "0.48806292", "0.48431316", "0.48253992", "0.48221925", "0.4820619", "0.48144242", "0.47915646", "0.47856608", "0.4740637", "0.47284582", "0.4691686", "0.46879044", "0.46536794", "0.46521208", "0.4648509", "0.46317357", "0.46148342", "0.4605452", "0.45946535", "0.45852938", "0.45826986", "0.45780703", "0.457608", "0.45628974", "0.4550168", "0.45495802", "0.4542084", "0.4534219", "0.4532592", "0.4522481", "0.45137432", "0.451335", "0.45113322", "0.45065838", "0.45064944", "0.4498809", "0.4494693", "0.448995", "0.44863373", "0.44754136", "0.44517076", "0.4450051", "0.44344988", "0.4433508", "0.44275633", "0.44260612", "0.44224587", "0.44144884", "0.44115254", "0.44114068", "0.43954316", "0.43915266", "0.43892384", "0.43807217", "0.43727195", "0.43647084", "0.4352137", "0.43517175", "0.43505067", "0.43468854", "0.43430826", "0.43414006", "0.4334189" ]
0.6172216
4
Loads yaml file. Arguments
def load(path: str='config.yaml'):
    file = Path(path).open()
    result = yaml.safe_load(file)
    debug(f'YAML file {path} loaded and parsed succesful')
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadFromFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n if os.path.exists(path) and os.path.isfile(path):\n self.load(yaml.load(open(path, 'r')))", "def load_yaml(filename):\n try:\n f = file(filename, 'r')\n data = yaml.load(f)\n return data\n except (IOError, OSError) as e:\n err = e[0]\n reason = e[1]\n error = 'load_yaml: Failed to open {filename}: {reason} {err}'.format(filename=filename, reason=reason, err=err)\n raise IOError(error)", "def load_yaml(filepath):\n with open(filepath, 'r') as stream:\n try:\n return yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)", "def load_yaml(file: Text):\n with open(file) as fp:\n return yaml.load(fp, yaml.FullLoader)", "def load_yaml(file):\n with open(file, 'r') as file:\n data = yaml.safe_load(file)\n return data", "def LoadYaml(path):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import yaml\n \n fp = None\n try:\n fp = open(path)\n \n data = yaml.load(fp)\n \n finally:\n if fp:\n fp.close()\n \n return data", "def load_yaml(yml_file):\n with open(yml_file) as src:\n cfg = yaml.load(src, Loader=yaml.Loader)\n return cfg", "def load_yaml(yaml_name):\n print('training network configuration file is {0}'.format(yaml_name))\n util.check_file_exist(yaml_name)\n config = util.load_yaml_file(yaml_name)\n return config", "def load_yaml(file_path):\n with open(file_path) as fin:\n content = yaml.load(fin, Loader=yaml.FullLoader)\n return content", "def _load_file(self, f):\n if not os.path.exists(f):\n msg = '%s is a non-existant definition file' % f\n raise ValueError(msg)\n\n with open(f, 'r') as fh:\n return yaml.load(fh.read())", "def load_yaml(self):\n env = self.state.document.settings.env\n relpath, abspath = env.relfn2path(directives.path(self.arguments[0]))\n\n env.note_dependency(relpath)\n\n encoding = self.options.get('encoding', env.config.source_encoding)\n with io.open(abspath, 'rt', encoding=encoding) as stream:\n spec = yaml.load(stream, _YamlOrderedLoader) # nosec\n self.spec = spec\n self.paths = spec[self.path_path]\n self.definitions = spec[self.models_path]\n self.openapi_version = spec.get('swagger', None) or spec['openapi']\n self.options.setdefault('uri', 'file://%s' % abspath)", "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\n data = yaml.load(f)\n f.close()\n return data\n else:\n return {}", "def _load_yaml_file(yaml_file):\n with io.open(yaml_file, 'r', encoding='utf-8') as stream:\n yaml_content = yaml.load(stream)\n FileUtils._check_format(yaml_file, yaml_content)", "def load_yaml(content):\n from yaml import load, FullLoader\n return load(content, Loader=FullLoader)", "def load_yaml_file(i):\n\n import yaml\n\n fn = i['yaml_file']\n\n try:\n if sys.version_info[0] > 2:\n f = open(fn, 'r', encoding='utf8')\n else:\n f = open(fn, 'r')\n except Exception as e:\n return {'return': 16, 'error': 'problem opening YAML file='+fn+' ('+format(e)+')'}\n\n try:\n s = f.read()\n except Exception as e:\n f.close()\n return {'return': 1, 'error': 'problem reading YAML file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n try:\n d = yaml.load(s, Loader=yaml.FullLoader)\n except Exception as e:\n return {'return': 1, 'error': 'problem parsing YAML from file='+fn+' ('+format(e)+')'}\n\n return {'return': 0, 'dict': d}", "def load_yaml_file(self, path):\n with path.open('r') as handle:\n data = load_yaml(handle)\n\n self.set_all(**self.SCHEMA.load(data).data)", "def _load_yaml(source_dir, file_name):\n return 
yaml.dump(utils.load_yaml_dict(os.path.join(source_dir, file_name)))", "def load_yaml(input_path):\n yaml = ruamel.yaml.YAML()\n with open(input_path, 'rb') as input_file:\n return yaml.load(input_file)", "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\n data = yaml.load(f)\n f.close()\n return data\n else:\n # This should maybe throw an exception or something\n return {}", "def load_multiple_yaml_file(self, args):\n sList = []\n for file in args:\n with open (file , \"r\") as stream:\n sList.append(stream.read())\n fString = ''\n for s in sList:\n fString = fString + \"\\n\" + s\n self.data = yaml.load(fString)", "def load_yaml(fname):\n with open(fname) as f:\n val = yaml.safe_load(os.path.expandvars(f.read()))\n return val", "def _load_datas(self) -> tp.Dict[str, dict]:\n with open(self._file, \"r\") as stream:\n try:\n load: tp.Dict[str, dict] = yaml.safe_load(stream)\n logger.info(\"YAML imported\")\n return load\n except yaml.YAMLError as exc:\n logger.debug(\"YAML import error : %s\", exc)\n raise", "def load_yml(yml_file):\n with open(yml_file) as src:\n cfg = yaml.load(src, Loader=yaml.Loader)\n return cfg", "def load(cls, file_name):\n with open(file_name) as fl:\n config = yaml.load(fl, Loader=yaml.FullLoader)\n return cls(file_name, _loaded_from_file=True, **config)", "def load_yaml(path: str) -> Dict[str, Any]:\n with open(path, \"r\", encoding=\"utf8\") as fp:\n data = yaml.safe_load(fp)\n return data", "def load(self, filename=None):\n prefix = os.path.dirname(filename)\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n\n name = filename or self.filename\n\n if os.path.exists(name):\n with open(name, 'rb') as dbfile:\n self.data = yaml.safe_load(dbfile) or dict()", "def load_yaml(cls, file=None):\n if file is None:\n file = f'{cls.base_path}rcp_{rcp.stage}.yml'\n try:\n with open(file, 'r') as f:\n recipe = yaml.load(f, Loader=yaml.FullLoader)\n rcp.__dict__ = recipe\n return rcp\n except FileNotFoundError:\n print(\"Recipe file doesn't exist.\")\n raise", "def load_yaml(file_path: str) -> dict:\n assert file_path.endswith(\".yaml\")\n with open(file_path) as file:\n return yaml.load(file, Loader=yaml.FullLoader)", "def load_yaml(path: str) -> dict:\n with open(path, 'r') as f:\n yaml_file = yaml.load(f, Loader=yaml.FullLoader)\n return yaml_file", "def load_yaml(cls, file=None):\n if file is None: file = f'{rcp.base_path}cfg.yml'\n try:\n with open(file, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n cfg.__dict__ = config\n return cfg\n except FileNotFoundError:\n print(\"Config file doesn't exist.\")", "def _load_data_yaml(self, pathname): \n pathname = self._yaml_extension(pathname)\n\n with open(pathname) as file:\n traj_data = yaml.load(file, Loader=yaml.FullLoader)\n \n return traj_data", "def load_yaml_file(self, yaml_file_path):\n try:\n yaml_file = open(yaml_file_path, encoding=\"UTF-8\").read()\n except FileNotFoundError:\n raise CouldNotFindYAMLFileError(yaml_file_path)\n\n try:\n yaml_contents = yaml.safe_load(yaml_file)\n except yaml.YAMLError:\n raise InvalidYAMLFileError(yaml_file_path)\n\n if yaml_contents is None:\n raise EmptyYAMLFileError(yaml_file_path)\n\n if isinstance(yaml_contents, dict) is False:\n raise InvalidYAMLFileError(yaml_file_path)\n\n return yaml_contents", "def load_file(self, filepath):\n filepath = self._yaml_extension(filepath)\n data = self._load_data_yaml(filepath)\n return data", "def read_yaml_file(yaml_file):\n with open(yaml_file, 'r') as yfile:\n loaded_file = yaml.safe_load(yfile)\n return 
loaded_file", "def load_yaml(path):\n fsock = open(path)\n \n try:\n yaml_string = fsock.read()\n yaml_obj = yaml.load(yaml_string)\n \n finally:\n fsock.close()\n\n return yaml_obj", "def load_yaml():\n yamlfullpath = os.path.join(THISDIR, 'ff_data.yaml')\n\n with open(yamlfullpath, 'r') as stream:\n ff_data = yaml.safe_load(stream)\n\n FF_DATA_SCHEMA(ff_data)\n return ff_data", "def loadseasoning(self):\n stream = open(self.fileref)\n self.config = yaml.safe_load(stream)\n stream.close()", "def load_yaml(fname: str) -> dict:\n try:\n with open(fname, 'r') as f:\n dataMap = yaml.safe_load(f)\n except IOError as e:\n print(f\"Cannot open YAML file {fname}\")\n print(f\"IOError: {e}\")\n \n return dataMap", "def load_config(filename):\n AS[\"config\"] = load_yaml_file(filename)", "def load_yaml(filename):\n with open(filename) as file:\n yaml = YAML()\n data = yaml.load(file)\n return data, yaml", "def load_params_file(filename):\n with open(filename, 'r') as f:\n params = yaml.safe_load(f)\n return params", "def __init__(self, yaml_file_path: Path) -> None:\n with yaml_file_path.open(\"r\") as yaml_file:\n self._yaml = YAML().load(yaml_file.read())", "def yank_load(script):\n return yaml.load(textwrap.dedent(script), Loader=YankLoader)", "def load(path=\".travis.yml\"):\n if not path:\n path = \".travis.yml\"\n with open(path, 'r') as stream:\n return yaml.load(stream)", "def load_yaml(yaml_file, template_params=None, return_config_str=False):\n with open(yaml_file, 'r') as yaml_config:\n yaml_str = yaml_config.read()\n\n return load_yaml_str(yaml_str, template_params, return_config_str)", "def run_yaml(filepath: str, strict: bool = False, debug: bool = False):\n with open(filepath, \"r\", encoding='utf-8-sig') as stream:\n return _run_file_full(filepath, yaml.safe_load(stream), strict=strict, debug=debug)", "def from_path(cls, path: str) -> Any:\n cls._check_yaml()\n with open(path) as f:\n return yaml.safe_load(f)", "def read_from_yaml(file_path, Loader=None):\n import yaml\n if Loader is None:\n Loader = yaml.FullLoader\n if os.path.isfile(file_path):\n with open(file_path, 'r') as stream:\n data = yaml.load(stream, Loader=Loader)\n return data\n else:\n raise Exception('File: {} does not exist.'.format(file_path))", "def readyml(filename):\n\n with open(filename, 'r') as f:\n return yaml.load(f.read())", "def read_exercise_yaml(path_yaml):\n exer_dict = {}\n with open(path_yaml, 'r') as stream:\n try:\n exer_dict = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return exer_dict", "def load_config_file(path):\n with open(path) as file:\n return yaml.load(file, Loader=yaml.FullLoader)", "def load_yaml(fname, schema=None):\n with open(fname) as fh:\n data = yaml.safe_load(fh.read())\n if schema:\n import jsonschema\n jsonschema.validate(data, schema=schema)\n return data", "def load(text: str, options: Dict[str, str]) -> object:\n raise LoaderMissingError(\"Yaml is not installed on the system\") from e", "def read_yaml(yaml_path):\n with open(yaml_path) as f:\n yaml_data = yaml.load(f, Loader=yaml.FullLoader)\n\n return yaml_data", "def load_yaml_file(yaml_file):\n try:\n # Get the configuration parameters which contain the region, vpc name, template filename, VPC CIDR blocks\n s = open(yaml_file).read()\n config = list(yaml.load_all(s))[0]\n\n except Exception as e:\n # We're expecting the user parameters to be encoded as YAML\n # so we can pass multiple values. 
If the YAML can't be decoded\n # then return failure with a helpful message.\n print(e)\n raise Exception('Input configuration parameters could not be decoded as YAML')\n\n return config", "def load_config(config_file):\n with open(config_file) as f:\n return yaml.load(f)", "def load_cfg(filepath=\"./config.yaml\"):\n with open(filepath, \"r\") as f:\n return yaml.load(f, Loader=yaml.FullLoader)", "def get_ymal_load(yamlfile):\n with open(yamlfile, 'r', encoding='utf-8') as fr:\n filedata = fr.read()\n yamldata = yaml.full_load(filedata)\n return yamldata", "def load(filePath):\n\n stream = open(filePath, 'r')\n yamlDict = yaml.safe_load(stream)\n\n return yamlDict", "def load_data(filename, **kwargs):\n # TODO: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated,\n # as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.\n mod = _get_module_from_extension(filename, **kwargs)\n with open(filename) as f_in:\n data = mod.load(f_in)\n\n logger.debug(\"Loaded data from %s\", filename)\n return data", "def load_config(path):\n with open(path, 'r') as stream:\n return yaml.load(stream)", "def read_yaml_file(filepath: str) -> Dict:\n return yaml.safe_load(read_file(filepath))", "def _cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n cfg = yaml.load(f)\n return cfg", "def load_config(path):\n return yaml.load(open(path, 'r'), Loader=yaml.SafeLoader)", "def load_params(path):\n try:\n with open(path, \"rb\") as f:\n params = yaml.full_load(f)\n return params\n except Exception as e:\n print(e)\n with open(path, \"r\") as f:\n params = yaml.full_load(f, encoding='utf-8')\n return params", "def from_yaml(self, content):\r\n if yaml is None:\r\n raise UnsupportedDeserializationFormat(\"Usage of the YAML aspects requires yaml.\")\r\n\r\n return yaml.load(content, Loader=DeliciousCakeLoader)", "def read_yaml(file):\n with open(file, mode='r') as stream:\n out = yaml.load(stream)\n\n return out", "def loadfrom_yaml(key, path):\n\twith open(path, 'r') as f:\n\t\td = yaml.load(f)\n\t\tnew_namespace(key)\n\t\t\n\t\t# ns = get_namespace(key)\n\n\t\t# for key, value in d.items():\n\t\t# \t_recurse(0, key, value, ns)", "def _load_support(name):\n curr = P.dirname(P.abspath(__file__))\n with open(P.join(curr, \"data\", \"%s.yml\" % name)) as fin:\n return yaml.full_load(fin)", "def loadblogs(filename):\n\n stream = file(filename, 'r')\n data = yaml.load_all(stream)\n return data", "def load(self, filename: str = None):\n if not filename:\n filename = 'config.yml'\n\n if (f := self.__home / filename).exists():\n filename = f\n elif (f := Path(filename)).exists():\n filename = f\n else:\n raise FileNotFoundError(f'File {filename} not found')\n\n with filename.open(encoding='utf-8') as f:\n data = yaml.load(f, Loader=ConfigYAMLLoader)\n self.update(data)\n\n self.__home = Path(filename).parent\n return self", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def read_yaml(path: PathLike) -> Dict:\n with open(path, \"r\") as read_file:\n return yaml.load(read_file, Loader=yaml.UnsafeLoader)", "def load_config(path=\"configs/default.yaml\") -> dict:\n with open(path, \"r\", encoding=\"utf-8\") as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n return cfg", "def vidl(*args, 
**kwargs):\n loadstring = ''\n #now read the file definitions\n for file in args:\n ext = os.path.splitext(file)[1][1:] # get the extension without the dot\n if ext == 'ddf':\n loadstring += open(file).read()\n continue\n elif ext == 'rdf':\n continue\n elif ext == 'udf':\n continue\n else:\n # load the internal definitions for the file type\n loadstring += open(kwargs[ext]).read()\n # now load the file\n loadstring += open(file).read()\n return yaml.load(loadstring)", "def load_config(path):\n return yaml.load(open('config.yaml', 'r'), Loader=yaml.SafeLoader)", "def from_yaml(cls, model: nn.Module, yaml_path: str) -> pl.LightningModule:\n with open(yaml_path, \"r\") as stream:\n kwargs = yaml.full_load(stream)\n\n return cls(model, **kwargs)", "def from_yaml(cls, yml: str):\n\n return cls.from_dict(feast_yaml.yaml_loader(yml, load_single=True))", "def get_cfg_from_yaml(self):\n try:\n with open(self.parsed_cfg_path, 'r') as cfg_yaml:\n self.from_yaml_cfg_dict = yaml.load(cfg_yaml)\n except Exception as exc:\n print(exc)\n traceback.print_exc()\n self.from_yaml_cfg_dict = {}", "def load_config(filename):\n with open(filename, \"r\") as stream:\n try:\n global CONFIG\n CONFIG = yaml.load(stream)\n except yaml.YAMLError as ex:\n print(ex)", "def read_yaml(fname):\n\n with open(fname, 'r') as stream:\n try:\n return yaml.load(stream)\n except yaml.YAMLError as exc:\n return None", "def _yaml_load(src):\n if not isinstance(src, str):\n try:\n src_name = src.name\n except AttributeError:\n src_name = '<yaml stringio>'\n # Force-load file streams as that allows the parser to print\n # much more context when it encounters an error\n src = src.read()\n else:\n src_name = '<yaml string>'\n try:\n return yaml.safe_load(src)\n except yaml.YAMLError:\n logging.error('Parser error when reading YAML from {}.'.format(src_name))\n raise", "def load_config(filepath=None):\n if filepath is None:\n raise ValueError(\"The filepath is None, please check the config file is exist\")\n\n with open(filepath, \"r\") as stream:\n output = dict()\n try:\n content = yaml.load(stream)\n output.update(content)\n return output\n except yaml.YAMLError as e:\n print(e)", "def load(path=None):\n if path is None:\n path = settings.HOST_CONFIG_PATH\n\n try:\n with open(path, 'r') as source:\n data = yaml.safe_load(source.read())\n return data\n except IOError as e:\n pass\n\n return None", "def load(yml_files, debug = False):\n\n dc = {}\n\n if type(yml_files) == dict:\n dc = yml_files\n elif type(yml_files) == str:\n with open(yml_files, \"r\") as f:\n dc = yaml.load(f)\n elif type(yml_files) == list or type(yml_files) == tuple:\n for yml_file in yml_files:\n with open(yml_file, \"r\") as f:\n dc_cur = yaml.load(f)\n # check that now key is overwritten\n for k in dc_cur.keys():\n if k in dc:\n raise Exception (\"Key %s is defined in at least to yml files (e.g. 
in %s)\" % (k, yml_file) )\n dc.update(dc_cur)\n\n return build_plasm_from_dictionary(dc, debug)", "def _include_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:\n fname = os.path.join(os.path.dirname(loader.name), node.value)\n try:\n return _add_reference(load_yaml(fname), loader, node)\n except FileNotFoundError as exc:\n raise XKNXException(f\"{node.start_mark}: Unable to read file {fname}.\") from exc", "def load():\n with open(SAVE_FILE_NAME, 'r') as save_file:\n dikt = yaml.safe_load(save_file)\n if dikt is None:\n dikt = {}\n return dikt", "def load_config(file_path):\n _, ext = os.path.splitext(file_path)\n assert ext in ['.yml', '.yaml'], \"only support yaml files for now\"\n config = yaml.load(open(file_path, 'rb'), Loader=yaml.Loader)\n return config", "def yaml_loads(value):\n return yaml.load(value)", "def load_config():\n proj_dir = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(proj_dir, \"config.yml\")\n conf = yaml.safe_load(open(config_path))\n return conf", "def read_yaml(preset_file: Text) -> Dict:\n with open(preset_file, \"r\") as preset_file:\n return yaml.safe_load(preset_file)", "def load_config(file_path):\n _, ext = os.path.splitext(file_path)\n assert ext in [\".yml\", \".yaml\"], \"only support yaml files for now\"\n config = yaml.load(open(file_path, \"rb\"), Loader=yaml.Loader)\n return config", "def load_from_yaml_file(f: Union[str, TextIO]) -> Dict:\n\n # support environment variables in config\n # https://stackoverflow.com/a/55301129\n\n # For maximum compatibility with PyGeoApi config files, this function is\n # inspired by the yaml_load() function in pygeoapi/util.py here:\n # https://github.com/geopython/pygeoapi/blob/2c567d25f70daa3ed0a047ae548a3dfcd97c7cc2/pygeoapi/util.py#L100\n path_matcher = re.compile(r'.*\\$\\{([^}^{]+)\\}.*')\n\n def path_constructor(loader, node):\n env_var = path_matcher.match(node.value).group(1)\n if env_var not in os.environ:\n raise EnvironmentError(\"Undefined environment variable in config\")\n return str_to_python(path.expandvars(node.value))\n\n class EnvVarLoader(yaml.SafeLoader):\n pass\n\n EnvVarLoader.add_implicit_resolver('!path', path_matcher, None)\n EnvVarLoader.add_constructor('!path', path_constructor)\n do_close = False\n if isinstance(f, str):\n f = open(f, \"r\")\n resp = yaml.load(f, Loader=EnvVarLoader)\n if do_close:\n f.close()\n return resp", "def load(self, filepath):\n yaml_load = lambda fp: yaml.load(fp, Loader=yaml.SafeLoader)\n reader = json.load if Config.isjson(filepath) else yaml_load\n with open(filepath, 'r') as f:\n self.__init__(reader(f))\n return self", "def from_yaml(cls, yaml_file):\n return cls(OrderedDict(yaml.load(open(yaml_file, \"r\"), \n Loader=yaml.FullLoader)))", "def __open_yml_file(path_to_yml_file: str):\n\n yaml_content = None\n\n with open(path_to_yml_file, 'r', encoding='utf8') as stream:\n try:\n yaml_content = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(\"could not read yml file '\" + str() + \"'...\\n\" + str(exc) + \"...\")\n\n return yaml_content", "def load_cfg(yaml_filepath):\n # Read YAML experiment definition file\n with open(yaml_filepath, 'r') as stream:\n cfg = yaml.load(stream)\n cfg = make_paths_absolute(os.path.dirname(yaml_filepath), cfg)\n return cfg", "def load_yaml(fname: str) -> JSON_TYPE:\n try:\n with open(fname, encoding=\"utf-8\") as conf_file:\n # If configuration file is empty YAML returns None\n # We convert that to an empty dict\n return yaml.load(conf_file, Loader=SafeLineLoader) or 
OrderedDict()\n except yaml.YAMLError as exc:\n logger.error(str(exc))\n raise XKNXException(exc) from exc\n except UnicodeDecodeError as exc:\n logger.error(\"Unable to read file %s: %s\", fname, exc)\n raise XKNXException(exc) from exc", "def _parse_from_yaml(self) -> Dict:\n config_path = path.join(path.dirname(path.abspath(__file__)), self.config_file)\n try:\n with open(config_path, \"r\") as f:\n config_dict = yaml.load(f, Loader=yaml.FullLoader)\n return config_dict\n except FileNotFoundError as fnfe:\n raise FileNotFoundError('configuration file not found.')\n except Exception as exc:\n raise Exception('Error while loading config file.')", "def load_config(args, path=\".\"):\n with open(path + \"/config/\" + args.config, 'r') as f:\n config = yaml.safe_load(f)\n\n for key, value in config.items():\n args.__dict__[key] = value" ]
[ "0.77729696", "0.7398894", "0.73696256", "0.73671806", "0.7323886", "0.7295187", "0.7261868", "0.72600996", "0.7254409", "0.72472084", "0.722904", "0.7219554", "0.7214941", "0.7211812", "0.7204388", "0.716829", "0.7168116", "0.71676886", "0.71614975", "0.7131892", "0.71218497", "0.70436037", "0.7036914", "0.70357746", "0.7026194", "0.7022636", "0.70066196", "0.70049715", "0.70011246", "0.69886374", "0.69734126", "0.6936497", "0.6919546", "0.69158006", "0.6905175", "0.690429", "0.68937254", "0.6881072", "0.6878019", "0.68691367", "0.686127", "0.6860055", "0.68591994", "0.6855526", "0.68550426", "0.6853533", "0.68526614", "0.68249613", "0.68208575", "0.6818912", "0.6818379", "0.68062466", "0.6776212", "0.6758441", "0.67449355", "0.6723274", "0.6714276", "0.67140883", "0.67130953", "0.6708241", "0.67076653", "0.6660768", "0.6657048", "0.66516006", "0.6640655", "0.66354793", "0.66313386", "0.66283125", "0.6614756", "0.660137", "0.6593271", "0.65868264", "0.6582285", "0.657354", "0.65719646", "0.6555559", "0.6553046", "0.6543664", "0.65381724", "0.6532446", "0.6522262", "0.6519869", "0.65172565", "0.65160793", "0.6510179", "0.650945", "0.6494266", "0.6488955", "0.64730465", "0.64654046", "0.6464335", "0.6455705", "0.64542264", "0.6442825", "0.64387083", "0.6434776", "0.6430464", "0.64249367", "0.64241546", "0.64111656" ]
0.6669963
61
Reads from file environment.yaml values and changes environment variables.
def change_environment_variables(): values = load('environment.yaml') for key in values.keys(): os.environ[key] = values[key] info(f'Changed environment variables to {values}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_evironment():\n environment = Utility.load_yaml(os.getenv(\"system_file\", \"./system.yaml\"))\n for key in environment:\n if key in os.environ:\n environment[key] = os.getenv(key)\n Utility.environment = environment", "def load_envs_from_file(file_path=constants.ENV_FILE_DEFAULT_PATH.value):\n #pylint: disable=unspecified-encoding\n with open(file_path, \"r\") as file:\n for line in file:\n line = line.strip()\n if not line:\n continue\n if line.startswith(\"#\"):\n continue\n key, value = line.split(\"=\", 1)\n environ[key] = value", "def load_envs(env_file: Optional[str] = None) -> None:\n dotenv.load_dotenv(dotenv_path=env_file, override=True)", "def fix_dot_env_file():\n # Create path to the .env file\n env_file_path = Path(\".env\")\n\n # Ensure that the .env file exists\n env_file_path.touch(exist_ok=True)\n\n # Otherwise, extract all the lines in the .env file\n env_file_lines = env_file_path.read_text().splitlines(keepends=False)\n\n # Extract all the environment variables in the .env file\n env_vars = [line.split(\"=\")[0] for line in env_file_lines]\n\n # For each of the desired environment variables, check if it exists in the .env\n # file\n env_vars_missing = [\n env_var\n for env_var in DESIRED_ENVIRONMENT_VARIABLES.keys()\n if env_var not in env_vars\n ]\n\n # Create all the missing environment variables\n with env_file_path.open(\"a\") as f:\n for env_var in env_vars_missing:\n value = \"\"\n if env_var == \"GPG_KEY_ID\":\n gpg = subprocess.Popen(\n [\"gpg\", \"--list-secret-keys\", \"--keyid-format=long\"],\n stdout=subprocess.PIPE,\n )\n grep = subprocess.Popen(\n [\"grep\", \"sec\"], stdin=gpg.stdout, stdout=subprocess.PIPE\n )\n value = (\n subprocess.check_output(\n [\"sed\", \"-E\", \"s/.*\\\\/([^ ]+).*/\\\\1/\"],\n stdin=grep.stdout,\n )\n .decode()\n .strip(\"\\n\")\n )\n gpg.wait()\n grep.wait()\n if value == \"\":\n value = input(DESIRED_ENVIRONMENT_VARIABLES[env_var])\n f.write(f'{env_var}=\"{value}\"\\n')", "def load_env(env_files):\n env = {}\n for env_file in env_files:\n with open(env_file) as f:\n for line in f:\n if line and line[0] != \"#\":\n try:\n index = line.index(\"=\")\n env[line[:index].strip()] = line[index + 1 :].strip()\n except ValueError:\n # Ignore lines that don't have a '='\n pass\n return env", "def load_env():\n global api_key\n\n # Load the config file\n env_file = path.join(path.dirname(path.abspath(__file__)), 'env.yml')\n try:\n stream = open(env_file, 'r')\n y = yaml.safe_load(stream)\n except IOError:\n print(\"ERROR: Environment file {} not found\".format(env_file))\n sys.exit(3)\n except yaml.parser.ParserError as e:\n print(\"ERROR: Invalid Environment file\")\n print(e)\n sys.exit(3)\n\n api_key = y['api_key']", "def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) 
|\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n tmp_init_exclude = self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = 
''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = ''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = ''.join(tmp_init_mask_str.strip())\n self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n 
# whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = ''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n 
self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. 
Exiting...')\n sys.exit(1)\n return 0", "def load_local_into_env(cls, filename, stage=None):\n env_vars = cls.open_local(filename, stage=stage, for_env=True)\n\n for key, value in env_vars.items():\n if value is None:\n print('Environment variable: {0} has no value and will not be set.'.format(key))\n else:\n if isinstance(value, bool):\n value = str(value).lower()\n elif not isinstance(value, str):\n value = str(value)\n os.environ[key] = value\n\n return env_vars", "def parse_env_file(env_file):\n environment = {}\n\n with open(env_file, 'r') as f:\n for line in f:\n\n if line[0] == '#':\n continue\n\n parse_line = line.strip().split('=')\n if len(parse_line) == 2:\n k, v = parse_line\n environment[k] = v\n else:\n raise errors.DockerException(\n 'Invalid line in environment file {0}:\\n{1}'.format(\n env_file, line))\n\n return environment", "def read_envdir():\n env_dir = \"env\"\n env_vars = glob.glob(os.path.join(env_dir, '*'))\n for env_var in env_vars:\n with open(env_var, 'r') as env_var_file:\n os.environ.setdefault(env_var.split(os.sep)[-1],\n env_var_file.read().strip())", "def load_env_variables(self, environment):\n env_dir = os.path.join(self.__get_environments_root_dir(), environment)\n return read_and_combine_yamls_in_dir(env_dir)", "def envsubst(input_file) -> str:\n with open(input_file, 'r') as base:\n content_after = content_before = base.read()\n for match in re.finditer(r'\\$\\{?(.+?)\\}?', content_before):\n env_var = match.group(1)\n env_var_value = os.getenv(env_var)\n content_after = content_before.replace(match.group(0), env_var_value or '')\n return content_after", "def parse_env_file_form_dict(env='base'):\r\n cur_path = os.path.abspath(os.path.dirname(__file__))\r\n file_path = os.path.join(cur_path, r\"./config/{}.env\".format(env))\r\n\r\n with open(file_path, 'r') as fh:\r\n logger.debug(\"Env file fetched is {}.env\".format(env))\r\n vars_dict = dict(\r\n tuple(line.split('='))\r\n for line in fh.readlines() if not line.startswith('#')\r\n )\r\n # print(\"Parsed dict values are {}\".format(vars_dict))\r\n return vars_dict", "def _patch_etc_environment( cls, env_file, dirs=None, dirs_var='PATH', env_pairs=None ):\n\n def parse_entry( s ):\n m = cls.env_entry_re.match( s )\n return m.group( 1 ), m.group( 2 )\n\n env_file.seek( 0 )\n env = dict( parse_entry( _ ) for _ in env_file.read( ).splitlines( ) )\n\n # Do we have directories to add to a path?\n if dirs is not None:\n path = filter( None, env.get( dirs_var, '' ).split( ':' ) )\n path.extend( dirs )\n env[ dirs_var ] = ':'.join( path )\n\n # Do we have other environment variables to write?\n if env_pairs is not None:\n for (k, v) in env_pairs.iteritems():\n env[k] = v\n\n env_file.seek( 0 )\n env_file.truncate( 0 )\n for var in sorted( env.items( ) ): \n env_file.write( '%s=\"%s\"\\n' % var )", "def SetEnvironmentVars(self):\n for name, value, section in self._marchConfig():\n fetch_name = self._get_param_name(name, section)\n self._set_env_prop(fetch_name, value)", "def load_config(env_file_path: str) -> None:\n if os.path.isfile(env_file_path):\n load_dotenv(dotenv_path=env_file_path)\n else:\n logger.info(f\".env file does not exist on {env_file_path}. 
Loading environment variable from the machine\")", "def test_env_top_dict(self, monkeypatch):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n environment:\n FOO: This is foo\n FOO_WITH_QUOTES: \"\\\"Quoted foo\\\"\" # Quotes included in value\n BAR: \"This is bar\"\n MAGIC: 42\n SWITCH_1: true # YAML boolean\n SWITCH_2: \"true\" # YAML string\n EMPTY: \"\"\n EXTERNAL: # Comes from os env\n EXTERNAL_NOTSET: # Missing in os env\n \"\"\"\n )\n\n monkeypatch.setenv(\"EXTERNAL\", \"Outside world\")\n monkeypatch.delenv(\"EXTERNAL_NOTSET\", raising=False)\n\n config = scuba.config.load_config(\".scuba.yml\")\n\n expect = dict(\n FOO=\"This is foo\",\n FOO_WITH_QUOTES='\"Quoted foo\"',\n BAR=\"This is bar\",\n MAGIC=\"42\", # N.B. string\n SWITCH_1=\"True\", # Unfortunately this is due to str(bool(1))\n SWITCH_2=\"true\",\n EMPTY=\"\",\n EXTERNAL=\"Outside world\",\n EXTERNAL_NOTSET=\"\",\n )\n assert expect == config.environment", "def load_config_yaml(file_path, strict=False, tag='!ENV'):\n pattern = re.compile(r'.*?\\${(\\w+)}.*?')\n loader = yaml.SafeLoader\n\n # the tag will be used to mark where to start searching for the pattern\n # e.g. somekey: !ENV somestring${MYENVVAR}blah blah blah\n loader.add_implicit_resolver(tag, pattern, None)\n\n def constructor_env_variables(loader, node):\n \"\"\"\n Extracts the environment variable from the node's value\n :param yaml.Loader loader: the yaml loader\n :param node: the current node in the yaml\n :return: the parsed string that contains the value of the environment\n variable\n \"\"\"\n value = loader.construct_scalar(node)\n match = pattern.findall(value) # to find all env variables in line\n if match:\n full_value = value\n for g in match:\n full_value = full_value.replace(\n f'${{{g}}}', os.environ.get(g, g)\n )\n return full_value\n return value\n\n loader.add_constructor(tag, constructor_env_variables)\n\n if file_path:\n with open(file_path, 'r') as f:\n config = yaml.load(f, Loader=loader)\n return config if config else {}\n else:\n if strict:\n raise ValueError('Invalid config file path: %s' % file_path)\n else:\n logger.warning('Using default configuration. 
Config file path was: %s', file_path)\n return {}", "def load_config(filename):\n AS[\"config\"] = load_yaml_file(filename)", "def set_env_var(self):\n\n list_env_vars = self.config.items('environment_variables')\n for env_var in list_env_vars:\n os.environ[env_var[0].upper()] = env_var[1]", "def save_to_env_file(self, envs, env_file_location):\n\n if not self.pre_initiated and envs:\n file_instance = PyFunceble.helpers.File(env_file_location)\n\n try:\n content = file_instance.read()\n except FileNotFoundError:\n content = \"\"\n\n for environment_variable, value in envs.items():\n to_write = \"{0}={1}\".format(environment_variable, value)\n regex = r\"{0}=.*\".format(environment_variable)\n\n if content:\n if PyFunceble.helpers.Regex(f\"^{regex}\").get_matching_list(\n content.splitlines()\n ):\n content = PyFunceble.helpers.Regex(regex).replace_match(\n content, to_write\n )\n else:\n if not content.endswith(\"\\n\"):\n content += \"\\n{0}\\n\".format(to_write)\n else:\n content += \"{0}\\n\".format(to_write)\n else:\n content += \"{0}\\n\".format(to_write)\n\n file_instance.write(content, overwrite=True)", "def update_env_in_script(fn, names):\n with open(fn) as ifs:\n content = ifs.read()\n content = _prepend_env_paths(content, names)\n with open(fn, 'w') as ofs:\n ofs.write(content)", "def save_envs_to_file(file_path=constants.ENV_FILE_DEFAULT_PATH.value):\n #pylint: disable=unspecified-encoding\n with open(file_path, \"w\") as file:\n for key, value in environ.items():\n if key in constants.ENV_FILE_ALLOWED_KEYS.value:\n file.write(\"{}={}\\n\".format(key, value))", "def set_envvars(self):\n # self.logger.trace(\"update os.environ with %s\", self.environ)\n for key in os.environ:\n current = self.environ.get(key)\n if current is None:\n del os.environ[key]\n for key, value in self.environ.items():\n if value is not None:\n os.environ[key] = str(value)", "def envload(self):\n\n check = load_dotenv(self.envpath, override=True)\n\n return check", "def read_vars(var_file):\n try:\n with open(var_file, \"r\") as f:\n kargovars = yaml.load(f)\n except:\n print(\n \"Can't read variables file %s\" % var_file\n )\n sys.exit(1)\n return kargovars", "def container_environ(\n services_docker_compose_file, devel_environ, osparc_simcore_root_dir\n):\n dc = dict()\n with services_docker_compose_file.open() as f:\n dc = yaml.safe_load(f)\n\n container_environ = create_environ(skip_system_environ=True)\n container_environ.update(\n {\"OSPARC_SIMCORE_REPO_ROOTDIR\": str(osparc_simcore_root_dir)}\n )\n\n environ_items = dc[\"services\"][THIS_SERVICE].get(\"environment\", list())\n\n for item in environ_items:\n key, value = item.split(\"=\")\n\n match = variable_expansion_pattern.match(value)\n if match:\n varname, default_value = match.groups()\n value = devel_environ.get(varname, default_value)\n container_environ[key] = value\n\n return container_environ", "def update_variables(old_contents):\n new_contents = []\n\n for line in old_contents:\n words = line.split()\n\n for word in words:\n # Using the whitespace split above, the keys in the yaml file will\n # have a : at the end, so we need to strip that off before\n # replacing\n if word.endswith(':'):\n word = word[:-1]\n\n if word in VAR_MAPPINGS.keys():\n line = line.replace(word, VAR_MAPPINGS[word])\n\n new_contents.append(line)\n\n return new_contents", "def _env_var_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> str:\n args = node.value.split()\n\n # Check for a default value\n if len(args) > 1:\n return os.getenv(args[0], \" 
\".join(args[1:]))\n if args[0] in os.environ:\n return os.environ[args[0]]\n logger.error(\"Environment variable %s not defined\", node.value)\n raise XKNXException(node.value)", "def load_env():\n project_dir = abspath(join(dirname(__file__), '../..', '..', '..'))\n dotenv.read_dotenv(join(project_dir, 'nijanthan/practise/base_pro/.env'))", "def _env_vars(self, cred_file=None, section='default'):\n if cred_file:\n parser = ConfigParser.SafeConfigParser()\n parser.optionxform = str\n parser.read(os.path.expanduser(cred_file))\n for name, value in parser.items(section):\n if name == 'OS_AUTH_URL':\n if not self.module.params.get('login_url'):\n self.module.params['login_url'] = value\n if name == 'OS_USERNAME':\n if not self.module.params.get('login_user'):\n self.module.params['login_user'] = value\n if name == 'OS_PASSWORD':\n if not self.module.params.get('login_password'):\n self.module.params['login_password'] = value\n if name == 'OS_TENANT_ID':\n if not self.module.params.get('login_tenant_name'):\n self.module.params['login_tenant_name'] = value\n else:\n if not self.module.params.get('login_url'):\n authurl = os.getenv('OS_AUTH_URL')\n self.module.params['login_url'] = authurl\n\n if not self.module.params.get('login_user'):\n username = os.getenv('OS_USERNAME')\n self.module.params['login_user'] = username\n\n if not self.module.params.get('login_password'):\n password = os.getenv('OS_PASSWORD')\n self.module.params['login_password'] = password\n\n if not self.module.params.get('login_tenant_name'):\n tenant = os.getenv('OS_TENANT_ID')\n self.module.params['login_tenant_name'] = tenant", "def load_environment(path: Optional[str] = None):\n environment = deserialize_environment_from_file(path=path)\n EnvironmentProvider().environment = environment", "def test_parse_from_env_vars(mock_os_environ, settings_update, var_content, expected):\n climate = core.Climate()\n os.environ[\"MY_VAR\"] = var_content\n climate.update(settings_update)\n actual = dict(climate.settings)\n assert actual == expected", "def test_env_top_list(self, monkeypatch):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n environment:\n - FOO=This is foo # No quotes\n - FOO_WITH_QUOTES=\"Quoted foo\" # Quotes included in value\n - BAR=This is bar\n - MAGIC=42\n - SWITCH_2=true\n - EMPTY=\n - EXTERNAL # Comes from os env\n - EXTERNAL_NOTSET # Missing in os env\n \"\"\"\n )\n\n monkeypatch.setenv(\"EXTERNAL\", \"Outside world\")\n monkeypatch.delenv(\"EXTERNAL_NOTSET\", raising=False)\n\n config = scuba.config.load_config(\".scuba.yml\")\n\n expect = dict(\n FOO=\"This is foo\",\n FOO_WITH_QUOTES='\"Quoted foo\"',\n BAR=\"This is bar\",\n MAGIC=\"42\", # N.B. 
string\n SWITCH_2=\"true\",\n EMPTY=\"\",\n EXTERNAL=\"Outside world\",\n EXTERNAL_NOTSET=\"\",\n )\n assert expect == config.environment", "def refresh(self):\n self.__envs = {}\n cmd_result = self.openshift.do_action(\"set\", [\"env\", self.resource_type, self.deployment_name, \"--list\"])\n for line in cmd_result.out().split(\"\\n\"):\n for env_type in self.types:\n match_obj = re.match(env_type.pattern, line)\n if match_obj:\n env = env_type(openshift=self.openshift,\n deployment=self.deployment_name,\n match=match_obj,\n environ=self)\n self.__envs[env.name] = env\n break", "def from_env_file(cls, filename: str):\n # Import standard modules\n from dotenv import load_dotenv\n from os import getenv, path\n\n # Validate `path`\n if not path.isfile(filename):\n err = f\"No such file or directory: '{filename}'\"\n raise FileNotFoundError(err)\n\n load_dotenv(filename)\n\n key = getenv('API_KEY')\n secret = getenv('API_SECRET')\n url = getenv('API_URL')\n\n if not all([key, secret]):\n err = (\n '`API_KEY` and `API_SECRET` are mandatory attributes.\\n'\n 'Please make sure they are contained in your `.env` file'\n )\n raise KeyError(err)\n\n return cls(key, secret, url)", "def envvars(envs):\n old_env = {}\n for var, value in envs.items():\n old_env[var] = os.environ.get(var)\n os.environ[var] = value\n\n yield\n\n for var in envs:\n if old_env[var] is None:\n del os.environ[var]\n else:\n os.environ[var] = old_env[var]", "def create_vars_dot_env(self):\n\n print(\"Creating vars.env in your Google Drive!\")\n\n with open(self.envpath, \"w\") as envfile:\n envfile.write(\"COLAB_ENV = Active\\n\")", "def envs(self, envs):\n self._instructions_setter('ENV', envs)", "def parse_settings(filename, env={}):\n\n if not exists(filename):\n return {}\n\n with open(filename, 'r') as settings:\n for line in settings:\n if line[0] == '#' or len(line.strip()) == 0: # ignore comments and newlines\n continue\n try:\n k, v = map(lambda x: x.strip(), line.split(\"=\", 1))\n env[k] = expandvars(v, env)\n except Exception:\n echo(\"Error: malformed setting '{}', ignoring file.\".format(line), fg='red')\n return {}\n return env", "def config_from_yaml(self, filename):\n with open(filename, 'r') as f:\n config = yaml.load(f)\n config = self._process_config_imports(config)\n self._config.update(config)", "def apply_env_to_config(config):\n env_vars = get_environment_vars()\n update_config_with_env(env_vars, config)", "def test_env_variables_replaced(self):\n password = \"ABC123qwe\"\n parsed_config = self._get_parsed_config(\"full_config.yml\")\n logger_with_replaced_password = parsed_config.loggers[0]\n\n # replaced if env variable is present\n self.assertEqual(\n password,\n logger_with_replaced_password._auth.password,\n msg=\"password is not replaced\",\n )\n\n db_backup_item_with_unchaged_password = parsed_config.backup_items[3]\n\n # not replaced if there is no such env variable\n self.assertEqual(\n \"${MYSQL_PASSWORD}\",\n db_backup_item_with_unchaged_password.password,\n msg=\"password should not be replaced\",\n )", "def parse_env_file(cls, env_file_location):\n\n result = {}\n content = \"\"\n\n file_instance = PyFunceble.helpers.File(env_file_location)\n\n if file_instance.exists():\n content = file_instance.read()\n\n for line in content.splitlines():\n line = line.strip()\n\n if line.startswith(\"#\"):\n continue\n\n if \"#\" in line:\n line = line[: line.find(\"#\")]\n\n if \"=\" in line:\n splited = line.split(\"=\")\n result[splited[0]] = splited[1]\n\n return result", "def 
load_envfile(instance):\n validate(ENVFILE_SCHEMA, instance)\n semantic_validate(instance)\n\n # At the moment the object model is mostly 1-to-1 with the configuration\n # format. In the future that might change; the idea is for the object model\n # to be an abstraction rather than exactly the same as config format, so\n # e.g. same object model might support two different versions of the config\n # format.\n\n # We do however make some minor changes.\n instance = freeze(instance)\n\n # 1. Drop unneeded fields:\n instance = instance.remove(\"Envfile-version\")\n instance = instance.transform([\"local\", \"templates\", match_any, \"type\"],\n discard)\n\n # 2. Some objects want to know their own name:\n def add_name(mapping):\n # Convert {a: {x: 1}} to {a: {name: a, x: 1}}:\n for key, value in mapping.items():\n mapping = mapping.set(key, value.set(\"name\", key))\n return mapping\n\n instance = instance.transform([\"local\", \"templates\"], add_name)\n instance = instance.transform([\"application\", \"requires\"], add_name)\n instance = instance.transform([\"application\", \"services\"], add_name)\n instance = instance.transform(\n [\"application\", \"services\", match_any, \"requires\"], add_name)\n\n return System.create(instance)", "def update_from_env(self):\n for key, value in os.environ.items():\n if not key.startswith(self._prefix):\n continue\n\n setting = key[len(self._prefix):]\n if setting not in self._default_settings:\n continue\n\n setting_value = getattr(self, setting)\n if isinstance(setting_value, bool):\n value = (value == 'True')\n elif isinstance(setting_value, (int, float)):\n value = type(setting_value)(value)\n elif isinstance(setting_value, (list, dict)):\n value = json.loads(value)\n\n setattr(self, setting, value)\n self._explicit_settings.add(setting)", "def handle_dot_env_file(dot_env_file='.env') -> None:\n if os.path.isfile(dot_env_file):\n try:\n load_dotenv(dotenv_path=dot_env_file)\n except Exception:\n raise click.FileError('There was an error when processing the .env file, please check it out.')", "def load_yaml_config(path, env = ''):\n config = load_yaml(path)\n\n if config:\n if 'all' in config:\n all = config['all']\n else:\n return {}\n if env != '':\n if env in config:\n all.update(config[env])\n return all\n else:\n return {}\n\n return config", "def load_yaml_config(path, env = ''):\n config = load_yaml(path)\n\n if config:\n if 'all' in config:\n all = config['all']\n else:\n return {}\n if env != '':\n if env in config:\n all.update(config[env])\n return all\n else:\n return {}\n\n return config", "def test_workflow_environment():\n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"environment-variables\": {\n \"FOO\": \"BAR\",\n \"FOO2\": \"BAR2\"\n }\n }\n \n template_dir = tempfile.mkdtemp(suffix=\"test-workflow-environment-template\")\n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n \n @checkrun\n def execute(workflow_inst):\n def _check():\n assert os.environ['FOO'] == \"BAR\"\n assert os.environ[\"OMP_NUM_THREADS\"] == '1'\n return True\n \n # driver env\n _check()\n \n # worker env\n assert all(workflow_inst.run_on_each_worker(_check).values())\n \n os.environ['FOO'] = 'ORIGINAL_FOO'\n _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute)\n assert execute.didrun\n \n # Environment is restored after execution is finished.\n assert os.environ['FOO'] == 'ORIGINAL_FOO'\n assert 'FOO2' not in os.environ", "def 
env_file_op(api_name,api_version,spread_sheet_id,client_secret_file_name):\n\n lines = [\"SPREAD_SHEET_ID = {0} \\n\".format(spread_sheet_id),\"API_NAME = {0} \\n\".format(api_name),\"API_VERSION = {0} \\n\".format(api_version),\"CLIENT_SECRET_FILE = {0} \\n\".format(client_secret_file_name)]\n \n path = str(os.path.expanduser('~')) +'/.config/hackerjobs/.env'\n with open(path,'w+') as file:\n file.writelines(lines)", "def load_file(file_name: str) -> \"EnvironmentConfig\":\n return pickle.load(open(file_name, \"rb\"))", "def read_env_file(path: str) -> dict: \n with open(path, 'r') as f:\n return dict(tuple(line.replace('\\n', '').split('=',1)) for line\n in f.readlines() if not line.startswith('#'))", "def initFromFile(self):\n\n bootFilename = os.path.join(os.environ['CRAB3_BOOTSTRAP_DIR'], BOOTSTRAP_ENVFILE)\n if not os.path.isfile(bootFilename):\n msg = \"The CRAB3_BOOTSTRAP_DIR environment variable is set, but I could not find %s\" % bootFilename\n raise EnvironmentException(msg)\n else:\n with open(bootFilename) as fd:\n self.update(json.load(fd))", "def del_env(self, envname):\n\n with open(self.envpath, \"r\") as envfile:\n my_vars = {}\n for line in envfile.readlines():\n key, value = self.__kv_pair(line)\n if key is not None:\n my_vars[key] = value\n\n current_value = my_vars.pop(envname, None)\n\n if current_value is None:\n return # do nothing if not set\n\n new_lines = [f\"{k} = {v}\\n\" for k, v in my_vars.items()]\n\n with open(self.envpath, \"w\") as envfile:\n envfile.writelines(new_lines)\n\n os.environ.unsetenv(envname)", "def _setEnv(self):\n try:\n global_env_prfix = \"/GlobalEnv/\"\n if self.etcd_key_prefix is not None:\n global_env_prfix = self.etcd_key_prefix + \"/GlobalEnv/\"\n value = self.etcd.get(global_env_prfix)\n if value[0] is not None:\n jsonConfig = json.loads(value[0].decode('utf-8'))\n for key in jsonConfig.keys():\n os.environ[key] = jsonConfig[key]\n else:\n raise TypeError(\"config manager key {} must be set as \\\n a prerequisite ...\".format(global_env_prfix))\n except Exception as e:\n self.logger.error(\"Exception raised in _setEnv\\\n with error:{}\".format(e))\n raise e", "def load_settings(env=\"prod\"):\n global config\n config = configparser.SafeConfigParser()\n config.read(CONFIG_FILES.get(env))", "def test_set_value_in_env_file(self) -> None:\n\n self.helper.set_env_file_path(self.temp_env_file.name)\n self.helper.set_name(\"GHOST_FINDER\")\n\n self.assertIsNone(self.helper.get_value())\n\n expected = 'GHOST_FINDER=\"no\"\\n'\n\n self.helper.set_value_in_env_file(\"no\")\n\n with open(self.temp_env_file.name, \"r\", encoding=\"utf-8\") as file_stream:\n self.assertTrue(\"no\" in file_stream.read())\n\n expected = \"no\"\n\n self.assertEqual(expected, self.helper.get_value())", "def test_environments_opts_from_file(\n self,\n cd_tmp_path: Path,\n fx_deployments: YamlLoaderDeployment,\n mocker: MockerFixture,\n runway_context: MockRunwayContext,\n ) -> None:\n runway_context.env.root_dir = cd_tmp_path\n mocker.patch.object(\n Module, \"opts_from_file\", {\"environments\": {\"test\": [\"us-east-1\"]}}\n )\n deployment = fx_deployments.load(\"environments_map_str\")\n mod = Module(\n context=runway_context,\n definition=deployment.modules[0],\n deployment=deployment,\n )\n assert mod.environments == {\n \"test\": [\"us-east-1\"],\n \"dev\": \"012345678901/us-west-2\",\n }", "def _read_config(path):\n with open(path) as f:\n data = f.read()\n data = os.path.expandvars(data)\n data = yaml.safe_load(data)\n return data", "def 
read_environment(self):\n # Setup credentials\n if os.getenv(\"DO_API_TOKEN\"):\n self.api_token = os.getenv(\"DO_API_TOKEN\")\n if os.getenv(\"DO_API_KEY\"):\n self.api_token = os.getenv(\"DO_API_KEY\")", "def overwrite_environment_variable(self, key, value):\n if value is not None:\n self._printer(\"$env:{0} = \\\"{1}\\\"\".format(key, value))\n else:\n self._printer(\"$env:{0} = \\\"\\\"\".format(key))", "def load_yaml(fname):\n with open(fname) as f:\n val = yaml.safe_load(os.path.expandvars(f.read()))\n return val", "def parse(self, filepath: str) -> dict:\n\n properties = {\"env_vars\": {}, \"inputs\": [], \"outputs\": []}\n reader = self._get_reader(filepath)\n parser = self._get_parser(reader.language)\n\n if not parser:\n return properties\n\n for chunk in reader.read_next_code_chunk():\n if chunk:\n for line in chunk:\n matches = parser.parse_environment_variables(line)\n for key, match in matches:\n if key == \"env_vars\":\n properties[key][match.group(1)] = match.group(2)\n else:\n properties[key].append(match.group(1))\n\n return properties", "def test_settings_env_file_and_env(mock_env_settings_file, tmpdir):\n climate = core.Climate(prefix=\"TEST_STUFF\")\n assert isinstance(climate.settings, Mapping)\n assert dict(climate.settings) == {\n \"testgroup\": {\"testvar\": 7, \"test_var\": 6},\n \"othergroup\": {\"blabla\": 555},\n \"testgroup_test_var\": 9,\n }", "def test_env_alias(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n aliases:\n al:\n script: Don't care\n environment:\n FOO: Overridden\n MORE: Hello world\n \"\"\"\n )\n\n config = scuba.config.load_config(\".scuba.yml\")\n\n assert config.aliases[\"al\"].environment == dict(\n FOO=\"Overridden\",\n MORE=\"Hello world\",\n )", "def set_env():\n env.local_dotenv_path = os.path.join(\n os.path.dirname(__file__), 'etc/base_image/.env')\n dotenv.load_dotenv(env.local_dotenv_path)\n env.project_name = os.environ.get('PROJECT_NAME', '')\n env.project_dir = posixpath.join('/srv/images/', env.project_name)\n env.use_ssh_config = True\n\n # Bug: when setting this inside a function. 
Using host_string as workaround\n env.hosts = [os.environ.get('HOST_NAME', ''), ]\n env.host_string = os.environ.get('HOST_NAME', '')\n\n env.base_image_name = os.environ.get('BASE_IMAGE_NAME', '')\n env.build_dir = '/srv/build'\n env.local_path = os.path.dirname(__file__)", "def set_environment_variables(env_dict, session):\n for key, value in env_dict.items():\n session.env[key] = value", "def config_env_var_verify():\n with open('skywalking/config.py', 'r') as config_file:\n data = config_file.read().replace('\\n', '')\n for each in OPTIONS.keys():\n if f'_{each.upper()}' not in data:\n raise Exception(f'Environment variable for {each.upper()} is not found in config.py\\n'\n f'This means you have a mismatch of config.py variable and env var name')", "def get_environmentals(self):\n for k, v in utils.slurm_envs(default.SBATCH_VARS_FOR_WORKFLOW).items():\n setattr(self, k, v)", "def load_env(env_path=''):\n if not env_path:\n env_path = Path.home() / '.pincidents'\n load_dotenv(dotenv_path=env_path)", "def env_config():\n # setup\n env = {'ELB_GCP_PROJECT': 'expected-gcp-project',\n 'ELB_GCP_REGION': 'expected-gcp-region',\n 'ELB_GCP_ZONE': 'expected-gcp-zone',\n 'ELB_BATCH_LEN': '93',\n 'ELB_CLUSTER_NAME': 'expected-cluster-name',\n 'ELB_RESULTS': 'gs://expected-results',\n 'ELB_USE_PREEMPTIBLE': 'true',\n 'ELB_BID_PERCENTAGE': '91'}\n\n for var_name in env:\n os.environ[var_name] = str(env[var_name])\n\n yield env\n\n # cleanup\n for var_name in env:\n # os.unsetenv does not work on every system\n del os.environ[var_name]", "def test_vm_role_from_env_file(yaml_file):\n pair = get_environment_pair(yaml_file)\n if not pair:\n pytest.skip(\"Unable to resolve environment pair\")\n template_params = pair[\"yyml\"].get(\"parameters\") or {}\n env_params = pair[\"eyml\"].get(\"parameters\") or {}\n\n if \"vm_role\" not in template_params:\n pytest.skip(\"vm_role not in parameters\")\n\n if \"vm_role\" not in env_params:\n pytest.skip(\"vm_role not in environment file. 
Error checked elsewhere\")\n\n vm_role = env_params.get(\"vm_role\", \"\")\n if not isinstance(vm_role, string_types):\n vm_role = str(vm_role)\n msg = \"vm_role {} contains non-alphanumeric or non-underscore characters\".format(\n vm_role\n )\n assert re.match(r\"^\\w+$\", vm_role), msg", "def exp_config():\n with open(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"experiment.yaml\")\n ) as f:\n exp_config = list(yaml.safe_load_all(f))\n\n for config in exp_config[0]:\n backward.populate_space(config)\n\n return exp_config", "def test_env_file(self, tmp_path: Path) -> None:\n test_env = tmp_path / \"test.env\"\n test_env.write_text(\"test_value: test\")\n\n result = CFNgin(ctx=self.get_context(), sys_path=tmp_path)\n assert result.env_file[\"test_value\"] == \"test\"\n\n test_us_east_1 = tmp_path / \"test-us-east-1.env\"\n test_us_east_1.write_text(\"test_value: test-us-east-1\")\n\n test_us_west_2 = tmp_path / \"test-us-west-2.env\"\n test_us_west_2.write_text(\"test_value: test-us-west-2\")\n\n lab_ca_central_1 = tmp_path / \"lab-ca-central-1.env\"\n lab_ca_central_1.write_text(\"test_value: lab-ca-central-1\")\n\n result = CFNgin(ctx=self.get_context(), sys_path=tmp_path)\n assert result.env_file[\"test_value\"] == \"test-us-east-1\"\n\n result = CFNgin(ctx=self.get_context(region=\"us-west-2\"), sys_path=tmp_path)\n assert result.env_file[\"test_value\"] == \"test-us-west-2\"\n\n result = CFNgin(\n ctx=self.get_context(name=\"lab\", region=\"ca-central-1\"), sys_path=tmp_path\n )\n assert result.env_file[\"test_value\"] == \"lab-ca-central-1\"", "def test_parse_from_file_vars(original, file_exists, mock_os_environ, tmpdir):\n climate = core.Climate()\n filepath = tmpdir.join(\"testvarfile\")\n filename = str(filepath)\n if file_exists:\n with open(filename, \"w\") as f:\n f.write(\"apassword\\n\")\n update_dict = {\"this_var_from_file\": filename}\n if original:\n update_dict[\"this_var\"] = \"the original password\"\n climate.update(update_dict)\n assert isinstance(climate.settings, Mapping)\n actual = dict(climate.settings)\n expected = {}\n if original:\n expected = {\"this_var\": \"the original password\"}\n if file_exists:\n expected = {\"this_var\": \"apassword\"}\n assert actual == expected", "def read_config_environment(self, config_data=None, quiet=False):\r\n\r\n # Add all variables that start with KAGGLE_ to config data\r\n\r\n if config_data is None:\r\n config_data = {}\r\n for key, val in os.environ.items():\r\n if key.startswith('KAGGLE_'):\r\n config_key = key.replace('KAGGLE_', '', 1).lower()\r\n config_data[config_key] = val\r\n\r\n return config_data", "def test_parse_from_file_root_var(mock_os_environ, tmpdir):\n climate = core.Climate()\n filepath = tmpdir.join(\"testfile.yaml\")\n filename = str(filepath)\n with open(filename, \"w\") as f:\n f.write(\"b: 1\\n\" \"c: 2\\n\")\n update_dict = {\n \"a\": \"old\",\n \"b\": \"old\",\n \"_from_file\": filename,\n \"d\": \"old\",\n }\n climate.update(update_dict)\n assert isinstance(climate.settings, Mapping)\n actual = dict(climate.settings)\n expected = {\"a\": \"old\", \"b\": 1, \"c\": 2, \"d\": \"old\"}\n assert actual == expected", "def parse_env(env_file, env=None):\n parser = configparser.ConfigParser()\n parser.read_file(env_file)\n\n if env is None:\n if not parser.has_section('pblog'):\n raise EnvError(\"pblog section was not found\")\n env = parser['pblog']['env']\n\n env_section = 'pblog:%s' % env\n if not parser.has_section(env_section):\n raise EnvError(\"Environment %s not defined\" % 
env_section)\n\n return Environment(\n name=env,\n url=parser[env_section]['url'].rstrip('/'),\n username=parser[env_section]['username'],\n local_app_module=parser[env_section].get('wsgi'))", "def read_config(filename, args):\n # Initial vars\n config = defaultdict(dict)\n splitter = operator.methodcaller('split', ' ')\n\n converters = {\n __script__: {\n 'env': safe_path,\n 'pre_requirements': splitter,\n },\n 'pip': {\n 'allow_external': splitter,\n 'allow_unverified': splitter,\n }\n }\n default = copy.deepcopy(CONFIG)\n sections = set(iterkeys(default))\n\n # Expand user and environ vars in config filename\n filename = os.path.expandvars(os.path.expanduser(filename))\n\n # Read config if it exists on disk\n if os.path.isfile(filename):\n parser = SafeConfigParser()\n\n try:\n parser.read(filename)\n except ConfigParserError:\n error('Cannot parse config file at {0!r}'.format(filename))\n\n # Apply config for each possible section\n for section in sections:\n if not parser.has_section(section):\n continue\n\n items = parser.items(section)\n\n # Make auto convert here for integers and boolean values\n for key, value in items:\n try:\n value = int(value)\n except (TypeError, ValueError):\n try:\n value = bool(strtobool(value))\n except ValueError:\n pass\n\n if section in converters and key in converters[section]:\n value = converters[section][key](value)\n\n config[section][key] = value\n\n # Update config with default values if necessary\n for section, data in iteritems(default):\n if section not in config:\n config[section] = data\n else:\n for key, value in iteritems(data):\n config[section].setdefault(key, value)\n\n # Update bootstrap config from parsed args\n keys = set((\n 'env', 'hook', 'pre_requirements', 'quiet', 'recreate', 'requirements'\n ))\n\n for key in keys:\n value = getattr(args, key)\n config[__script__].setdefault(key, value)\n\n if key == 'pre_requirements' and not value:\n continue\n\n if value is not None:\n config[__script__][key] = value\n\n return config", "def test_env_invalid(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n environment: 666\n \"\"\"\n )\n self._invalid_config(\"must be list or mapping\")", "def env(config, args):\n print config.template(\"scripts/env.sh\", project=args.project)", "def test_env_params_are_defined_in_template(yaml_file):\n\n bad = []\n template_pair = get_environment_pair(yaml_file)\n\n if not template_pair:\n pytest.skip(\"No yaml/env pair could be determined\")\n\n template = template_pair.get(\"yyml\").get(\"parameters\", {})\n environment = template_pair.get(\"eyml\").get(\"parameters\", {})\n\n if not isinstance(template, dict) or not isinstance(environment, dict):\n pytest.skip(\"No parameters defined in environment or template\")\n\n template = template.keys()\n environment = environment.keys()\n\n for parameter in environment:\n if parameter not in template:\n bad.append(\n (\n \"{} is defined in the environment file but not in \"\n + \"the template file \"\n ).format(parameter)\n )\n msg = (\n \"All parameters defined in an environment file must \"\n + \"be defined in the template file. \"\n + \". 
\".join(bad)\n )\n\n assert not bad, msg", "def load_environment(self, env):\n self.env = env", "def overwrite_environment_variable(self, key, value):\n if value is not None:\n value = BashParentEnvironment._format_environment_value(value)\n self._printer(\"export {0}=\\\"{1}\\\"\".format(key, value))\n else:\n self._printer(\"unset {0}\".format(key))", "def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]", "def process_inventory(inv_name, output_file):\n try:\n gen_dict = _load_yml(inv_name)\n env_vars_dict = gen_dict.get('deployment-environment')\n out = open(output_file, 'w')\n if env_vars_dict is None or env_vars_dict == {}:\n out.write('---\\n')\n out.write('deployment_environment_variables: {}\\n')\n else:\n out.write('---\\n')\n out.write('deployment_environment_variables:\\n')\n for k in env_vars_dict:\n out.write(' ' + k + ': ' + env_vars_dict[k] + '\\n')\n out.close()\n except Exception:\n sys.stderr.write(\"Unable to write the file: \" + output_file + \"\\n\")\n sys.exit(1)", "def load_config(filename):\n with open(filename, \"r\") as stream:\n try:\n global CONFIG\n CONFIG = yaml.load(stream)\n except yaml.YAMLError as ex:\n print(ex)", "def load_service_variables_for_env(self, service, environment):\n service_dir_for_env = \\\n os.path.join(self.__get_services_root_dir_for_env(environment), service)\n return read_and_combine_yamls_in_dir(service_dir_for_env)", "def loadenv(self):\n logging.debug('Loading OpenStack authentication information from environment')\n # Grab any OS_ found in environment\n for var in os.environ:\n if var[0:3] == 'OS_':\n value = os.environ[var]\n # Don't print out password or token to debug\n if 'PASSWORD' not in var or 'TOKEN' not in var:\n logging.debug('Using %s from environment for %s', value, var)\n self.creds[var[3:].lower()] = value", "def test_get_value_from_file(self) -> None:\n\n self.temp_env_file.write(\"IS_THIS_A_GHOST=yes\\n\")\n\n self.temp_env_file.seek(0)\n\n self.helper.set_env_file_path(self.temp_env_file.name)\n self.helper.set_name(\"IS_THIS_A_GHOST\")\n\n expected = \"yes\"\n actual = self.helper.get_value_from_env_file()\n\n self.assertEqual(expected, actual)", "def load(self, file):\n self.__log(f'Starting to load settings from {file}', 'warning')\n contents = load_yaml(file)\n for item in contents:\n if item == 'options':\n self.__log(f'Found options in {file}, loading them', 'warning')\n for i in contents[item]:\n self.__log(f'Setting {i.lower()} to {contents[item][i]}')\n self.set(i.lower(), contents[item][i])\n elif item == 'config':\n self.__log(f'Found configuration variables in {file}, loading them', 'warning')\n for i in contents[item]:\n self.__log(f'Setting {i.upper()} to {contents[item][i]}')\n self.set(i.upper(), contents[item][i])\n else:\n raise UnknownYamlContentError", "def _set_ci_environment_variables(parent_shell):\n variables_to_set = {\n \"JOBSTAMPS_ALWAYS_USE_HASHES\": \"1\",\n \"CLINT_FORCE_COLOR\": \"1\",\n \"PYTHONDONTWRITEBYTECODE\": \"1\"\n }\n\n for key, value in variables_to_set.items():\n os.environ[key] = value\n parent_shell.overwrite_environment_variable(key, value)", "def parse_dotenv(filename):\n values = {}\n linenum = 0\n try:\n with open(filename, 'r') as f:\n for line in f:\n line = line.rstrip('\\n')\n linenum += 1\n if line and not line.startswith('#'):\n key, value = line.split('=', 1)\n values[key] = value\n except ValueError:\n c.eprint('%s: line %s: invalid format' % (filename, linenum))\n raise 
Exception()\n return values", "def setup_method(self, method):\n super().setup_method(method)\n\n Env.ENVIRON = {}\n self.env.read_env(\n Path(__file__, is_file=True)('test_env.txt'),\n PATH_VAR=Path(__file__, is_file=True).__root__\n )", "def read_app_settings():\n document = open('config/config.yaml', 'r')\n return yaml.load(document, Loader=yaml.FullLoader)", "def read_file(self, filename):\n # The config file is Python code -- makes life easy.\n config_vars = {}\n try:\n execfile(filename, config_vars)\n except IOError, exc:\n if exc.filename is None: # arg! execfile() loses filename\n exc.filename = filename\n raise exc\n self.set_from_dict(config_vars)", "def push(self):\n\t\tif self.old_vars is not None:\n\t\t\treturn\n\n\t\tself.old_vars = {}\n\t\tfor k, v in self.vars.items():\n\t\t\tself.old_vars[k] = os.environ.get(k)\n\t\t\tif v is None:\n\t\t\t\tif k in os.environ:\n\t\t\t\t\tdel os.environ[k]\n\t\t\telse:\n\t\t\t\tos.environ[k] = v", "def env(name, value):\n import os\n os.environ[name] = value", "def apply_config(filename):\n with open(filename) as config_file:\n config = json.load(config_file)\n for setting, value in config.items():\n CoreConfig.__dict__[setting] = value" ]
[ "0.7406269", "0.69363457", "0.66602415", "0.6634683", "0.65131164", "0.64999026", "0.649091", "0.6366587", "0.63470614", "0.6244345", "0.61629504", "0.6037942", "0.6028842", "0.5990134", "0.5982547", "0.59603816", "0.59407336", "0.59406185", "0.5874832", "0.5864534", "0.58374506", "0.58318186", "0.5820436", "0.5795604", "0.5777806", "0.5751473", "0.5736653", "0.57312787", "0.5728917", "0.5696418", "0.56870896", "0.5685378", "0.56843615", "0.56790537", "0.566509", "0.5661225", "0.5646417", "0.5643091", "0.5642174", "0.563623", "0.5634392", "0.5631463", "0.5623577", "0.5599448", "0.5598129", "0.5597426", "0.5585577", "0.55839825", "0.55839825", "0.5574768", "0.5562031", "0.555074", "0.5540737", "0.5535308", "0.5505367", "0.5480842", "0.54784864", "0.5472438", "0.54549783", "0.5452318", "0.545111", "0.54473066", "0.5447052", "0.5432674", "0.54302335", "0.54249835", "0.541743", "0.54166794", "0.54141396", "0.5408052", "0.53989387", "0.5395611", "0.53950214", "0.5391364", "0.5391299", "0.53854907", "0.538341", "0.53772163", "0.5357316", "0.53560096", "0.53432965", "0.53422946", "0.53360677", "0.5334712", "0.53347087", "0.53344584", "0.53298986", "0.5327289", "0.5327124", "0.53268874", "0.53252167", "0.53250563", "0.5316707", "0.53056043", "0.5303237", "0.53013164", "0.5297767", "0.5296617", "0.52949446", "0.52932924" ]
0.8063575
0
Initialize the Referee. Creates a turn order based on the ages of the players. If none of the players given have colors, then colors are assigned to the players. Otherwise, the colors of the players are used. If the players have colors, then they must be unique. Creates a Fish game board for the players to play on.
def __init__(self, players, board_size, board=None, timeout=10): if not 2 <= len(players) <= 4: raise ValueError("Invalid number of players provided.") if board is None: board = Board(*board_size) self.__check_board_is_valid(board, players) self.board = board self.__set_colors(players) self.players = {p.get_color(): p for p in players} self.state = State([BoardPlayer(p.get_color()) for p in players], board) self.violators = [] self.timeout = timeout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, player1, player2):\n # # players of the game {player1name: {color: , red_marbles:}}\n # self._players = {player1[0]: {\"name\": player1[0], \"color\": player1[1]},\n # player2[0]: {\"name\": player2[0], \"color\": player2[1]}}\n # # empty board, no marbles yet\n # self._board = self.create_board()\n # # current player's turn\n # self._turn = None\n # # winner state\n # self._winner = None\n # # red marbles captured for each player, needs addition of black and white marbles\n # self._captured = {player1[0]: 0, player2[0]: 0}\n pass", "def init_board(self):\n\n self.__board = dict()\n order = ['rook', 'knight', 'bishop', 'queen', 'king', 'bishop',\n 'knight', 'rook']\n for j, name in enumerate(order):\n\n self.__board[(0, j)] = ChessGame.Piece( name, ChessGame.WHITE)\n self.__board[(7, j)] = ChessGame.Piece( name, ChessGame.BLACK)\n self.__board[(1, j)] = ChessGame.Piece('pawn', ChessGame.WHITE)\n self.__board[(6, j)] = ChessGame.Piece('pawn', ChessGame.BLACK)\n\n self.__players = { ChessGame.WHITE: set(), ChessGame.BLACK: set() }\n for color in (ChessGame.BLACK, ChessGame.WHITE):\n self.__players[color] = {(x, y) for (x, y), piece in\n self.__board.iteritems() if piece.color == color }\n\n return", "def __init__(self, num_players):\n self.num_players = num_players\n self.firework = [[], [], [], [], []]\n self.nb_blue_stone = MAX_BLUE_STONE\n self.nb_red_stone = MAX_RED_STONE\n self.draw = None\n self.hands = None\n self.fill_draw()\n random.shuffle(self.draw)\n self.discard = []\n self.draw_initial_hands()", "def __init__(self):\n\n self._board = list()\n self._palace_board_blue = ['d9', 'e8', 'e10', 'f9']\n self._palace_board_red = ['d2', 'e1', 'e3', 'f2']\n self._palace_diagonal_blue = ['d8', 'd10', 'e9', 'f8', 'f10']\n self._palace_diagonal_red = ['d1', 'd3', 'e2', 'f1', 'f3']\n self._board_columns = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\n self._board_rows = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']\n self._general_position_blue = 'e9'\n self._general_position_red = 'e2'\n\n self.setup_board()", "def __init__(self):\n\n self._turn = 'blue'\n self._active_pieces = {'blue': [], 'red': []}\n self._game_state = 'UNFINISHED'\n self._board = [['' for j in range(10)] for i in range(9)]\n\n # add pieces to the board\n self.add_piece('red', Chariot, 'a1')\n self.add_piece('red', Elephant, 'b1')\n self.add_piece('red', Horse, 'c1')\n self.add_piece('red', Guard, 'd1')\n self.add_piece('red', Guard, 'f1')\n self.add_piece('red', Elephant, 'g1')\n self.add_piece('red', Horse, 'h1')\n self.add_piece('red', Chariot, 'i1')\n self.add_piece('red', General, 'e2')\n self.add_piece('red', Cannon, 'b3')\n self.add_piece('red', Cannon, 'h3')\n self.add_piece('red', Soldier, 'a4')\n self.add_piece('red', Soldier, 'c4')\n self.add_piece('red', Soldier, 'e4')\n self.add_piece('red', Soldier, 'g4')\n self.add_piece('red', Soldier, 'i4')\n self.add_piece('blue', Chariot, 'a10')\n self.add_piece('blue', Elephant, 'b10')\n self.add_piece('blue', Horse, 'c10')\n self.add_piece('blue', Guard, 'd10')\n self.add_piece('blue', Guard, 'f10')\n self.add_piece('blue', Elephant, 'g10')\n self.add_piece('blue', Horse, 'h10')\n self.add_piece('blue', Chariot, 'i10')\n self.add_piece('blue', General, 'e9')\n self.add_piece('blue', Cannon, 'b8')\n self.add_piece('blue', Cannon, 'h8')\n self.add_piece('blue', Soldier, 'a7')\n self.add_piece('blue', Soldier, 'c7')\n self.add_piece('blue', Soldier, 'e7')\n self.add_piece('blue', Soldier, 'g7')\n self.add_piece('blue', Soldier, 'i7')", "def 
__init__(self, *, red_cards: Optional[List[Card]] = None, blue_cards: Optional[List[Card]] = None,\r\n neutral_card: Optional[Card] = None, starting_player=None,\r\n bitboard_king: Optional[List[int]] = None, bitboard_pawns: Optional[List[int]] = None):\r\n if not (red_cards and blue_cards and neutral_card):\r\n cards = set(ONITAMA_CARDS)\r\n card1, card2 = random.sample(cards, k=2)\r\n red_cards = [ONITAMA_CARDS.get(card1), ONITAMA_CARDS.get(card2)]\r\n cards -= {card1, card2}\r\n\r\n card1, card2 = random.sample(cards, k=2)\r\n blue_cards = [ONITAMA_CARDS.get(card1), ONITAMA_CARDS.get(card2)]\r\n cards -= {card1, card2}\r\n\r\n card = random.sample(cards, k=1)[0]\r\n neutral_card = ONITAMA_CARDS.get(card)\r\n cards.remove(card)\r\n if starting_player is None:\r\n starting_player = neutral_card.starting_player\r\n \r\n self.red_cards = red_cards\r\n self.blue_cards = blue_cards\r\n self.neutral_card = neutral_card\r\n self.current_player = starting_player\r\n # board\r\n self.bitboard_king = bitboard_king or [0b00100_00000_00000_00000_00000, 0b00000_00000_00000_00000_00100]\r\n self.bitboard_pawns = bitboard_pawns or [0b11011_00000_00000_00000_00000, 0b00000_00000_00000_00000_11011]", "def setup(self):\n piece_order = ['ROOK','KNIGHT','BISHOP','QUEEN','KING','BISHOP','KNIGHT','ROOK']\n for row,colour in zip([0,7],['BLACK','WHITE']):\n for col,piece in enumerate(piece_order):\n self.board[row][col] = colour + '_' + piece\n \n for row,colour in zip([1,6],['BLACK','WHITE']):\n for i in range(8):\n self.board[row][i] = colour + '_' + 'PAWN'\n \n self.toplay = 'WHITE'", "def __init__(self, players):\n self.players = players\n self.board = Board()", "def initialize_board():\n # Wipe current board\n for x in range(len(THE_BOARD.positions)):\n for y in range(len(THE_BOARD.positions)):\n THE_BOARD.positions[x][y] = ' '\n\n all_pieces = []\n\n # Pawns\n white_pawns = [Pawn('white', (6, i)) for i in range(len(THE_BOARD.positions[6]))]\n black_pawns = [Pawn('black', (1, i)) for i in range(len(THE_BOARD.positions[1]))]\n all_pieces.extend(white_pawns)\n all_pieces.extend(black_pawns)\n\n # Rooks\n rook1 = Rook('black', (0, 0))\n all_pieces.append(rook1)\n rook2 = Rook('black', (0, 7))\n all_pieces.append(rook2)\n rook3 = Rook('white', (7, 0))\n all_pieces.append(rook3)\n rook4 = Rook('white', (7, 7))\n all_pieces.append(rook4)\n\n # Knights\n knight1 = Knight('black', (0, 1))\n all_pieces.append(knight1)\n knight2 = Knight('black', (0, 6))\n all_pieces.append(knight2)\n knight3 = Knight('white', (7, 1))\n all_pieces.append(knight3)\n knight4 = Knight('white', (7, 6))\n all_pieces.append(knight4)\n\n # Bishops\n bishop1 = Bishop('black', (0, 2))\n all_pieces.append(bishop1)\n bishop2 = Bishop('black', (0, 5))\n all_pieces.append(bishop2)\n bishop3 = Bishop('white', (7, 2))\n all_pieces.append(bishop3)\n bishop4 = Bishop('white', (7, 5))\n all_pieces.append(bishop4)\n\n # King and Queen\n queen1 = Queen('black', (0, 4))\n all_pieces.append(queen1)\n queen2 = Queen('white', (7, 4))\n all_pieces.append(queen2)\n king1 = King('black', (0, 3))\n all_pieces.append(king1)\n king2 = King('white', (7, 3))\n all_pieces.append(king2)\n\n # Add every single piece to the board. 
Only then can they update their spaces threatened\n for piece in all_pieces:\n THE_BOARD.update(piece)\n THE_BOARD.update_all_spaces_threatened()", "def __init__(self, ghost_players=[]):\n self.players = [Player(), Player(), Player(), Player()]\n self.hist = []\n self.round = 1\n self.current_player = 0\n self.first_winner_was = -1\n self.current_dice = -1\n self.observation_pending = False\n self.current_move_pieces = []\n self.current_enemys = []\n self.current_start_attempts = 0\n self.enemys_order = {\n 0: [1, 2, 3],\n 1: [2, 3, 0],\n 2: [3, 0, 1],\n 3: [0, 1, 2]\n }\n self.game_winners = []\n self.ghost_players = ghost_players", "def create(self):\n\n for i in range(8):\n # Create white pawns\n self.board[1][i] = Piece(\"pawn\", 1, i, 0)\n # Create black pawns\n self.board[6][i] = Piece(\"pawn\", 6, i, 1)\n\n # Create white rooks\n self.board[0][0] = Piece(\"rook\", 0, 0, 0)\n self.board[0][7] = Piece(\"rook\", 0, 7, 0)\n\n # Create black rooks\n self.board[7][0] = Piece(\"rook\", 7, 0, 1)\n self.board[7][7] = Piece(\"rook\", 7, 7, 1)\n\n # Create white knights\n self.board[0][1] = Piece(\"knight\", 0, 1, 0)\n self.board[0][6] = Piece(\"knight\", 0, 6, 0)\n\n # Create black knights\n self.board[7][1] = Piece(\"knight\", 7, 1, 1)\n self.board[7][6] = Piece(\"knight\", 7, 6, 1)\n\n # Create white bishop\n self.board[0][2] = Piece(\"bishop\", 0, 2, 0)\n self.board[0][5] = Piece(\"bishop\", 0, 5, 0)\n\n # Create black bishop\n self.board[7][2] = Piece(\"bishop\", 7, 2, 1)\n self.board[7][5] = Piece(\"bishop\", 7, 5, 1)\n\n # Create white queen and king\n self.board[0][3] = Piece(\"queen\", 0, 3, 0)\n self.board[0][4] = Piece(\"king\", 0, 4, 0)\n\n # Create black queen and king\n self.board[7][3] = Piece(\"queen\", 7, 3, 1)\n self.board[7][4] = Piece(\"king\", 7, 4, 1)", "def __init__(self, player):\n \n self.colour = player\n self.game_in_head = Board()", "def __init__(self, players):\n\n # Define the players\n self.players = players\n\n # Define who starts the game\n self.nplayer = 1 \n\n # Define the board\n self.board = [0] * 9", "def __init__(self, player):\r\n other_player = \"lower\" if player == \"upper\" else \"upper\"\r\n \r\n #set our sides \r\n self.player_information[\"us\"][\"player_side\"] = player\r\n self.player_information[\"them\"][\"player_side\"] = other_player\r\n\r\n #create our board edge and board representation\r\n self.board_edge = hex_boundary_getter((0,0), 4, [])\r\n self.board_array = generate_board()", "def __init__(self, rows=6, columns=7, win_length=4):\n\n self._board = [[0 for i in xrange(columns)] for i in xrange(rows)]\n self._rows = rows\n self._columns = columns\n self._win_length = win_length\n self.current_player = None\n self.winner = None\n print \"The game is afoot!\"", "def populate(self):\n counter = 0\n placers = [piece_class.Rook, piece_class.Knight, piece_class.Bishop, \n piece_class.Queen, piece_class.King, piece_class.Bishop, \n piece_class.Knight, piece_class.Rook, piece_class.Pawn, \n piece_class.Pawn, piece_class.Pawn, piece_class.Pawn, \n piece_class.Pawn, piece_class.Pawn, piece_class.Pawn, \n piece_class.Pawn]\n \n \n #Creates new piece objects\n for i in placers:\n self.board[counter] = (i(WHITE, piece_class.PIECEDICT[WHITE][i]))\n counter += 1\n \n counter = 48\n placers.reverse()\n placers[11], placers[12] = placers[12], placers[11]\n \n for i in placers:\n self.board[counter] = (i(BLACK, piece_class.PIECEDICT[BLACK][i]))\n counter += 1\n\n# self.board[11] = self.empty\n# self.board[12] = self.empty\n# self.board[57] = 
self.empty\n# self.board[58] = self.empty\n# self.board[5] = self.empty\n# self.board[61] = self.empty\n# self.board[59] = self.empty\n# self.board[55] = self.empty\n# self.board[54] = self.empty\n# self.board[53] = self.empty\n# self.board[52] = self.empty\n# self.board[51] = self.empty\n# self.board[50] = self.empty\n# self.board[49] = self.empty\n# self.board[48] = piece_class.Bishop(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Bishop])\n# self.board[(40-16)] = piece_class.Rook(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Rook])\n# self.board[(41-16)] = piece_class.Rook(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Rook])\n## self.board[(41-7)] = piece_class.Bishop(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Bishop])\n# self.board[56] = piece_class.King(BLACK, piece_class.PIECEDICT[BLACK][piece_class.King])\n \n# self.board[18] = piece_class.Rook(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Rook])\n# self.board[21] = piece_class.Bishop(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Bishop])\n# self.board[27] = piece_class.Bishop(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Bishop])\n# self.board[36] = piece_class.Knight(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Knight])\n# self.board[41] = piece_class.Rook(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Rook])\n## self.board[32] = piece_class.Bishop(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Bishop])\n# self.board[48] = piece_class.King(WHITE, piece_class.PIECEDICT[WHITE][piece_class.King])\n## self.board[59] = piece_class.King(BLACK, piece_class.PIECEDICT[BLACK][piece_class.King])\n# self.board[49] = piece_class.Pawn(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Pawn])\n# self.board[50] = piece_class.Queen(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Queen])\n# self.board[59] = piece_class.Bishop(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Bishop])\n# self.board[52] = piece_class.Pawn(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Pawn])\n# \n# del self.board[64:]\n\n return self.board", "def __init__(self):\n self.board = [\n BS, BS, BS, BS,\n BS, BS, BS,\n BS, BS, BS, BS,\n EM, EM, EM,\n WS, WS, WS, WS,\n WS, WS, WS,\n WS, WS, WS, WS\n ]\n self.curr_player = WHITE_PLAYER", "def __init__(self):\n self.deck = Deck()\n self.player1 = Player(INITIAL_CHIPS)\n self.player2 = Player(INITIAL_CHIPS)\n self.flop = []\n self.turn = None\n self.river = None\n\n self.this_player = self.player2\n self.other_player = self.player1 # 一局开始前会对换一次玩家\n\n self.last_action = None", "def initializeGame(self):\n # Fill deck with cards and shuffle it\n self.deck.fill(104)\n self.deck.shuffle()\n #print \"Deck initialized\"\n\n # Initialize the field\n self.field.initialize(self.deck.draw(4))\n self.field.sortField()\n #self.field.printField()\n\n # Set players to initial state again\n # Distribute cards and set bulls to 0\n for p in self.players:\n p.bulls = 0\n p.setHand(self.deck.draw(10))", "def __init__(self):\n\n self.__turn_info = { 'turn': ChessGame.WHITE }\n self.init_board()", "def __init__(self):\n # The starting counts are set to 0 and modified when the board is initiated.\n self.num_black_pieces = 0\n self.num_black_kings = 0\n self.num_white_pieces = 0\n self.num_white_kings = 0\n # Creates a new board and fills it with the appropriate pieces.\n self.board = self._initiate_board()\n self.moves = []", "def __init__(self, playerColors : Dict[str, str]):\n initialGameState = {\n \"counter\" : {\"Team1\" : 0, \"Team2\" : 0},\n \"lastChanged\" : None,\n \"wonRounds\" : {\"Team1\" : 0, \"Team2\" : 0},\n 
\"wonGames\" : {\"Team1\" : 0, \"Team2\" : 0},\n \"currentMaxPoints\" : self.maxPointsWithoutOvertime,\n \"sidesChanged\" : False,\n \"playerPositions\" : {\"Team1\" : {\"Player1\" : 1, \"Player2\": 2}, \"Team2\" : {\"Player1\" : 3, \"Player2\": 4}},\n \"servePosition\" : 0,\n \"playerColors\" : playerColors,\n \"undoStack\" : [],\n \"redoStack\" : [],\n \"observers\" : []}\n self.setGameState(initialGameState)", "def __init__(self, players):\n\n # Instantiate a Players object with the players queue\n self._players = Players(players)\n # Instantiate the Die to be used for the current game\n self._die = Die()\n # Track the game status\n self._active_turn = True\n self._end_game = False", "def __init__(self, player):\n self._piece_type = 'bishop'\n self._value = 6 if player == \"white\" else -6\n self._summary = 'W-Bs' if player == \"white\" else 'B-Bs'\n\n self._directions = []\n for _ in range(4):\n self._directions.append([])\n\n for i in range(1, 8):\n self._directions[0].append((i, i))\n self._directions[1].append((i, -i))\n self._directions[2].append((-i, i))\n self._directions[3].append((-i, -i))", "def setup_new_board(self):\n\n logger.info(u'setup_new_board()')\n\n self.squares = [[None for j in xrange(8)] for i in xrange(8)]\n \n self.black_checkers = [ch.Checker(u'black', self) for i in xrange(12)]\n self.white_checkers = [ch.Checker(u'white', self) for i in xrange(12)]\n\n u\"\"\" Place checkers in starting squares \"\"\"\n i = 0\n for row in xrange(3):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.white_checkers[i])\n i += 1\n\n i = 0\n for row in xrange(5, 8):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.black_checkers[i])\n i += 1", "def __init__(self):\n self.die_a = die_class.Die(self.angry_die_faces)\n self.die_b = die_class.Die(self.angry_die_faces)\n self.game_stage = 1\n self.just_cheated_a = False\n self.just_cheated_b = False\n self.game_won = False", "def __init__(self, ui, player1, player2, istournament=False, tournamentgame=None):\n self.player1 = player1\n self.player2 = player2\n self.istournament = istournament\n self.tournamentgame = tournamentgame\n\n self.gameState = [0, 0, 0,\n 0, 0, 0,\n 0, 0, 0]\n\n self.ui = ui\n self.currentplayer = 1\n if random.uniform(0, 1) > 0.5:\n self.currentplayer = -1\n\n self.displayBoard(ui)", "def __init__(self, player_field=None, board=None, seed=1234,\n randomize_players=True):\n self.board = board or Board()\n random.seed(seed) # note AH: What does this line actually do?\n self.randomize_players = randomize_players\n self.player_field = player_field or [Player, Player]\n self.result = []", "def __init__(self, colour):\n self.colour = colour\n self.board = Board()\n self.pieces = self.assign_pieces()\n self.strategy = Strategy()", "def __init__(self, testBoard=None):\n if (testBoard == None):\n self.blackPieces = self.getStartingBlackPieces()\n self.whitePieces = self.getStartingWhitePieces()\n self.blackKing = Piece(\"black\", \"king\", 0, 4) #direct access to kings for\n self.whiteKing = Piece(\"black\", \"king\", 7, 4) #checkmate checks\n\n\n else:\n self.blackPieces = self.makeCustonBlackPieces(testBoard)\n self.whitePieces = self.makeCustonWhitePieces(testBoard)", "def __init__(self, player):\n self._piece_type = 'pawn'\n self._value = 2 if player == \"white\" else -2\n self._summary = 'W-Pw' if player == \"white\" else 'B-Pw'\n\n self._directions = []\n if player == \"white\":\n 
self._directions.append([(-1, 1)])\n self._directions.append([(0, 1), (0, 2)])\n self._directions.append([(1, 1)])\n else:\n self._directions.append([(-1, -1)])\n self._directions.append([(0, -1), (0, -2)])\n self._directions.append([(1, -1)])", "def __init__(self, player):\n self._piece_type = 'rook'\n self._value = 10 if player == \"white\" else -10\n self._summary = 'W-Rk' if player == \"white\" else 'B-Rk'\n\n self._directions = []\n for _ in range(4):\n self._directions.append([])\n\n for i in range(1, 8):\n self._directions[0].append((i, 0))\n self._directions[1].append((-i, 0))\n self._directions[2].append((0, i))\n self._directions[3].append((0, -i))", "def __init__(self, referee):\n super(GameVisualizerWindow, self).__init__()\n self.referee = referee\n self.darea = None # set in init_ui()\n self.scores = None # set in init_ui()\n self.player_color_order = [player.get_color() for player in referee.get_current_state().get_players()]\n self.init_ui()", "def __init__(self, players=None):\n self.game = Game()\n if players:\n self.player1 = players[0]\n self.player2 = players[1]\n else:\n self.player1 = Player('X')\n self.player2 = Player('O')\n self.record = Record()\n self.winning_moves = []", "def setup(match):\r\n match.height = constants.HEIGHT\r\n match.width = constants.WIDTH\r\n match.board = makeBoard(match.height, match.width)\r\n match.seedlist = constants.SEEDLIST\r\n if match.seedlist == None:\r\n match.seedlist=randseed(match.height, match.width)\r\n numofrow = len(match.seedlist[0]) #opposite notation because column-major not row-major\r\n for x in match.seedlist:\r\n match.board[x[0]][x[1]] = constants.ALIVE\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n # TO DO:\r\n # Initialize board according to match.seedlist\r", "def __init__(self, board, turn):\n self.player = turn\n self.roll = self.roll_dice()\n #array of applied board states\n self.moves = []\n self.board = board\n self.generate_valid_moves()", "def __init__(self):\n self.board = [[T.Tile().getColor() for x in range(6)] for y in range(6)]", "def __init__(self):\r\n\t\tself.game_board = [['0','0','0'],['0','0','0'],['0','0','0']]\r\n\t\tself.count = 0\r\n\t\tself.x_turn = True\r\n\t\r\n\r\n\t\tpass", "def __init__(self, players, x_dist, y_dist, num_to_win, turn_start=0, max_turns=1000,\\\r\n winner=-1, brd=None, board_history=None):\r\n self.players = players\r\n self.x_dist = x_dist\r\n self.y_dist = y_dist\r\n\r\n self.turn_number = turn_start\r\n if brd == None:\r\n self.board = Board(x_dist, y_dist)\r\n else:\r\n self.board = brd\r\n if board_history == None:\r\n self.board_history = [self.board]\r\n else: \r\n self.board_history = board_history\r\n self.num_to_win = num_to_win\r\n\r\n self.max_turns = max_turns\r\n\r\n self.winner = winner", "def __init__(self,player1: Player = ManualPlayer(\"P1\"),\\\r\n player2: Player = ManualPlayer(\"P2\")):\r\n\r\n self.board = np.zeros((BOARD_SIZE,BOARD_SIZE)\\\r\n ,dtype=np.int8)\r\n self.board[3,3] = '2'\r\n self.board[4,4] = '2'\r\n self.board[3,4] = '1'\r\n self.board[4,3] = '1' \r\n\r\n self.players = []\r\n self.players.append(player1)\r\n self.players.append(player2)\r\n self.turn = 1\r\n self.count = 0", "def __init__(self, lobby_id, players):\n self.card_wished = None\n self.wait_for_card_wish = False\n self.current_player = None\n self.direction_clock_wise = True\n self.current_draw_punishment = 1\n\n self.players = []\n self.lobby_id = lobby_id\n if isinstance(players, list):\n for player in players:\n self.players.append(player)\n else:\n 
self.players.append(players)\n\n players_len = len(self.players)\n if players_len > 0:\n self.current_player = self.players[random.randint(0, players_len) - 1]\n\n # Get the cards of the game\n game = Game.objects.get(name=\"maumau\")\n card_set = CardSet.objects.get(id=game.card_set_id)\n self.cards = list(card_set.cards.all())\n\n self.__shuffle_cards()\n for player in self.players:\n self.draw_cards(player, 5)\n\n self.discard_pile = []\n self.discard_pile.append(self.cards.pop())", "def __init__(self, players, turns, game, deterministic_cache):\n self.players = players\n self.turns = turns\n self.game = game\n self.deterministic_cache = deterministic_cache\n self.opponents = players", "def __init__(self,state,player=WHITE):\n if(state==None):\n self.gameState = dict()\n for x in range(0,WIDTH):\n for y in range(0,HEIGHT):\n self.gameState[x,y] = EMPTY\n for x in range(0,WIDTH):\n self.gameState[x,BSTARTROW] = BLACK#Blacks starting row\n self.gameState[x,WSTARTROW] = WHITE#Whites starting row\n #whites.append(Board.pawn(Board.pos(x,WSTARTROW),WHITE))\n #blacks.append(Board.pawn(Board.pos(x,BSTARTROW),BLACK))\n else:\n self.gameState = state\n \n self.whoseTurn = player\n self.cachedWin = False # set to True in winFor() if\n self.cachedWinner = None", "def __init__(self):\n self._board = []\n for i in range(10):\n self._board.append([None for i in range(9)])\n self.place_pieces()", "def __init__(self, screen, win_size_x, win_size_y, player_num):\n self._player_list = list(Player(f\"Player {x}\") for x in range(player_num))\n self._player_num = player_num\n print(\"The number of players is: \", player_num)\n self._screen = screen\n self._player_turn = 0\n self._ui_player_turn = UI((1820, 10), (100, 50), f\"Player {self._player_turn}\")\n self._screen.blit(self._ui_player_turn.update(\n f\"Player {self._player_turn}\"), self._ui_player_turn._location)\n self._core_deck = Deck(\"Deck/test_deck.txt\")\n self._war_deck = []\n self._map = Map(self._player_num, win_size_x, win_size_y)\n self._clock = pygame.time.Clock()\n self._run = True\n self._fps = 30\n self.each_player_draws_hand(self._core_deck)", "def __init__(self, rows, columns, live_probability=0.3, seed=0):\n self.live_probability = live_probability\n self.seed = seed\n self.rows = rows\n self.columns = columns\n self.grid = [\n [Cell() for column_cells in range(self.columns)]\n for row_cells in range(self.rows)\n ]\n\n self.generate_board()", "def __init__(self):\n self.game_board = [' '] * 9\n self.size = len(self.game_board)\n self.move = 'X'\n self.player1 = None\n self.player2 = None\n self.current_player = None\n self.board_coords = {\n (1, 3): 0, (2, 3): 1, (3, 3): 2,\n (1, 2): 3, (2, 2): 4, (3, 2): 5,\n (1, 1): 6, (2, 1): 7, (3, 1): 8\n }\n\n self.winning_cases = [\n (0, 1, 2), (3, 4, 5), (6, 7, 8),\n (0, 3, 6), (1, 4, 7), (2, 5, 8),\n (0, 4, 8), (2, 4, 6)\n ]", "def __init__(self):\n self.board = [\n [None, None, None],\n [None, None, None],\n [None, None, None]\n ]", "def __init__(self, players, num_of_players):\r\n self.players = players\r\n self.num_of_players = num_of_players\r\n self.active_players = num_of_players\r\n self.dealer = Dealer()\r\n self.card_stack = CardStack()\r\n self.money_stack = MoneyStack()\r\n self.cur_player = 0\r\n self.round_num = 0\r\n self.round_player_money = 0", "def __init__(self, colorNames):\n self._colorOptions = '' # initials for color choices\n for color in colorNames:\n self._colorOptions += color[0].upper()\n # following will be reset when startGame is called\n self._currentTurnNum = 
self._lengthOfPattern = self._maxNumberOfTurns = 0", "def game_setup(self):\n self.deck = Shoe(6)\n self.initial_draw()\n self.pot = ask_for_bet(self.player.money)\n show_table(self.player, self.dealer, self.pot)\n self.surrender_and_insurance()", "def setup_game(self, player, opponent):\n\n self.display.clear_screen()\n\n ship_index = 0\n\n while not player.ready(len(self.SHIP_INFO)):\n # prints the currrent board\n board = self.display.construct_player_board(player, opponent, True)\n self.display.print_board(board)\n\n ship_name, ship_length = self.SHIP_INFO[ship_index]\n ship_to_add = Ship(ship_name, ship_length)\n\n try:\n player.add_ship(ship_to_add)\n except Exception as e:\n ship_to_add = player.ships[ship_index]\n\n origin, orientation = self.display.prompt_for_ship_placement(\n ship_to_add)\n\n try:\n player.place_ship(ship_to_add, origin, orientation,\n self.BOARD_SIZE)\n except ValueError as ve:\n self.display.clear_screen()\n print(ve)\n print()\n continue\n\n self.display.clear_screen()\n ship_index += 1\n self.display.prompt_switch(opponent.name)", "def test_initialization(number: int) -> None:\n for _ in range(number):\n if random.random() < 0.5:\n size = random.randint(3, 10)\n baby_position = [random.randint(0, size - 1), random.randint(0, size - 1)]\n num_berries = random.randint(1, size)\n else:\n size = [random.randint(3, 10), random.randint(3, 10)]\n baby_position = [\n random.randint(0, size[0] - 1),\n random.randint(0, size[1] - 1),\n ]\n num_berries = random.randint(1, size[0])\n print(f\"\\n\\n\\nSize of the board {size}\")\n print(f\"Baby position: {baby_position}\")\n print(f\"Number of berries to be placed randomly: {num_berries}\")\n game = Game(size, baby_position, 0, 0, 0, 0, num_berries)\n print(f\"Here is the board:\\n{game.get_board()}\")\n print(game.get_baby())\n for b in game.get_berries():\n print(b)", "def __init__(self):\n # start with the wild cards\n self.pile = [Card.wild_card(), Card.wild_card()]\n for i in range(Card.num_values):\n for j in range(Card.num_colors):\n for k in range(Card.num_shapes):\n self.pile.append(Card(i + 1, j + 1, k + 1, False))\n assert len(self.pile) == Pile.num_cards\n shuffle(self.pile)", "def init_board():\n\t# Generates a table 10*10 of 0s with -1 around and the initial state\n\t# of the board with 2 whites and 2 blacks in the middle\n\ttable = [[0 if i != 0 and i != 9 else -1 for i in range(10)] if j != 0 and j != 9 else [-1 for i in range(10)] for j in range(10)] #leaves a -1 line around the whole table of 0s\n\t#initial state is drawn and recorded\n\ttable[4][4] = 2\n\ttable[5][5] = 2\n\ttable[4][5] = 1\n\ttable[5][4] = 1\n\tdrawPiece((4,4),2)\n\tdrawPiece((5,5),2)\n\tdrawPiece((4,5),1)\n\tdrawPiece((5,4),1)\n\treturn table", "def setup_game(num_players=1, num_decks=1):\n players = []\n for i in range (num_players):\n players.append(player(i+1))\n new_dealer = dealer(\"Dealer\")\n play_shoe = shoe(num_decks).shuffle_shoe()\n return players, new_dealer, play_shoe", "def make_board(self):\n generate = lambda: random.randint(1, 100) in range(1, self.p_pit+1)\n some_number = self.some_number\n agent = Agent(some_number)\n agent.program = Oozeplorer_Percept(agent)\n self.add_agent(agent)\n gold = Gold()\n self.add_thing(gold, None)\n for row in range(1, some_number + 1):\n for col in range(1, some_number + 1):\n valid_spot = (row, col) != gold.location and (row, col) != (1, 1)\n if valid_spot and generate():\n t_pt = Pit()\n t_pt.location = (row, col)\n self.things.append(t_pt)", "def __init__(self, boardDimensions, 
shipsAfloat):\r\n self.enemyBoard = [[BoardState.OPEN for j in range(boardDimensions)] for i in range(boardDimensions)]\r\n self.boardDimensions = boardDimensions\r\n self.shipsAfloat = shipsAfloat", "def __init__(self):\n self.numShipsPerPlayer = 0\n self.playerType = 1 # Whether P2 is a human (1) or AI (2-4 for difficulty)\n \n self.grid = Grid()\n self.shipDir = 0 # Direction of the ship currently being placed (index of c.DIRS)\n self.lenShip = 1 # Length of the ship to place next\n \n self.p1Ships = []\n self.p2Ships = []\n \n # Number of special shots each player has (gain one every 10 rounds)\n self.round = 0\n self.p1_special_shots = 0\n self.p2_special_shots = 0\n \n self.is_P1_turn = False\n self.is_placing = False\n self.is_shooting = False\n self.in_transition = False\n \n self.msg = \"\" # Message to display below game board", "def __init__(self):\n\t\tself.current = Piece.EX\n\t\tself.board = [Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK]", "def place_pieces(self):\n # Soldiers\n # RED\n self.add_piece('a4', Soldier('RED'))\n self.add_piece('c4', Soldier('RED'))\n self.add_piece('e4', Soldier('RED'))\n self.add_piece('g4', Soldier('RED'))\n self.add_piece('i4', Soldier('RED'))\n # BLUE\n self.add_piece('a7', Soldier('BLUE'))\n self.add_piece('c7', Soldier('BLUE'))\n self.add_piece('e7', Soldier('BLUE'))\n self.add_piece('g7', Soldier('BLUE'))\n self.add_piece('i7', Soldier('BLUE'))\n # Cannons\n # RED\n self.add_piece('b3', Cannon('RED'))\n self.add_piece('h3', Cannon('RED'))\n # BLUE\n self.add_piece('b8', Cannon('BLUE'))\n self.add_piece('h8', Cannon('BLUE'))\n # Generals\n # RED\n self.add_piece('e2', General('RED'))\n # BLUE\n self.add_piece('e9', General('BLUE'))\n # Chariots\n # RED\n self.add_piece('a1', Chariot('RED'))\n self.add_piece('i1', Chariot('RED'))\n # BLUE\n self.add_piece('a10', Chariot('BLUE'))\n self.add_piece('i10', Chariot('BLUE'))\n\n # Horses\n # RED\n self.add_piece('c1', Horse('RED'))\n self.add_piece('h1', Horse('RED'))\n # BLUE\n self.add_piece('c10', Horse('BLUE'))\n self.add_piece('h10', Horse('BLUE'))\n # Elephants\n # RED\n self.add_piece('b1', Elephant('RED'))\n self.add_piece('g1', Elephant('RED'))\n # BLUE\n self.add_piece('b10', Elephant('BLUE'))\n self.add_piece('g10', Elephant('BLUE'))\n # Advisors\n # RED\n self.add_piece('d1', Guard('RED'))\n self.add_piece('f1', Guard('RED'))\n # BLUE\n self.add_piece('d10', Guard('BLUE'))\n self.add_piece('f10', Guard('BLUE'))", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[-1,0],[1,0],[0,-1]],\\\n 'palace':[[-1,-1],[1,-1]]}\\\n })", "def __init__(self, board):\n self.board = board\n self.tableaus = [] # any exposed card is clickable\n self.foundations = [] # only top card is clickable\n self.waste = [] # only top card is clickable\n self.stock = [] # only top card is clickable", "def __init__(self):\n self.board_dict = dict()\n for i in range(self.BOARD_WIDTH):\n for j in range(self.BOARD_WIDTH):\n self.board_dict[i, j] = 0, None\n\n self.players_locations = dict()\n self.last_moved = None", "def __init__(self, name, board):\n self.board = board\n self.width = 50\n self.height = 25\n self.rewards = []\n self.reward_sum = sum(self.rewards)\n self.collisions = []\n # Data to see correlation between directions and distance measures\n self.direction_history = []\n self.distances = []\n 
self.agent_position = {'x': 10, 'y': 600}\n\n self.corners = [[self.agent_position['x'] + self.width, self.agent_position['y'], '1s'], [\n self.agent_position['x'] + self.width, self.agent_position['y'] + self.height, '11']]\n self.line_pos = [self.show_distances(p) for p in self.corners]\n self.angle = 90", "def __init__(self):\n\n self._length = 8\n self.board = []\n self.columns = \"ABCDEFGH\"\n for colNum in range(0, self._length):\n self.board.append([])\n for rowNum in range(0, self._length):\n self.board[colNum].append(Tile(colNum, rowNum))\n\n self.board[3][3].color = \"blue\"\n self.board[3][4].color = \"red\"\n self.board[4][3].color = \"red\"\n self.board[4][4].color = \"blue\"", "def initialise_cup_competition(bye_list=[]):\n # create a list of all teams\n teams = list(Team.active_objects.all())\n\n # if no. of teams + no. of byes is less than 16 then one or more random byes are required\n random_byes = 16 - len(teams) - len(bye_list)\n\n # if it's more than then there's a problem\n # TODO - raise an error here\n if random_byes < 0:\n print(len(teams), len(bye_list))\n print(\"Error: teams + byes > 16\")\n exit\n\n # add a None for each random bye to be incorporated into the random list\n for i in range(random_byes):\n print(\" adding random bye\")\n teams.append(None)\n\n # randomise the list\n random.shuffle(teams)\n\n print(\"randomised teams:\")\n print(teams)\n\n # flag for monitoring when planned byes are needed\n bye_next = False\n # flag for monitoring byes assigned in previous step\n bye_previous = False\n\n for position in range(1, 17):\n\n # pass over the position when a bye is required\n if bye_next:\n bye_next = False\n continue\n\n # get the next team from the randomised list\n team = teams.pop()\n\n # pass over any Nones in the list and set the previous flag\n if not team and not bye_previous:\n print(\"if not team and not bye_previous\")\n bye_previous = True\n continue\n elif not team:\n print(\"elif not team\")\n # add the team/None back to the list and reshuffle until the next one is note none\n teams.append(team)\n while teams[-1] is not None:\n print(\"Two Nones in a row - shuffling\")\n random.shuffle(teams)\n team = teams.pop()\n\n # deal with planned byes\n if team in bye_list:\n # if the position is an even number we can only assign the position if the bye_previous flag is set\n if position % 2 == 0:\n if bye_previous:\n team.cup_start_position = position\n # in which case we need to add the a bye back to the list to replace the random one\n teams.append(None)\n else:\n # otherwise we need to re-add the team back to the end of the list and pop the next non-bye team\n index = -1\n while len(teams) + index >= 0:\n print(\"executing while loop: index = \", index)\n if teams[index] is not None and teams[index] not in bye_list:\n old_team = team\n team = teams.pop(index)\n teams.append(old_team)\n break\n index -= 1\n team.cup_start_position = position\n else:\n team.cup_start_position = position\n bye_next = True\n else:\n # simply assign the team to the current position\n team.cup_start_position = position\n\n # finally save the team and set previous flag back to False\n team.save()\n bye_previous = False", "def __init__ (self, cols = 6, rows = 7, requiredToWin = 4):\r\n\t\tself.cols = cols\r\n\t\tself.rows = rows\r\n\t\tself.win = requiredToWin\r\n\t\tself.board = [[NONE] * rows for _ in range(cols)]", "def __init__(self,size, population):\r\n\r\n if size < 2 * population:\r\n raise ValueError(\"Game board not big enough to fit population for both 
players.\")\r\n\r\n self.board = [[0 for row in range(size)] for column in range(size)]\r\n self.capture_options = []\r\n self.finished = False\r\n \r\n for row in range(size):\r\n # Only populate the given row amount for each player.\r\n if row < population or row > size - population - 1:\r\n for column in range(size):\r\n if (row+column) % 2 == 0:\r\n self.board[row][column] = 1\r\n if row > size - population - 1:\r\n self.board[row][column] = -1", "def __init__(self, players_number: int, marbles: int) -> None:\n self.turn = 0\n self.player = 0\n self.marbles = marbles\n self.players = [Player(pk=i) for i in range(players_number)]\n self.current_marble = Marble.get_zero_marble()", "def set_pieces(self):\n\n for i in range(len(self._game_board)):\n\n # Row 1\n if i == 0:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"black\", \"BCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"black\", \" BH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"black\", \" BE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"black\", \" BA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"black\", \" BG \")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 3\n if i == 2:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"black\", \"BCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 4\n if i == 3:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"black\", \"BSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 7\n if i == 6:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"red\", \"RSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 8\n if i == 7:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"red\", \"RCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 10\n if i == 9:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"red\", \"RCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"red\", \" RH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"red\", \" RE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"red\", \" RA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"red\", \" RG \")\n self._game_board[i][ii].update_location([i, ii])", "def init_players(self):\n complain = \"\"\n players_turn = random.sample(range(self.n_players), self.n_players)\n players_created = {}\n picked_colors = []\n for x in range(self.n_players):\n while True:\n clear_output()\n try:\n color = input(\n f\"{complain}Player {x+1}, please type in one of the following colors: ({', '.join([x.capitalize() for x in self.world.player_colors if x not in picked_colors])}):\\n\").lower()\n if color in self.world.player_colors and color not in picked_colors:\n picked_colors.append(color)\n players_created[players_turn[x]] = 
Player(\n color.capitalize(), self.start_troops)\n break\n else:\n complain = \"Please enter a valid color\\n\"\n except:\n pass\n\n self.players = [players_created[y] for x in range(\n self.n_players) for y in players_created.keys() if int(y) == x]", "def __init__(self):\n self.boards = [[False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False]]", "def __init__(self):\n self.boards = [[False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False]]", "def __init__(self, players):\n\n self._players = players\n self._game = None", "def __init__(self, board_size=MAX_BOARD_SIZE, cell_size=MAX_CELL_SIZE, dead_color=DEAD, alive_color=ALIVE):\n self._board_size = board_size\n self._cell_size = cell_size\n self.dead_color = dead_color\n self.alive_color = alive_color\n\n self.board = []\n self.mode = 0", "def initialize():\n\n global PLAYER # this means we use the global var PLAYER and cannot have a local var named PLAYER\n global LEVEL_COUNTER\n\n LEVEL_COUNTER = 1\n \n coordinates = generate_coords()\n\n PLAYER = Stark()\n tree = Tree()\n ww = WhiteWalker()\n crown = Crown()\n gray_gem = GrayGem()\n clear_board()\n GAME_BOARD.create(\"Snow\",\"Snow\")\n GAME_BOARD.draw_msg(\"Level \" + str(LEVEL_COUNTER) + \". Winter is coming.\")\n generate_level(coordinates, [PLAYER, ww, gray_gem, crown, tree, tree, gray_gem, tree, tree, gray_gem, tree])\n\n # for i in range(0,NUM_ELTS):\n # place_on_board(elts[i], coordinates[i][0], coordinates[i][1])", "def __init__(self, label, player):\n \n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[0,1,[-1,1],[1,1]],[1,0,[1,1],[1,-1]],[0,-1,[1,-1],[-1,-1]],[-1,0,[-1,-1],[-1,1]]],\\\n 'palace':[]},\\\n 'BLUE':{'board':[[0,1,[-1,1],[1,1]],[1,0,[1,1],[1,-1]],[0,-1,[1,-1],[-1,-1]],[-1,0,[-1,-1],[-1,1]]],\\\n 'palace':[]}\\\n })", "def __init__(self, label, player):\n \n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[0,1,[-1,1],[1,1]],[1,0,[1,1],[1,-1]],[0,-1,[1,-1],[-1,-1]],[-1,0,[-1,-1],[-1,1]]],\\\n 'palace':[]},\\\n 'BLUE':{'board':[[0,1,[-1,1],[1,1]],[1,0,[1,1],[1,-1]],[0,-1,[1,-1],[-1,-1]],[-1,0,[-1,-1],[-1,1]]],\\\n 'palace':[]}\\\n })", "def __init__(self, board, position, player, piece_type):\n self.board = board\n self.position = position\n self.player = player\n self.piece_behavior = piece_type", "def __init__(self):\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n self.player_count: int = None\n self.player_hand_0: arcade.SpriteList = None\n self.player_hand_1: arcade.SpriteList = None\n self.deck: arcade.SpriteList = None\n self.pile: arcade.SpriteList = None", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def __init__(self):\n self._game_state = \"UNFINISHED\"\n 
self._current_player = \"BLACK\"\n self._game_board = Board()", "def __init__(self, player):\n self._piece_type = 'knight'\n self._value = 6 if player == \"white\" else -6\n self._summary = 'W-Kt' if player == \"white\" else 'B-Kt'\n\n self._directions = []\n self._directions.append([(-2, -1)])\n self._directions.append([(-2, 1)])\n self._directions.append([(2, -1)])\n self._directions.append([(2, 1)])\n self._directions.append([(-1, -2)])\n self._directions.append([(-1, 2)])\n self._directions.append([(1, -2)])\n self._directions.append([(1, 2)])", "def __init__(\r\n self,\r\n player_count=4,\r\n strategy=[HumanRandom(), HumanRandom(), HumanRandom(), HumanRandom()],\r\n rules=None,\r\n ):\r\n # shuffle the cards\r\n shuffle(self.community_cards)\r\n shuffle(self.chance_cards)\r\n\r\n self.player_positions = [0] * player_count\r\n self.current_player = randint(0, player_count - 1)\r\n self.player_list = []\r\n for i in range(player_count):\r\n self.player_list.append(\r\n Player(uid=i, token=self.token[i], strategy=strategy[i])\r\n )\r\n self.full_turn_count = 1", "def __init__(self, initial=[1, 3, 5, 7]):\n self.piles = initial.copy()\n self.player = 0\n self.winner = None", "def __init__(self, initial=[1, 3, 5, 7]):\n self.piles = initial.copy()\n self.player = 0\n self.winner = None", "def initialize_board(self):\n seed = self.seed and self.seed.any()\n if not (self.shape or seed):\n raise Exception(\"Either a shape or a seed is required.\")\n\n elif self.shape and seed:\n # Center the seed on a game board\n board = self._center_seed(self.shape, self.seed)\n\n elif self.shape:\n # The probability a cell starts off dead\n prob_dead = [1 - self.game.weight]\n # Class probabilities for live cells\n probs_alive = [self.game.weight * (1/self.classes)] * self.classes\n\n board = np.random.choice(\n self.classes + 1,\n np.prod(self.shape),\n p = prob_dead + probs_alive\n ).reshape(self.shape)\n \n else: # Only a seed is given\n self.shape = self.seed.shape\n board = self.seed\n\n self.array = board\n self.start_array = board\n self.prev_array = None", "def start_game(self):\n self.board = Board(num_tableaus=self.tableau_qty, num_decks=self.decks, deal_3=self.deal_3)\n self.board.init_move_dict()\n self.board.deal(self.deck)\n\n if self.api_use:\n self.init_game_api()\n elif self.commandline:\n self.init_cl_game()\n else:\n self.init_pygame()", "def newGame(self):\n self.last_move = \"go\"\n self.values = [None for i in range(64)]\n for i in range(8):\n self.setPiece(i, 2, self.makePiece(ChessPiece.WHITE_PAWN))\n self.setPiece(i, 7, self.makePiece(ChessPiece.BLACK_PAWN))\n\n self.setPiece('a', 1, self.makePiece(ChessPiece.WHITE_ROOK))\n self.setPiece('b', 1, self.makePiece(ChessPiece.WHITE_KNIGHT))\n self.setPiece('c', 1, self.makePiece(ChessPiece.WHITE_BISHOP))\n self.setPiece('d', 1, self.makePiece(ChessPiece.WHITE_QUEEN))\n self.setPiece('e', 1, self.makePiece(ChessPiece.WHITE_KING))\n self.setPiece('f', 1, self.makePiece(ChessPiece.WHITE_BISHOP))\n self.setPiece('g', 1, self.makePiece(ChessPiece.WHITE_KNIGHT))\n self.setPiece('h', 1, self.makePiece(ChessPiece.WHITE_ROOK))\n\n self.setPiece('a', 8, self.makePiece(ChessPiece.BLACK_ROOK))\n self.setPiece('b', 8, self.makePiece(ChessPiece.BLACK_KNIGHT))\n self.setPiece('c', 8, self.makePiece(ChessPiece.BLACK_BISHOP))\n self.setPiece('d', 8, self.makePiece(ChessPiece.BLACK_QUEEN))\n self.setPiece('e', 8, self.makePiece(ChessPiece.BLACK_KING))\n self.setPiece('f', 8, self.makePiece(ChessPiece.BLACK_BISHOP))\n self.setPiece('g', 8, 
self.makePiece(ChessPiece.BLACK_KNIGHT))\n self.setPiece('h', 8, self.makePiece(ChessPiece.BLACK_ROOK))", "def __init__(self):\n self.board = Board()\n #self.player1 = player1\n #self.player2 = player2\n self.winner = None", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def __init__(self, python_board: list[list[int]] = None, red_active: bool = True) -> None:\n\n game_board = [[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]]\n\n if python_board is not None:\n self.board_array = np.array(python_board)\n else:\n self.board_array = np.array(game_board)\n\n self.move_number = 0\n\n # Creating the kernels to use in a 2d convolution to check the board for a winner later\n across = np.array([[1, 1, 1, 1]])\n vertical = np.transpose(across)\n main_diagonal = np.eye(4, dtype=np.uint8)\n off_diagonal = np.fliplr(main_diagonal)\n self._detection_kernels_red = [across, vertical, main_diagonal, off_diagonal]\n self._detection_kernels_yellow = [kernel * -1 for kernel in self._detection_kernels_red]\n\n self._is_red_active = red_active\n\n # Matches moves to their indices in self._valid_moves, this order is very important\n # for optimising alpha-beta pruning\n self._valid_move_order = {3: 0, 2: 1, 4: 2, 5: 3, 1: 4, 0: 5, 6: 6}\n self._valid_moves = [3, 2, 4, 5, 1, 0, 6]\n self._column_to_row = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n\n self._win_state = None\n\n # This code reads in the hash keys for use in Zobrist hashing, for more information, see\n # opening_book_gen.py\n red_hash_keys = []\n with open('data/Zobrist_Hash_Keys/Zobrist_red_key.csv') as file:\n reader = csv.reader(file)\n for row in reader:\n red_hash_keys.append([int(r) for r in row])\n self._red_hash_keys = np.array(red_hash_keys)\n\n yellow_hash_keys = []\n with open('data/Zobrist_Hash_Keys/Zobrist_yellow_key.csv') as file:\n reader = csv.reader(file)\n for row in reader:\n yellow_hash_keys.append([int(r) for r in row])\n self._yellow_hash_keys = np.array(yellow_hash_keys)\n\n self.hash = 0", "def __init__(self, player, board):\n self.player = player\n self.board = board", "def __init__(self):\n # Current player\n self.player = X\n\n # Board\n self.board = [\n [None, None, None],\n [None, None, None],\n [None, None, None]\n ]\n\n # Winner\n self.winner = None\n\n # Game over\n self._gameover = False", "def __init__(self) -> None:\n self._grid_sol = []\n self._grid_init = []\n self._grid_display = []\n self._difficulty = 2\n self._move_history = []\n for _ in range(9):\n self._grid_sol.append([0, 0, 0, 0, 0, 0, 0, 0, 0])\n self._grid_display.append([0, 0, 0, 0, 0, 0, 0, 0, 0])\n self._grid_init.append([0, 0, 0, 0, 0, 0, 0, 0, 0])", "def new_game(self):\n self.board = [None] * 9\n self.player = \"X\"\n self.winner = None", "async def startingGame(self, ctx):\n # =-=-=-= ATTRIBUTE ROLES FOR PLAYERS =-=-=-= #\n self.msgToDelete.append(await ctx.message.channel.send(\"Attribution des rôles ...\"))\n 
self.rolesOrder = self.roles.copy()\n self.rolesOrder = list(dict.fromkeys(self.rolesOrder)) # Remove redundant roles for playGame()\n random.seed(time.time())\n random.shuffle(self.players)\n random.shuffle(self.roles)\n self.roles = self.roles[:len(self.players) + 3]\n if self.roles.count(\"Franc-Maçon\") % 2 != 0: # Only One Freemason in\n for i in range(len(self.roles)):\n if self.roles[i] == \"Franc-Maçon\":\n self.roles[(i + 1) % len(self.roles)] = \"Franc-Maçon\"\n random.seed(time.time())\n random.shuffle(self.players)\n random.shuffle(self.roles)\n break\n\n for numberPlayer in range(len(self.players)):\n # At least there is less player than role, so I need to get the number of players instead of roles.\n\n self.syncRole(user=self.players[numberPlayer], roleToAdd=self.roles[numberPlayer],\n listToAdd=self.playersAndRoles)\n\n # =-=-=-= ATTRIBUTE ROLES FOR DECK =-=-=-= #\n for numberCentralRole in range(len(self.players), len(self.players) + 3):\n if numberCentralRole == len(self.players) + 0:\n position = \"gauche\"\n elif numberCentralRole == len(self.players) + 1:\n position = \"milieu\"\n else:\n position = \"droite\"\n self.syncRole(user=position, roleToAdd=self.roles[numberCentralRole], listToAdd=self.centralDeck)\n\n # =-=-=-= Preparing requires =-=-=-= #\n await self.createRole(ctx=ctx)\n self.msgToDelete.append(await ctx.message.channel.send(\"Création du village ...\"))\n print(\"Create village ...\")\n self.lastVoiceChannel = ctx.author.voice.channel\n await self.deleteCategory(ctx=ctx, reason=\"Pas de dualité de channel.\")\n await self.createGameSpace(ctx=ctx)\n await self.movePlayer(ctx=ctx, voiceChannel=self.voiceChannel, reason=\"Début de partie.\")\n\n print(\"Game started\")\n self.msgToDelete.append(await ctx.message.channel.send(\"Début de la partie.\"))\n\n await ctx.channel.send(\"Cleaning all messages ...\")\n await self.delAllMsg(waitingTime=5)\n await self.playGame(ctx=ctx)" ]
[ "0.7091265", "0.7042805", "0.6867666", "0.6535027", "0.6531117", "0.6422226", "0.6401937", "0.6389531", "0.6383958", "0.6380879", "0.6368232", "0.63284415", "0.6323319", "0.63110787", "0.6310818", "0.6294931", "0.6289826", "0.6264066", "0.62364113", "0.62338966", "0.6192807", "0.61491203", "0.6146116", "0.61014855", "0.6089106", "0.60876924", "0.60698", "0.60693467", "0.6054934", "0.60505307", "0.6036493", "0.60224706", "0.60176045", "0.596715", "0.5964045", "0.59623724", "0.5958877", "0.5956607", "0.5954424", "0.5953319", "0.5948711", "0.5947236", "0.59163475", "0.5903495", "0.58922297", "0.58894897", "0.5886033", "0.58855176", "0.5884357", "0.5875599", "0.5873476", "0.58549243", "0.5853799", "0.58504474", "0.5842807", "0.5839824", "0.58326924", "0.5830908", "0.583021", "0.58281976", "0.58258706", "0.58121806", "0.5811118", "0.5804382", "0.5802154", "0.57994", "0.57960516", "0.5793324", "0.5788024", "0.57852525", "0.57672656", "0.5760571", "0.5758265", "0.5758265", "0.5754522", "0.5749848", "0.5745463", "0.5741384", "0.5741384", "0.5733195", "0.57280487", "0.57199615", "0.57199615", "0.5715565", "0.5714904", "0.57140106", "0.57087106", "0.57087106", "0.57024056", "0.56958663", "0.56845516", "0.5681807", "0.56735057", "0.56735057", "0.5672401", "0.5668347", "0.5658572", "0.5653843", "0.5651041", "0.56508243" ]
0.6177999
21
Check that the board used is large enough and has enough valid tiles for the given list of players; raises a ValueError if any of these conditions are not met.
def __check_board_is_valid(self, board, players):
    total_tiles = board.get_rows() * board.get_cols()
    # check the board is big enough
    if total_tiles < (6 - len(players)) * len(players):
        raise ValueError("Board specified by board_size is too small.")
    # check that the board has enough active tiles
    if len(players) == 3 and total_tiles - board.get_holes_in_board() < 9:
        raise ValueError("Too many holes to place all penguins")
    elif total_tiles - board.get_holes_in_board() < 8:
        raise ValueError("Too many holes to place all penguins")
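A quick way to sanity-check the two hole thresholds above is to work out the penguin counts directly. This assumes the usual Fish rule that each of the N players places 6 - N penguins; the snippet below is only an illustrative sketch of that arithmetic, not part of the referee code.

# Illustrative sketch (assumption: each of N players places 6 - N penguins).
for n_players in (2, 3, 4):
    penguins = (6 - n_players) * n_players
    print(f"{n_players} players need {penguins} usable tiles")
# Prints: 2 players need 8, 3 players need 9, 4 players need 8 usable tiles,
# which matches the 9 (three players) and 8 (otherwise) thresholds in the check.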
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid_board(board):\n valid = len(board) == 9\n valid &= -1 <= sum(board) <= 1\n valid &= reduce(lambda x, y: x and y, [utils.is_valid_player(p) for p in board])\n return valid", "def test_generate_board_too_many_mines_errors(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 10\n height = 12\n mines = int(width * height)\n\n # act and expect error\n with self.assertRaises(ValueError):\n game.generate_board(width, height, mines)", "def _check_integrity(self):\n\n count = 0\n for (x, y) in self.__players[ChessGame.BLACK].union(\n self.__players[ChessGame.WHITE]):\n assert (x, y) in self.__board\n count += 1\n\n assert count == len(self.__board)", "def validate_ground_size(ground: tuple, points: tuple) -> None:\n\n for coordinates in points:\n limit = any(\n size < point for size, point in zip(ground, coordinates)\n )\n if limit:\n raise GroundSizeLimitError", "def tile_checker(stage_tiles,\n player_new):\n tile = stage_tiles.get(\"{0},{1}\".format(player_new[0], player_new[1]), \"ocean\")\n # Check each possible terrain\n if tile == \"rock\" or tile == \"mountain\":\n valid = False\n color.write(\"You can't move into a {}!\\n\".format(tile),\"ERROR\")\n else:\n valid = True\n\n return valid", "def isValidTeamSize(size, minimum, maximum) :\n\n return isInteger(size) and int(size) >= minimum and int(size) <= maximum", "def validate_move(move, player_board):\n select_row = move.select_row\n select_col = move.select_col\n \n player_board_rows = player_board.shape[0]\n player_board_cols = player_board.shape[1]\n \n if select_row >= player_board_rows or select_row < 0 or \\\n select_col >= player_board_cols or select_col < 0 or \\\n player_board[select_row][select_col] != -1:\n return False\n \n return True", "def validate_image(image, number_tiles):\n TILE_LIMIT = 99 * 99\n\n try:\n number_tiles = int(number_tiles)\n except BaseException:\n raise ValueError(\"number_tiles could not be cast to integer.\")\n\n if number_tiles > TILE_LIMIT or number_tiles < 2:\n raise ValueError(\n \"Number of tiles must be between 2 and {} (you \\\n asked for {}).\".format(\n TILE_LIMIT, number_tiles\n )\n )", "def check_correctness(sol_list, board, pents):\n # All tiles used\n if len(sol_list) != len(pents):\n return False\n # Construct board\n sol_board = np.zeros(board.shape)\n seen_pents = [0]*len(pents)\n for pent, coord in sol_list:\n pidx = get_pent_idx(pent)\n if seen_pents[pidx] != 0:\n return False\n else:\n seen_pents[pidx] = 1\n if not add_pentomino(sol_board, pent, coord, True, pents): \n return False\n \n # Check same number of squares occupied\n if np.count_nonzero(board) != np.count_nonzero(sol_board):\n return False\n # Check overlap\n if np.count_nonzero(board) != np.count_nonzero(np.multiply(board, sol_board)):\n return False\n \n return True", "def check_boards(self):\n succesful = True\n marker = self.game.player\n print(f\"-----Starting check_winning_boards-----\")\n winning_boards= [\n [\n [marker]*3,\n [\" \"]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [marker]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [\" \"]*3,\n [marker]*3\n ],\n [\n [marker, \" \", \" \"],\n [marker, \" \", \" \"],\n [marker, \" \", \" \"]\n ],\n [\n [\" \",marker, \" \"],\n [\" \",marker, \" \"],\n [\" \",marker, \" \"]\n ],\n [\n [\" \", \" \",marker],\n [\" \", \" \",marker],\n [\" \", \" \",marker]\n ],\n [\n [marker, \" \", \" \"]\n ,[\" \", marker,\" \"],\n [\" \", \" \",marker]\n ],\n [\n [\" \", \" \", marker],\n [\" \",marker, \" \"],\n [marker, \" \", \" \"]\n ]\n ]\n for board in 
winning_boards:\n if self.game.check_win_conditions(board) != -10:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n marker = self.game.ai_player\n print(f\"-----Starting check_winning_boards-----\")\n winning_boards= [\n [\n [marker]*3,\n [\" \"]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [marker]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [\" \"]*3,\n [marker]*3\n ],\n [\n [marker, \" \", \" \"],\n [marker, \" \", \" \"],\n [marker, \" \", \" \"]\n ],\n [\n [\" \",marker, \" \"],\n [\" \",marker, \" \"],\n [\" \",marker, \" \"]\n ],\n [\n [\" \", \" \",marker],\n [\" \", \" \",marker],\n [\" \", \" \",marker]\n ],\n [\n [marker, \" \", \" \"]\n ,[\" \", marker,\" \"],\n [\" \", \" \",marker]\n ],\n [\n [\" \", \" \", marker],\n [\" \",marker, \" \"],\n [marker, \" \", \" \"]\n ]\n ]\n for board in winning_boards:\n if self.game.check_win_conditions(board) != 10:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n \n tie_boards = [\n [ \n [\"O\",\"O\",\"X\"],\n [\"X\",\"O\",\"O\"],\n [\"X\",\"X\",\" \"]\n ],\n [\n [\"O\",\"X\",\" \"],\n [\" \",\"X\",\" \"],\n [\" \",\"O\",\" \"]\n ],\n [\n ['O', 'O', 'X'],\n ['X', 'X', 'O'],\n ['O', 'O', 'X']\n ]\n ]\n for board in tie_boards:\n if self.game.check_win_conditions(board) != 0:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n\n print(f\"-----Ending check_winning_boards-----\")", "def test_generate_board_width_greater_than_height(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 19\n height = 10\n mines = int(width * height / 2)\n\n # act\n game.generate_board(width, height, mines)\n\n # assert\n self.assertEqual(width, len(game.board[0]), 'Board width incorrect.')\n self.assertEqual(height, len(game.board), 'Board height incorrect.')\n\n minesFound = (sum(game.board[row][col].is_mine for col in range(width)\n for row in range(height)))\n\n self.assertEqual(mines, minesFound,\n 'Wrong number of mines found.')", "def check_correct_numbers(puzzle_size: int, puzzle_list: list) -> None:\n for number in range(puzzle_size * puzzle_size):\n if number not in puzzle_list:\n raise ParsingError(\"Puzzle does not contain expected numbers.\")", "def multi_player_support(self, num_of_players):\n if self.screen['columns'] / num_of_players > 40:\n return True\n else:\n return False", "def is_valid_board_size(board_size):\n if not isinstance(board_size, int):\n return False\n if board_size < 3 or board_size > Board.LARGEST_BOARD_SIZE:\n return False\n return True", "def is_valid_board(self):\n total = sum(range(1, self.n+1))\n d = {x : [set([]), set([])] for x in range(1, self.n+1)}\n for row_index in range(self.n):\n for col_index in range(self.n):\n num = self.board[row_index][col_index]\n try:\n if row_index in d[num][0] or col_index in d[num][1]:\n print(\"Invalid solution.\")\n return\n except KeyError:\n print(\"Unsolved solution.\") # d[0]\n return\n\n d[num][0].add(row_index)\n d[num][1].add(col_index)\n print(\"Valid solution!\")", "def test_tiles_to_bounds():\n tiles = [morecantile.Tile(x=150, y=182, z=9), morecantile.Tile(x=151, y=182, z=9)]\n assert len(utils.tiles_to_bounds(tiles)) == 4", "def is_board_valid(bd):\n return is_rows_valid(bd) and is_cols_valid(bd) and is_sqrs_valid(bd)", "def _validate_tiling(inst: Any, attr: Any, value: List[List[float]]) -> None:\n if len(value) == 0:\n raise ValueError(\"Tiling must have at least 1 row\")\n\n if any(len(t) == 0 for t in value):\n raise ValueError(\"Tiling must have at least 1 column\")\n\n if min(len(t) for t in value) != max(len(t) 
for t in value):\n raise ValueError(\"Tiling must have the same number of column for each row.\")", "def check_grid(self) -> None:\n if not len(self.grid) == 81:\n raise ValueError(\"Grid does not have 81 elements. Aborting\")", "def test_generate_board_height_greater_than_width(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 9\n height = 17\n mines = int(width * height / 2)\n\n # act\n game.generate_board(width, height, mines)\n\n # assert\n self.assertEqual(width, len(game.board[0]), 'Board width incorrect.')\n self.assertEqual(height, len(game.board), 'Board height incorrect.')\n\n minesFound = (sum(game.board[row][col].is_mine for col in range(width)\n for row in range(height)))\n\n self.assertEqual(mines, minesFound,\n 'Wrong number of mines found.')", "def _validate_elem_length(max_num_levels, elems_flat, axis):\n assertions = []\n\n elem_length = ps.shape(elems_flat[0])[axis]\n\n # The default size limit will overflow a 32-bit int, so make sure we're\n # using 64-bit.\n size_limit = 2**(ps.cast(max_num_levels, np.int64) + 1)\n enough_levels = ps.less(ps.cast(elem_length, np.int64), size_limit)\n enough_levels_ = tf.get_static_value(enough_levels)\n if enough_levels_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n enough_levels, True,\n message='Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis=={}`.'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit)))\n elif not enough_levels_:\n raise ValueError(\n 'Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis == {}`'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit))\n\n is_consistent = ps.reduce_all([ps.equal(ps.shape(elem)[axis], elem_length)\n for elem in elems_flat[1:]])\n\n is_consistent_ = tf.get_static_value(is_consistent)\n if is_consistent_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n is_consistent, True,\n message='Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat])))\n elif not is_consistent_:\n raise ValueError(\n 'Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat]))\n return elem_length, assertions", "def is_in_check(self, player):\n # List of coords in board\n col = ['a','b','c','d','e','f','g','h','i'] # the columns\n a = []\n for i in range(10):\n a.append([j + str(i+1) for j in col])\n \n # Flatten the list\n board_coords = []\n for sublist in a:\n for coord in sublist:\n board_coords.append(coord)\n \n # getting each object in the board for a player\n pieces_coords = []\n pieces_left = []\n for row in range(10):\n for column in range(9):\n if self.get_board()[row][column] is not None and self.get_board()[row][column].get_color() == player.upper():\n # pieces left on the board for the player\n pieces_coords.append((row, column))\n pieces_left.append(self.get_board()[row][column])\n \n p_b_coord = (pieces_coords, board_coords)\n \n counter = 0 \n for piece_coord in pieces_coords: \n for board_coord in board_coords: \n translated_index = self.column_to_letter(piece_coord[1]) + str(piece_coord[0]) \n piece = self.get_piece_type(translated_index)\n if piece is not None:\n if piece.check_legal(translated_index, board_coord, self.get_board(), self.get_game_state()) == True:\n counter += 1\n print(counter)\n if counter == 0:\n self._current_state = upper(player) + 
'_WON'\n return True \n return False", "def illegal(self):\r\n ## First checks for integer value\r\n if type(self.blockList) is int: return 1\r\n ## Then checks for 6 rows\r\n if len(self.blockList) > 6: return 1\r\n for row in self.blockList:\r\n ## Then checks that each row has 6 columns\r\n if len(self.blockList[row]) > 6: return 1\r\n for column in self.blockList[row]:\r\n ## 18 blocks is the maximum number of blocks that can be on the board\r\n if block < 0 or block > 18: return 1\r\n return 0", "def check_numbers_present(puzzle_size: int, puzzle_list: list) -> None:\n if puzzle_size <= 0:\n raise ParsingError(\"Puzzle length must be greater than 0\")\n if puzzle_size * puzzle_size != len(puzzle_list):\n raise ParsingError(\"Puzzle does not contain expected amount of numbers.\")", "def _checkCells(self):\r\n if(self.startCell.isEmpty()):\r\n raise IllegalMoveException(\"No pawn in start cell\")\r\n if(self.endCell.isOccupied()):\r\n raise IllegalMoveException(\"Targeted cell is already occupied\")\r\n return True", "def check_valid_placement(n: int, row: int, col: int, grid: List) -> bool:\n if SudokuGrid.in_square(n, row, col, grid) or \\\n SudokuGrid.in_row(n, row, col, grid) or \\\n SudokuGrid.in_col(n, row, col, grid):\n return True\n return False", "def checkBoardValid(self):\n for i in range(9):\n for j in range(9):\n if self.board[i, j] == 0:\n continue\n\n if not self.isPossibleAssign((i, j), self.board[i, j]):\n return False\n\n return True", "def is_legal_move(player, row_from, col_from, row_to, col_to):\r\n illegal_moves = [(0, 0), (2, 0), (0, 4), (2, 4)]\r\n\r\n \"\"\"special moves that are move available according to diagram\r\n List of tuples to and from values that are not possible\"\"\"\r\n moves_not_permitted = [[(0, 2), (1, 1)], [(0, 2), (1, 3)], [(1, 1), (2, 2)], [(1, 3), (2, 2)]]\r\n row_diff = abs(row_from - row_to)\r\n col_diff = abs(col_from - col_to)\r\n\r\n if player == 'hounds':\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and column difference isn't more than 1\r\n \"\"\"\r\n if (col_to - col_from) < 0: # no moves to the left of the board\r\n return False\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n \"\"\"When player is a hare\"\"\"\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and column difference isn't more than 1\"\"\"\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, 
col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n\r\n else:\r\n return False", "def check_board(board_state, player_symbol, display_message = False):\n\n is_board_completely_filled = board_state.isalpha()\n\n indices_set = set([ind+1 for ind, val in enumerate(board_state) if val == player_symbol])\n\n if {1, 2, 3}.issubset(indices_set) or {4, 5, 6}.issubset(indices_set) or {7, 8, 9}.issubset(indices_set):\n\n if display_message:\n print(\"Row completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n\n if {1, 4, 7}.issubset(indices_set) or {2, 5, 8}.issubset(indices_set) or {3, 6, 9}.issubset(indices_set):\n\n if display_message:\n print(\"Column completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n if {1, 5, 9}.issubset(indices_set) or {3, 5, 7}.issubset(indices_set):\n\n if display_message:\n print(\"Diagonal completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n\n if is_board_completely_filled:\n\n if display_message:\n print(\"Game is drawn...!\")\n\n return \"Draw\"\n\n return False", "def check_won(grid):\r\n for i in range(len(grid)):\r\n for j in range(len(grid[i])):\r\n if grid[i][j] >= 32:\r\n return True \r\n return False", "def _check_winning_combinations(board, player):\n winning_combinations = (\n ((0, 0), (0, 1), (0, 2)),\n ((1, 0), (1, 1), (1, 2)),\n ((2, 0), (2, 1), (2, 2)),\n ((0, 0), (1, 0), (2, 0)),\n ((0, 1), (1, 1), (2, 1)),\n ((0, 2), (1, 2), (2, 2)),\n ((0, 0), (1, 1), (2, 2)),\n ((0, 2), (1, 1), (2, 0))\n )\n\n if any(combination for combination in winning_combinations if _is_winning_combination(board, combination, player)):\n return player\n\n return None", "def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True", "def validatePosition(boardsize, pos):\n return pos.x in range(0, boardsize) and pos.y in range(0,boardsize)", "def test_generate_board_max_mines(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 10\n height = 12\n\n # act\n game.generate_board(width, height, width * height - 1)\n\n # assert\n self.assertEqual(width, len(game.board[0]), 'Board width incorrect.')\n self.assertEqual(height, len(game.board), 'Board height incorrect.')\n\n minesFound = (sum(1 for row in range(height)\n for col in range(width) if game.board[row][col].is_mine))\n\n self.assertEqual(width * height - 1, minesFound,\n 'Wrong number of mines found.')", "def enough_players():\n return True", "def valid_attempt(board):\n for i in range(n):\n if [] in board[i]:\n return 0\n return 1", "def checkCrash(self,player, upperPipes, lowerPipes):\n\t\tplayer['w'] = self.IMAGES['player'].get_width()\n\t\tplayer['h'] = 
self.IMAGES['player'].get_height()\n\n\t\t# if player crashes into ground\n\t\tif (player['y'] + player['h'] >= self.BASEY - 1) or (player['y'] - 10 < 0):\n\t\t\treturn [True, True]\n\t\telse:\n\n\t\t\tplayerRect = pygame.Rect(player['x'], player['y'],\n\t\t\t\t\t\t player['w'], player['h'])\n\t\t\tpipeW = self.IMAGES['pipe'][0].get_width()\n\t\t\tpipeH = self.IMAGES['pipe'][0].get_height()\n\n\t\t\tfor uPipe, lPipe in zip(upperPipes, lowerPipes):\n\t\t\t\t# upper and lower pipe rects\n\t\t\t\tuPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)\n\t\t\t\tlPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)\n\n\t\t\t\t# player and upper/lower pipe hitmasks\n\t\t\t\tpHitMask = self.HITMASKS['player']\n\t\t\t\tuHitmask = self.HITMASKS['pipe'][0]\n\t\t\t\tlHitmask = self.HITMASKS['pipe'][1]\n\n\t\t\t\t# if bird collided with upipe or lpipe\n\t\t\t\tuCollide = self.pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)\n\t\t\t\tlCollide = self.pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)\n\n\t\t\t\tif uCollide or lCollide:\n\t\t\t\t\treturn [True, False]\n\n\t\treturn [False, False]", "def are_sizes_valid(sizes):\n return all(isinstance(size, int) and size >= 16 and size <= 28 for size in sizes)", "def test_generate_board_height_equal_to_width(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 20\n height = 20\n mines = int(width * height / 2)\n\n # act\n game.generate_board(width, height, mines)\n\n # assert\n self.assertEqual(width, len(game.board[0]), 'Board width incorrect.')\n self.assertEqual(height, len(game.board), 'Board height incorrect.')\n\n minesFound = (sum(game.board[row][col].is_mine for col in range(width)\n for row in range(height)))\n self.assertEqual(mines, minesFound,\n 'Wrong number of mines found.')", "def test_room_has_tiles(self):\n self.assertEqual(self.room.tile_set.count(), self.room.grid_size ** 2)", "def board_tiles_availability(self):\n for row in range(GameData.rows):\n for col in range(GameData.columns):\n if self.board[row][col] == 0:\n return False\n # Game is draw, no more moves left!\n return True", "def _check_winner_3d(self, board, action, height, player=None):\n slices = []\n slices.append(board[action[0], :, :])\n slices.append(board[:, action[1], :])\n slices.append(board[:, :, height])\n # todo: stack with a loop for Score N. 
Also, these slices don't have to be checked all the time, maybe add some if-conditions\n slices.append(np.stack((board[0, 0, :], board[1, 1, :], board[2, 2, :], board[3, 3, :]), axis=0))\n slices.append(np.stack((board[0, 3, :], board[1, 2, :], board[2, 1, :], board[3, 0, :]), axis=0))\n\n temp = 0\n for slice in slices:\n temp = self.check_combo(slice, player)\n if temp != 0:\n break\n winner = temp\n\n #game_over = winner != 0 or len(np.argwhere(self.board).reshape(-1, )) == 0\n return winner", "def is_valid(data):\n check = [0 for i in range(4)]\n # calculate how many ships are with different lengths\n for i in range(10):\n for j in range(10):\n if type(data[i][j]) == Ship:\n check[data[i][j]._Ship__length - 1] += 1\n # check ships\n for i in range(4):\n if check[i] != (i + 1) * (4 - i):\n return False\n # check corners\n for i in range(1, 10):\n for j in range(10):\n try:\n if type(data[i - 1][j + 1]) == Ship and \\\n type(data[i][j]) == Ship:\n return False\n except:\n pass\n try:\n if type(data[i - 1][j - 1]) == Ship and \\\n type(data[i][j]) == Ship:\n return False\n except:\n pass\n return True", "def check_won (grid):\r\n for i in range (4):\r\n for j in range (4):\r\n if grid[i][j] >= 32:\r\n return True\r\n return False", "def isValid(self, game):\n if self.x == None or self.y == None or self.team == None:\n return False\n \n if self.y < 0 or self.y >= game.map.height:\n return False\n if self.x < 0 or self.x >= game.map.height:\n return False\n\n citytile = game.map.getCell(self.x, self.y).citytile\n if citytile == None:\n return False\n \n if not citytile.canBuildUnit():\n return False\n\n # TODO handle multiple units building workers in same turn\n if game.workerUnitCapReached(self.team):\n return False\n \n return True", "def set_n_players(self):\n complain = \"\"\n while True:\n clear_output()\n try:\n self.n_players = int(\n input(f\"{complain}Please insert the number of players (between 2 to 6): \\n\"))\n if self.n_players >= 2 and self.n_players < 7:\n self.start_troops = 120 / self.n_players\n break\n elif self.n_players < 2:\n complain = \"Not enough players!\\n\"\n elif self.n_players >= 7:\n complain = \"Too many players!\\n\"\n except:\n complain = \"Not a valid number!\\n\"\n pass", "def check_won (grid):\r\n for i in range(4): \r\n for j in range(4):\r\n if grid[i][j] >= 32:\r\n return True\r\n return False", "def check_collisions(game_grid, player_list):\n\n for cycle in player_list:\n cycle.check_collision(game_grid)", "def checkAll(self, player, board):\n #retrieve current moves of the player who made the last move\n currentMoves = self.getPlayerMoves(player,board)\n\n #check column win\n is_col_win = self.checkWin(currentMoves, self.columnWins)\n if is_col_win != False:\n return True\n\n #check row win\n is_row_win = self.checkWin(currentMoves, self.rowWins)\n if is_row_win != False:\n return True\n\n #check diagonal win\n is_diag_win = self.checkWin(currentMoves, self.diagonalWins)\n if is_diag_win != False:\n return True\n else:\n return False", "def is_board_valid(board, rows, cols):\n return all(all(is_cell_valid(board, r, c) for c in xrange(cols)) for r in xrange(rows))", "def check_win(players: List[Player]) -> Tuple[bool, Optional[Player]]:\n total_players = len(players)\n for player in players:\n if player.influence == 0:\n total_players -= 1\n if total_players == 1:\n for player in players:\n if player.influence >0:\n return True, player\n return False, None", "def shipvalidator(point1: tuple, point2: tuple, board: list):\n valid = True\n # Is 
horizontal\n if point1[0] == point2[0]:\n # No collisions\n for i in range(min(point1[1], point2[1]), max(point1[1], point2[1])):\n if board[point1[0]][i] != \" \":\n valid = False\n # Is vertical\n elif point1[1] == point2[1]:\n # Doesn't overlap\n for i in range(min(point1[0], point2[0]), max(point1[0], point2[0])):\n if board[i][point1[1]] != \" \":\n valid = False\n else:\n valid = False\n return valid", "def test_room_has_tiles(self):\n self.assertGreaterEqual(self.room.tile_set.count(), 2)", "def validate_invasion(self, player, territory):\n current_territory = [ts.territory for ts in self.territory.filter(player=player)]\n valid_coordinates = []\n for t in current_territory:\n valid_coordinates += t.get_valid_moves()\n valid_moves = [territory.arena.get_by_coordinates(coord) for coord in set(valid_coordinates)]\n return territory in valid_moves", "def checkValidMove(self, move):\n boardCopy = copy.deepcopy(self)\n tilesChange = False\n if move == Move.UP:\n boardCopy.moveUp()\n elif move == Move.DOWN:\n boardCopy.moveDown()\n elif move == Move.LEFT:\n boardCopy.moveLeft()\n elif move == Move.RIGHT:\n boardCopy.moveRight()\n else:\n raise ValueError('Invalid Move was input')\n \n for i in range(4):\n for j in range(4):\n if boardCopy.getTile(i,j) != self.getTile(i,j):\n tilesChange = True\n del(boardCopy)\n return tilesChange", "def check(self):\n winner = None\n count = 0\n\n for y in range(self.gridSize):\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for item in self.grid[y]:\n # Check row of the grid\n if item == \"P1\":\n P1 += 1\n elif item == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for x in range(self.gridSize):\n # Check column of the grid\n if self.grid[x][y] == \"P1\":\n P1 += 1\n elif self.grid[x][y] == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for y in range(self.gridSize):\n # Check right top to left bottom across the grid\n for x in range(self.gridSize):\n if x == y:\n if self.grid[x][y] == \"P1\":\n P1 += 1\n elif self.grid[x][y] == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for y in range(self.gridSize):\n # Check the left top to the right bottom across the grid\n for x in range(self.gridSize - 1, -1, -1):\n # Check how many filled spaces there are\n if \".\" not in self.grid[y][x]:\n count += 1\n if x + y == self.gridSize - 1:\n if self.grid[y][x] == \"P1\":\n P1 += 1\n elif self.grid[y][x] == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n # Check if there is a winner if so return the winner\n if winner != None:\n return winner\n # Check if the fields that are filled are equal to the possible spaces to be filled in the grid\n if count == self.gridSize**2:\n return \"Tie\"", "def verify_valid_game(self, lotto_game:List[int]):\n is_valid = True;\n lotto_game_temp = []\n for number in lotto_game:\n if number not in lotto_game_temp:\n lotto_game_temp.append(number)\n else:\n print(number, lotto_game, lotto_game_temp)\n is_valid = False\n raise InvalidLottoGame\n \n lotto_game_temp_length = len(lotto_game_temp)\n if lotto_game_temp_length != LOTTO_GAME_LENGTH and lotto_game_temp_length != LOTTO_GAME_WITH_BONUS_LENGTH:\n print(lotto_game_temp)\n is_valid = False\n raise InvalidLottoGame\n\n return is_valid", "def _validate(self) -> None:\n for box in self.boxes:\n if any(box[0] == s[0] and box[1] == s[1] for s 
in self.wall_squares):\n raise RuntimeError('In illegal state. Box should not be inside wall.')\n if box[0] == self.current_location[0] and box[1] == self.current_location[1]:\n raise RuntimeError('In illegal state. Box should not be inside player.')\n if any(self.current_location[0] == s[0] and self.current_location[1] == s[1] for s in self.wall_squares):\n raise RuntimeError('In illegal state. Player should not be inside wall.')", "def check_won (grid):\r\n p=0\r\n for k in range(len(grid)):\r\n for g in range(len(grid[k])): \r\n if grid[k][g]>=32:\r\n p+=1\r\n else:\r\n ()\r\n if p>0:\r\n return True\r\n else:\r\n return False", "def check_win(self, player):\n def check_row_win(player):\n for row in self.game_state:\n if player == row[0] == row[1] == row[2]:\n return True\n return False\n\n def check_column_win(player):\n # For doing a column check, transpose the grid and do a row check\n trans_game_state = numpy.transpose(self.game_state)\n for row in trans_game_state:\n if player == row[0] == row[1] == row[2]:\n return True\n return False\n\n def check_diag_win(player):\n # Left to right diagonal\n if player == self.game_state[0][0] == self.game_state[1][1] == self.game_state[2][2]:\n return True\n # Right to left diagonal\n if player == self.game_state[0][2] == self.game_state[1][1] == self.game_state[2][0]:\n return True\n return False\n\n if check_column_win(player) or check_diag_win(player) or check_row_win(player):\n return True\n return False", "def is_valid(queens):\n n = len(queens)\n cols = range(n)\n return n == len(set(queens[i] + i for i in cols)) == len(set(queens[i] - i for i in cols))", "def check_test_case_validity(test_case_dataset):\n for i, test_case in enumerate(test_case_dataset):\n assert \"NAME\" in test_case, f\"Test case #{i} Invalid NAME\"\n\n assert (\n \"N_STATES\" in test_case\n and isinstance(test_case[\"N_STATES\"], int)\n and 0 < test_case[\"N_STATES\"] <= 64\n ), f\"Test case #{i} Invalid N_STATES\"\n\n assert (\n \"N_SYMBOLS\" in test_case\n and isinstance(test_case[\"N_SYMBOLS\"], int)\n and 0 < test_case[\"N_SYMBOLS\"] <= 64\n ), f\"Test case #{i} Invalid N_SYMBOLS\"\n\n assert (\n \"PLAYER_INPUT_SIZES\" in test_case\n and isinstance(test_case[\"PLAYER_INPUT_SIZES\"], list)\n and len(test_case[\"PLAYER_INPUT_SIZES\"]) > 1\n and all(\n (isinstance(x, int) and x > 0) for x in test_case[\"PLAYER_INPUT_SIZES\"]\n )\n ), f\"Test case #{i} Invalid PLAYER_INPUT_SIZES\"\n\n assert \"REPETITIONS\" not in test_case or (\n isinstance(test_case[\"REPETITIONS\"], int) and 0 < test_case[\"REPETITIONS\"]\n ), f\"Test case #{i} Invalid REPETITIONS\"\n\n assert \"DEBUG\" not in test_case or isinstance(\n test_case[\"DEBUG\"], bool\n ), f\"Test case #{i} Invalid DEBUG\"\n\n assert \"VIRTUAL_MACHINE\" not in test_case or (\n isinstance(test_case[\"VIRTUAL_MACHINE\"], str)\n and test_case[\"VIRTUAL_MACHINE\"] in [\"./spdz2k-party.x\", \"./semi2k-party.x\"]\n ), f\"Test case #{i} Invalid VIRTUAL_MACHINE\"\n\n if \"PLAYER_DATA\" in test_case:\n assert isinstance(\n test_case[\"PLAYER_DATA\"], list\n ), f\"Test case #{i} Invalid PLAYER_DATA - Not a list\"\n for j, size in enumerate(test_case[\"PLAYER_INPUT_SIZES\"]):\n player_data = test_case[\"PLAYER_DATA\"][j]\n max_value = test_case[\"N_SYMBOLS\"]\n assert (\n isinstance(player_data, list)\n and len(player_data) == size\n and all(\n (isinstance(x, int) and 0 <= x <= max_value)\n for x in player_data\n )\n ), f\"Test case #{i} Invalid PLAYER_DATA - User {j} inputs are invalid\"", "def test_gameboard_size(self):\n 
self.assertEqual(self.gameBoard.get_size(), (100, 100))\n self.assertEqual(self.gameBoard.get_columns(), 100)\n self.assertEqual(self.gameBoard.get_rows(), 100)", "def is_valid(x, y):\n return (x >= 0) & (x < BOARD_SIZE) & (y >= 0) & (y < BOARD_SIZE)", "def is_valid(field):\n requirement = {4: 1, 3: 2, 2: 3, 1: 4}\n ships = {4: 0, 3: 0, 2: 0, 1: 0}\n used = []\n for row in range(len(field)):\n for column in range(len(field[row])):\n if row < 10 or column < 10:\n coord = change((column, row))\n ship = has_ship(coord, field)\n if ship:\n ship = ship_size(coord, field)\n if ship and ship[0] > 0 and ship[1][0] not in used:\n try:\n ships[ship[0]] += 1\n used.extend(ship[1])\n except KeyError:\n return False\n else:\n return False\n return requirement == ships", "def game_tie(self):\n\n shape = self.board.shape\n if np.count_nonzero(self.board) == (shape[0] * shape[1]):\n # The board is full\n player = 0\n return True\n else:\n return False", "def test_invalid_game_setup(self):\n with self.assertRaises(ValueError):\n self._game.add_player(self._creator, 1)\n with self.assertRaises(ValueError):\n self._game.add_player(self._users[1], 0)\n for x in xrange(1, 4):\n self._game.add_player(self._users[x], x)\n with self.assertRaises(ValueError):\n self._game.add_player(self._users[4], 1)", "def _check_enough_unique_cards(self, raw_game_board: list, game_specs: list):\n self._katakana_database.create_katakana_database_connection()\n\n if len(raw_game_board) == game_specs[3]:\n pass\n\n elif len(raw_game_board) < game_specs[3]:\n additional_cards = self._get_all_katakanas()\n needed_unique_cards = game_specs[3]-len(raw_game_board)\n\n for i in range(needed_unique_cards):\n random.shuffle(additional_cards)\n raw_game_board.append(\n (additional_cards[i][0], additional_cards[i][1]))\n\n elif len(raw_game_board) < game_specs[3]:\n cards_to_be_removed = len(raw_game_board) - game_specs[3]\n random.shuffle(raw_game_board)\n for i in range(cards_to_be_removed):\n raw_game_board.pop()\n\n return raw_game_board", "def valid_tile(self, i, j):\n if (i >= 0 and i < self.rows) and (j >= 0 and j < self.cols):\n return True\n return False", "def validate_data(values):\n try:\n [int(value) for value in values]\n if len(values) != 6:\n raise ValueError(\n f'Exactly 6 values are required - you provided {len(values)}'\n )\n except ValueError as e:\n print(f'Invalid data entered: {e}, please try again!\\n')\n return False\n\n return True", "def test_get_valid_placement(self):\n # returns different locations on the board (indexed locations)\n self.assertEqual(tictactoe.get_valid_placement(\"1\"), 0)\n self.assertEqual(tictactoe.get_valid_placement(\"5\"), 4)\n self.assertEqual(tictactoe.get_valid_placement(\"9\"), 8)\n self.assertEqual(tictactoe.get_valid_placement(\"3 4 5\"), 2)\n\n # returns \"error 10000\", the location is NOT on the board\n self.assertEqual(tictactoe.get_valid_placement(\"0\"), 10000)\n self.assertEqual(tictactoe.get_valid_placement(\"-1\"), 10000)\n self.assertEqual(tictactoe.get_valid_placement(\"10000000000000000000000000000000000000000000\"), 10000)\n self.assertEqual(tictactoe.get_valid_placement(\"10\"), 10000)\n self.assertEqual(tictactoe.get_valid_placement(\"345\"), 10000)\n\n # if input is not a string of int or int, returns None\n self.assertIsNone(tictactoe.get_valid_placement(\"three\"))\n self.assertIsNone(tictactoe.get_valid_placement(\"-three\"))\n self.assertIsNone(tictactoe.get_valid_placement(\"three four five\"))\n self.assertIsNone(tictactoe.get_valid_placement(\"help\"))\n 
self.assertIsNone(tictactoe.get_valid_placement(\"@username\"))", "def valid_move(self, player, move):\n if self.rounds < len(self.players):\n if ((False in [(self.board).in_bounds(pt) for pt in move])\n or (self.board).overlap(move)\n or not (True in [(pt in player.corners) for pt in move])):\n return (False)\n else:\n return (True)\n\n elif ((False in [(self.board).in_bounds(pt) for pt in move])\n or (self.board).overlap(move)\n or (self.board).adj(player, move)\n or not (self.board).corner(player, move)):\n return (False)\n\n else:\n return (True)", "def _check_hospital_capacity(self):\n\n errors = []\n for hospital in self.hospitals:\n if len(hospital.matching) > hospital.capacity:\n errors.append(\n ValueError(\n f\"{hospital} is matched to {hospital.matching} which \"\n f\"is over their capacity of {hospital.capacity}.\"\n )\n )\n\n if errors:\n raise Exception(*errors)\n\n return True", "def is_valid(field, ships):\n requirement = {i: 5 - i for i in range(4, 0, -1)}\n found_ships = {i: 0 for i in range(4, 0, -1)}\n used = set()\n for row in range(len(field)):\n for column in range(len(field[row])):\n if row < 10 and column < 10:\n ship = has_ship(field, (row, column))\n if ship:\n ship = find_ship(ships, (row, column))\n if ship.bow not in used:\n try:\n found_ships[ship.length] += 1\n used.add(ship.bow)\n except KeyError:\n return False\n else:\n return False\n return requirement == found_ships", "def test_cell_existence(board: list, i: int, j: int) -> bool:\n return not (i < 0 or i > len(board)-1 or j < 0 or j > len(board)-1)", "def check_won(board,player):\n # X axis\n if (\n (len(set(board[1:4])) == 1 and ' ' not in set(board[1:4])) or\n (len(set(board[4:7])) == 1 and ' ' not in set(board[4:7])) or\n (len(set(board[7:10])) == 1 and ' ' not in set(board[7:10]))\n ):\n print('Player %s, you win!' % player)\n display_board(board)\n return True\n # Y axis\n if (\n (len(set(board[1::3])) == 1 and ' ' not in set(board[1::3])) or\n (len(set(board[2::3])) == 1 and ' ' not in set(board[2::3])) or\n (len(set(board[3::3])) == 1 and ' ' not in set(board[3::3]))\n ):\n print('Player %s, you win!' % player)\n display_board(board)\n return True\n # Diagonals\n if (\n (len(set(board[1::4])) == 1 and ' ' not in set(board[1::4])) or\n (len(set(board[3:9:2])) == 1 and ' ' not in set(board[3:9:2]))\n ):\n print('Player %s, you win!' 
% player)\n display_board(board)\n return True\n\n return False", "def assert_bounds(self, pos):\n row, col = pos\n\n if not (row in range(self.BOARD_SIZE) and\n col in range(self.BOARD_SIZE)):\n raise IndexError(\"Cannot place a worker out of board bounds\")", "def is_valid(field):\r\n taken_coordinates = []\r\n count_ships = [0]*4\r\n # counting ships\r\n try:\r\n for row in range(10):\r\n for cell in range(10):\r\n if (row, cell) not in taken_coordinates and\\\r\n has_ship((row, cell), field):\r\n taken_coordinates.extend(ship_coordinates((row, cell), field))\r\n count_ships[ship_size((row, cell), field) - 1] += 1\r\n except IndexError:\r\n return False\r\n # check if the amount of ship is correct and if they are not crossing\r\n if count_ships == [i for i in range(4, 0, -1)] and\\\r\n len(taken_coordinates) == len(set(taken_coordinates)):\r\n return True\r\n return False", "def is_valid_sudoku_board(board):\n if type(board)!=list:#checks that the Type of the board is list\n return False\n for i in range(len(board)):\n if type(board[i])!=list:#checks that the Type of a row is list\n return False\n if len(board[i])!=9:#checks that the Number of numbers in a row is 9\n return False\n if len(board)!=9:#checks that the number of rows is 9\n return False\n for row in range(len(board)):\n for col in range(len(board[row])):\n if type(board[row][col])!=int or board[row][col]>9 or board[row][col]<0:#checks that the Type and the value is valid\n return False\n if is_not_dup_board(board)== False:#checks if every block col and row has duplicate numbers\n return False\n return True", "def test_generate_board_negative_mines_errors(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 10\n height = 12\n mines = -1\n\n # act and expect error\n with self.assertRaises(ValueError):\n game.generate_board(width, height, mines)", "def _check(self):\n if not isinstance(self.fc_layers, tuple):\n raise TypeError(f'fc_layers require tuple, get {type(self.fc_layers)}')\n if not isinstance(self.use_dropout, tuple):\n raise TypeError(f'use_dropout require tuple, get {type(self.use_dropout)}')\n if not isinstance(self.drop_prob, tuple):\n raise TypeError(f'drop_prob require tuple, get {type(self.drop_prob)}')\n if not isinstance(self.use_activation, tuple):\n raise TypeError(f'use_activation require tuple, get {type(self.use_activation)}')\n l_fc_layer = len(self.fc_layers)\n l_use_drop = len(self.use_dropout)\n l_drop_prob = len(self.drop_prob)\n l_use_activation = len(self.use_activation)\n pass_check = l_fc_layer >= 2 and l_use_drop < l_fc_layer and l_drop_prob < l_fc_layer and l_use_activation < l_fc_layer and l_drop_prob == l_use_drop\n if not pass_check:\n msg = 'Wrong BaseDiscriminator parameters!'\n raise ValueError(msg)", "def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves", "def _check_size(size):\r\n\r\n if not isinstance(size, (list, tuple)):\r\n raise ValueError(\"Size must be a tuple\")\r\n if len(size) != 2:\r\n raise ValueError(\"Size must be a tuple of length 2\")\r\n if size[0] < 0 or size[1] < 0:\r\n raise ValueError(\"Width and height must be >= 0\")\r\n\r\n 
return True", "def _checkPlayer(self):\r\n pawn = self.startCell.getPawn()\r\n if(not pawn.owner == self.player):\r\n message = (\"Player (%r) is not allowed to move that pawn (%r)\" %\r\n (self.player, pawn))\r\n raise IllegalMoveException(message)", "def check_ship_fits(self, ship_length, row, column, orientation):\n if orientation == 'H':\n if column + ship_length > 10:\n if self.user == 'player':\n print('SHIP DOES NOT FIT, TRY AGAIN!\\n')\n return False\n else:\n return False\n else:\n return True\n else:\n if row + ship_length > 10:\n if self.user == 'player':\n print('SHIP DOES NOT FIT, TRY AGAIN!\\n')\n return False\n else:\n return False\n else:\n return True", "def check_won (grid):\r\n w=False\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]>=32:\r\n w=True\r\n break\r\n return w", "def boundary_checker(stage, player_new):\n # Go through each possible direction a player can travel\n if player_new[0] == 0:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n elif player_new[1] == 0:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n elif player_new[0] > stage[0]:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n elif player_new[1] > stage[1]:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n # Flag validity if player still within bounds of map\n else:\n valid = True\n\n return valid", "def is_game_over(self):\n\n # This checks whether or not the board is full...\n if len(self.board.values()) == 100 and \\\n 0 not in self.board.values():\n p1 = self._longest_chain(1)\n p2 = self._longest_chain(2)\n if len(p1) > len(p2):\n return 1\n elif len(p2) > len(p1):\n return 2\n else:\n return 0\n\n # If it's not full. We check for boxes\n else:\n for x in range(self.width-1):\n for y in range(self.height-1):\n slice = self._slice((x,y), (2,2))\n if 0 not in slice[0] and 0 not in slice[1]:\n # is this slice a box?\n if slice[0][0] == slice[0][1] and \\\n slice[0][1] == slice[1][0] and \\\n slice[1][0] == slice[1][1]:\n return slice[0][0] # winner\n\n return -1 # game is not over", "def _test_player_list_size(self):\n return len(self.player_list)", "def test_failed_draw_case():\n hands = draw_hands(4)[0]\n start_tile = pick_start_tile(hands)\n\n\n # testing that I'm handling weird start players right\n fail_draws = 0\n good_draws = 0\n for i in range(10000):\n hands = draw_hands(4)[0]\n start_tile = pick_start_tile(hands)[0]\n if start_tile is None:\n fail_draws += 1\n else:\n good_draws += 1\n\n return fail_draws, good_draws", "def check_for_validity_puzzle_1(limits: tuple, rep_char: str, password: str):\n\n reps = password.count(rep_char)\n\n lower, upper = limits\n\n if lower <= reps <= upper:\n return True\n else:\n return False", "def checkValid(self) -> None:\n if len(self.times) != len(self.milestones):\n raise ValueError(\"Times and milestones are not the same length\")\n if len(self.times)==0:\n raise ValueError(\"Trajectory is empty\")\n for (tprev,t) in zip(self.times[:-1],self.times[1:]):\n if tprev > t:\n raise ValueError(\"Timing is not sorted\")\n n = len(self.milestones[0])\n for q in self.milestones:\n if len(q) != n:\n raise ValueError(\"Invalid milestone size\")\n return", "def _validateSpies(self, config, team, sabotaged):\r\n spies = [s for s in team if s in self.getSpies(config)]\r\n \"\"\"If there are more spies in our config than the number of sabotages made \r\n then return True, because this config is compatible with the sabotages made. 
\r\n Otherwise it is not compatible, so return False.\"\"\"\r\n return len(spies) >= sabotaged", "def test_validate_complete_board_all_filled_all_zeroes(self):\n players_scoreboard = {'ones': 0, 'twos': 0, 'threes': 0, 'fours': 0, 'fives': 0,\n 'sixes': 0, 'three of a kind': 0, 'four of a kind': 0, 'full house': 0,\n 'small straight': 0, 'large straight': 0, 'chance': 0, 'yahtzee': 0}\n actual = validate_complete_board(players_scoreboard)\n expected = True\n self.assertEqual(expected, actual)", "def isValid(self, game):\n if self.x == None or self.y == None or self.team == None:\n return False\n\n citytile = game.map.getCell(self.x, self.y).citytile\n if citytile == None:\n return False\n \n if not citytile.canBuildUnit():\n return False\n\n # TODO handle multiple units building workers in same turn\n if game.cartUnitCapReached(self.team):\n return False\n \n return True", "def test_case_06_side_too_big(self):\n self.__assert_equals_test_case([(195, 10, 201)], 'InvalidInput')", "def check_puzzle_unicity(puzzle_list: list) -> None:\n if len(set(puzzle_list)) != len(puzzle_list):\n raise ParsingError(\"Puzzle numbers must be unique.\")", "def is_valid_board(self):\n for (row, col), value in np.ndenumerate(self.final_values): # Iterate through each position\n if not self.__is_valid_value(row, col, value): # Check that the value is valid\n return False # An invalid (duplicate) value was found\n return True", "def __is_board_full(self):\r\n for row in self.__board:\r\n if {self.PLAYER1, self.PLAYER2} & set(row) != 0:\r\n return False\r\n return True", "def col_win(board, player):\n for row in board.T:\n if check_row(row, player):\n return True\n return False" ]
[ "0.6559245", "0.65429807", "0.6363457", "0.63405097", "0.6310487", "0.61935323", "0.61810786", "0.61345714", "0.60889655", "0.6072533", "0.5982412", "0.5976069", "0.5964283", "0.59554005", "0.5949775", "0.59412664", "0.59252197", "0.5920554", "0.59201723", "0.5910528", "0.58921754", "0.5885179", "0.58829325", "0.58699566", "0.5851385", "0.58319217", "0.58312607", "0.58167565", "0.5778928", "0.5770184", "0.57376695", "0.5726052", "0.5717762", "0.5714924", "0.57008696", "0.5698004", "0.5688165", "0.56756765", "0.5669125", "0.565052", "0.56477946", "0.56428385", "0.5636335", "0.5625087", "0.56235456", "0.56125826", "0.56080794", "0.560767", "0.56068635", "0.5603986", "0.56014895", "0.5591384", "0.55878717", "0.5587191", "0.5581519", "0.5572864", "0.5571161", "0.5563078", "0.55597204", "0.55518514", "0.5547989", "0.554221", "0.55399984", "0.55380994", "0.5529614", "0.5528684", "0.55265546", "0.5521894", "0.5511453", "0.5510543", "0.55040437", "0.5494427", "0.5493815", "0.5490437", "0.54901284", "0.5484323", "0.5482476", "0.5464811", "0.5455376", "0.5455042", "0.54539394", "0.5444004", "0.5442869", "0.54362863", "0.5434457", "0.54251057", "0.5413714", "0.5407999", "0.5405891", "0.54013884", "0.53971314", "0.5389438", "0.5362188", "0.5360816", "0.5357468", "0.5356079", "0.53515613", "0.53460133", "0.53399235", "0.5337271" ]
0.8507374
0
Set the colors of the players, if needed. If all players have unique colors, return the players as-is.
def __set_colors(self, players):
    colors = set()
    for p in players:
        if p.get_color() is None:
            continue
        colors.add(p.get_color())
    if len(colors) != 0 and len(colors) != len(players):
        raise ValueError("Each player does not have a unique assigned color.")
    if len(colors) == 0:
        for i, p in enumerate(players):
            p.set_color(BoardPlayer.POSSIBLE_COLORS[i])
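For illustration only, a minimal stand-in shows the path this method takes when no player has a color yet; the StubPlayer class and the color palette below are assumptions for the sketch, not the real BoardPlayer API.

# Hypothetical stand-in for the player objects (not the real BoardPlayer API).
class StubPlayer:
    def __init__(self, color=None):
        self._color = color
    def get_color(self):
        return self._color
    def set_color(self, color):
        self._color = color

ASSUMED_COLORS = ["red", "white", "brown", "black"]  # stand-in for POSSIBLE_COLORS

# No player has a color yet, so each one is assigned a distinct color by
# index, mirroring the final loop of the method above.
players = [StubPlayer(), StubPlayer(), StubPlayer()]
for i, p in enumerate(players):
    p.set_color(ASSUMED_COLORS[i])
print([p.get_color() for p in players])  # ['red', 'white', 'brown']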
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_player_colors() -> List[Tuple[float, float, float]]:\n return PLAYER_COLORS", "def init_players(self):\n complain = \"\"\n players_turn = random.sample(range(self.n_players), self.n_players)\n players_created = {}\n picked_colors = []\n for x in range(self.n_players):\n while True:\n clear_output()\n try:\n color = input(\n f\"{complain}Player {x+1}, please type in one of the following colors: ({', '.join([x.capitalize() for x in self.world.player_colors if x not in picked_colors])}):\\n\").lower()\n if color in self.world.player_colors and color not in picked_colors:\n picked_colors.append(color)\n players_created[players_turn[x]] = Player(\n color.capitalize(), self.start_troops)\n break\n else:\n complain = \"Please enter a valid color\\n\"\n except:\n pass\n\n self.players = [players_created[y] for x in range(\n self.n_players) for y in players_created.keys() if int(y) == x]", "def setColors(self):\n #productive\n profprint()\n self.color= [[0,0,0] for i in range(205)]\n self.color255= self.setColors255()\n for i in range(205):\n for j in range(3):\n self.color[i][j] = self.color255[i][j]/float(255)\n\n return self.color", "def updatePlayer(self, _player):\n if _player.color == 'black': self.players['black'] = _player\n else: self.players['white'] = _player", "def setColors(self):\r\n # productive\r\n profprint()\r\n self.color = [[0, 0, 0] for i in range(MAXCOL)]\r\n self.color255 = self.setColors255()\r\n for i in range(MAXCOL):\r\n for j in range(3):\r\n self.color[i][j] = self.color255[i][j] / float(255)\r\n\r\n return self.color", "def __getColors(self):\n colors = {\"leftSideHighColor\" : \"\", \"leftSideDownColor\" : \"\",\\\n \"rightSideHighColor\" : \"\", \"rightSideDownColor\" : \"\"}\n for team, nestedDict in self.playerPositions.items():\n for player, position in nestedDict.items():\n if 1 == position:\n colors[\"leftSideHighColor\"] = self.playerColors[team][player]\n elif 2 == position:\n colors[\"leftSideDownColor\"] = self.playerColors[team][player]\n elif 3 == position:\n colors[\"rightSideDownColor\"] = self.playerColors[team][player]\n elif 4 == position:\n colors[\"rightSideHighColor\"] = self.playerColors[team][player]\n for key, color in colors.items():\n colors[key] = color.capitalize()\n return colors", "def get_color_options(self):\n mask = (self.all_colors != self.player_1.color) & (self.all_colors != self.player_2.color)\n return self.all_colors[mask]", "def change_square_colors(self):\n\n for flea in self.fleas.sprites():\n flea.square.change_color()", "def change_color():\n return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)", "def test_if_bottom_color_player_well_set(self):\n ui = UIRender(TestUI.image_path)\n ui.set_bottom_player_color(CELTIC_GREEN)\n self.assertEqual(ui.bottom_player_color, CELTIC_GREEN)\n self.assertEqual(ui.top_player_color, SPQR_RED)\n ui.set_bottom_player_color(SPQR_RED)\n self.assertEqual(ui.bottom_player_color, SPQR_RED)\n self.assertEqual(ui.top_player_color, CELTIC_GREEN)", "def player(self):\n return self._color", "def assign_entity_colors(pairs):\n groups = []\n for a, b in pairs:\n for grp in groups:\n if a in grp or b in grp:\n grp.add(a)\n grp.add(b)\n else:\n groups.append(set([a, b]))\n\n colors = {}\n for grp in groups:\n color = random.choice(config.COLORS)\n for id in grp:\n colors[id] = color\n return colors", "def _assign_colours_to_groups(self, groups):\n\n pass", "def draw_players(self):\n if self.player_is_skin:\n self.draw_skin_player(self.id)\n else:\n 
self.draw_colored_player(self.id)\n\n if self.opp_is_skin:\n self.draw_skin_player(3)\n else:\n self.draw_colored_player(3)\n return", "def setLeds(number: int, red: int, green: int, blue: int):\n pass", "def _get_goal_colours() -> List[Tuple[int, int, int]]:\n colour_lst = COLOUR_LIST[:]\n random.shuffle(colour_lst)\n return colour_lst", "def draw_colored_player(self, id):\n if id == self.id:\n pygame.draw.rect(self.screen, self.color_1, pygame.Rect(self.first_player_x, self.first_player_y, 20, 140))\n else:\n pygame.draw.rect(self.screen, self.color_2, pygame.Rect(self.second_player_x, self.second_player_y, 20, 140))\n return", "def shuffle_colors(mutated_genome):\n mutated_genome", "def prepare_colors(self):\n colors = self.colors\n # check whether user has defined colors, if not generate random colors\n if not colors :\n random_colors = [ random.randrange(0, 255) for i in range(3)]\n colors = {\n 'fillColor' : \"rgba({},{},{},0.2)\".format(*random_colors),\n 'strokeColor': \"rgba({},{},{},1)\".format(*random_colors),\n 'pointColor': \"rgba({},{},{},1)\".format(*random_colors),\n 'pointStrokeColor': \"#fff\",\n 'pointHighlightFill': \"#fff\",\n 'pointHighlightStroke': \"rgba({},{},{},1)\".format(*random_colors),\n }\n return colors", "def set_colors(dictionary):\n colors = []\n for i in list(chain(*dictionary.values())):\n color = 'rgb' + str(tuple(np.random.choice(range(256), size=3)))\n colors.append((i, color))\n colors = dict(colors)\n\n return colors", "def change_color_for_id(suit):\n for color in colors.keys():\n if color == suit:\n suit = colors[suit]\n return suit", "def draw(self):\n\n if self.support != \"tablette\":\n for user in self.parent.group.users:\n if user.identifier == 1:\n self.color_user1 = [user.color[0], user.color[1], user.color[2]]\n elif user.identifier == 2:\n self.color_user2 = [user.color[0], user.color[1], user.color[2]]\n if user.identifier == 3:\n self.color_user3 = [user.color[0], user.color[1], user.color[2]]\n else:\n self.color_user4 = [user.color[0], user.color[1], user.color[2]]\n else:\n for user in self.parent.group.users:\n if user.identifier == 1:\n self.color_user1 = [user.color[0], user.color[1], user.color[2]]\n elif user.identifier == 2:\n self.color_user2 = [user.color[0], user.color[1], user.color[2]]\n if user.identifier == 3:\n self.color_user3 = [user.color[0], user.color[1], user.color[2]]\n else:\n self.color_user4 = [user.color[0], user.color[1], user.color[2]]", "def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]", "def load_colors():\n\n print \"Color\"\n\n for key, value in css3_hex_to_names.items():\n color_hex, color_name = key, value\n color = Color(color_hex=color_hex,\n color_name=color_name)\n\n db.session.add(color)\n\n # Once we're done, we should commit our work\n db.session.commit()", "def setRandomColor():\n setColor(getRandomColor())", "def get_scores_in_order_of_players(self):\n \n players = self.referee.get_current_state().get_players()\n\n player_scores = []\n for player_color in self.player_color_order:\n for player in players:\n if player_color == player.get_color():\n player_scores.append(player.get_score())\n break\n\n return player_scores", "def class_colors(names):\r\n return {name: (\r\n random.randint(0, 255),\r\n random.randint(0, 255),\r\n random.randint(0, 255)) for name in names}", "def get_trump_color(self):\n color_counter = collections.Counter()\n for card in self.hand:\n color = card.color\n if color == \"White\":\n continue\n color_counter[color] += 1\n if not 
color_counter.most_common(1):\n return super().get_trump_color()\n else:\n return color_counter.most_common(1)[0][0]", "def get_card_color(team_cards, game_dict, team):\n\n # Loop through cards by team.\n for j, card in enumerate(team_cards):\n # Loop through all cards of respective team.\n try:\n # Get player name and minute of card.\n player = card.a.text\n minute = card.find(\"span\", {\"class\": \"klammerzahl\"}).text[:-1]\n\n game_dict[\"{}_card_plyr_{}\".format(team, j)] = player\n game_dict[\"{}_card_min_{}\".format(team, j)] = int(minute)\n\n # Scrape card colors and count cards.\n if team_cards[j].div[\"style\"] == \"color:#FBDB04;\":\n game_dict[\"{}_card_clr_{}\".format(team, j)] = 1 # yellow = 1\n game_dict[\"{}_card_yllw\".format(team)] += 1\n\n elif team_cards[j].div[\"style\"] == \"color:#D7110C;\":\n game_dict[\"{}_card_clr_{}\".format(team, j)] = 2 # red = 2\n game_dict[\"{}_card_red\".format(team)] += 1\n\n elif team_cards[j].div[\"class\"][0] == \"icon_gelbrot\":\n game_dict[\"{}_card_clr_{}\".format(team, j)] = 3 # yellow/red=3\n # Counted as two yellow.\n game_dict[\"{}_card_yllw\".format(team)] += 2\n\n else:\n game_dict[\"{}_card_clr_{}\".format(team, j)] = np.nan\n\n except AttributeError:\n pass\n\n return game_dict", "def color(self, sids=None, sat=1):\n if sids == None: # init/overwrite self.colors\n nids = self.nids\n # uint8, single unit nids are 1-based:\n self.colors = CLUSTERCLRSRGB[nids % len(CLUSTERCLRSRGB) - 1] * sat\n # overwrite unclustered/multiunit points with GREYRGB\n self.colors[nids < 1] = GREYRGB * sat\n else: # assume self.colors exists\n sidis = self.sids.searchsorted(sids)\n nids = self.nids[sidis]\n self.colors[sidis] = CLUSTERCLRSRGB[nids % len(CLUSTERCLRSRGB) - 1] * sat\n self.colors[sidis[nids < 1]] = GREYRGB * sat", "def new_color(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if color_mode == 'RGB':\n color_red = random.randint(0,255)\n color_green = random.randint(0,255)\n color_blue = random.randint(0,255)\n color = (color_red, color_blue, color_green)\n else: #color_mode == 'L':\n color = random.randint(0,255)\n mutated_genome[index][0] = color", "def color(self, sids=None, sat=1):\n if sids is None: # init/overwrite self.colors\n nids = self.nids\n # uint8, single unit nids are 1-based:\n self.colors = CLUSTERCLRSRGB[nids % len(CLUSTERCLRSRGB) - 1] * sat\n # overwrite unclustered/multiunit points with GREYRGB\n self.colors[nids < 1] = GREYRGB * sat\n else: # assume self.colors exists\n sidis = self.sids.searchsorted(sids)\n nids = self.nids[sidis]\n self.colors[sidis] = CLUSTERCLRSRGB[nids % len(CLUSTERCLRSRGB) - 1] * sat\n self.colors[sidis[nids < 1]] = GREYRGB * sat", "def init_colors():\r\n index = 1\r\n for (name, back, fore) in COLORS:\r\n if curses.has_colors():\r\n curses.init_pair(index, fore, back)\r\n COLOR_PAIR[name] = curses.color_pair(index)\r\n else:\r\n COLOR_PAIR[name] = 0\r\n index += 1", "def colors(self):\n return self[\"colors\"]", "def colors(self):\n return self[\"colors\"]", "def unflip_colors(self):\n self.colors[self.bondA] = self.colA\n self.colors[self.bondB] = self.colB\n self.set_bcol(self.bondA)\n self.set_bcol(self.bondB)\n return", "def setupshades(self):\n\n for n in range(0, self.numcols):\n sb = self.colspins[n]\n sb.setEnabled(True)\n sb.setValue(self.currentshades[n])\n for n in range(self.numcols, len(self.colspins)):\n sb = self.colspins[n]\n sb.setValue(self.currentshades[-1])\n sb.setEnabled(False)\n # Remember to unset this if we change the number of colours\n 
self.colspins[self.numcols - 1].setReadOnly(True)", "def get_state_colors():\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit(\n \"colors\", {\"colors\": state_colors, \"cases\": state_cases, \"active\": state_active}\n )", "def set(cls, palette: Union[str, Iterable[ColorLike]]):\n if isinstance(palette, str):\n try:\n cls._colors = PALETTES[palette]\n except KeyError as e:\n raise ValueError(\n f\"Invalid palette name '{palette}', valid values are {PALETTES.keys()}\"\n ) from e\n else:\n colors = []\n for c in palette:\n colors.append(parse_color(c))\n\n cls._colors = colors", "def colors(self):\r\n\t\treturn self._colors", "def set_palette(self, red, green, blue, alpha):\n raise DitherError(\"Not implemented\")", "def iter_colors(self):\n return itervalues(self)", "def __init__(self, playerIndex, colour=\"blue\"):\n super().__init__(playerIndex, colour)", "def get_trump_color(self):\n return random.choice(Card.colors[1:])", "def set_color(obj, used_colors):\n color = Helper.get_random_color()\n # Check that the current color is not already used\n while color in used_colors:\n color = Helper.get_random_color()\n used_colors.append(color)\n # Apply the chosen color to the object\n obj[c4d.ID_BASEOBJECT_USECOLOR] = c4d.ID_BASEOBJECT_USECOLOR_ALWAYS\n obj[c4d.ID_BASEOBJECT_COLOR] = color", "def testIterate(self):\n self.assertEquals(set(Color),\n set([Color.RED,\n Color.ORANGE,\n Color.YELLOW,\n Color.GREEN,\n Color.BLUE,\n Color.INDIGO,\n Color.VIOLET]))", "def update(self):\n super().update()\n time_since_start = self.time_since_start() \n curr_mod = time_since_start%self.game.time_cycle_secs\n grade = abs(curr_mod - self.game.time_cycle_secs/2) / (self.game.time_cycle_secs/2)\n color_value = grade*(255-self.game.max_darkness) + self.game.max_darkness\n for sprite in self.all_sprites:\n sprite.color = (color_value, color_value, color_value)", "def switch_colors(mutated_genome):\n index1 = random.randint(0,max(0,len(mutated_genome)-1))\n index2 = random.randint(0,max(0,len(mutated_genome)-1))\n temp = mutated_genome[index1][0]\n mutated_genome[index1][0] = mutated_genome[index2][0]\n mutated_genome[index2][0] = temp", "def get_color(rank):\n if rank == 1:\n color = int(0xffd700)\n elif rank == 2:\n color = int(0xc0c0c0)\n elif rank == 3:\n color = int(0xcd7f32)\n else:\n color = random.randint(1, 16777215)\n\n return discord.Color(color)", "def get_player_states(self, players):\n for player in self.players.values():\n p = players.add()\n p.id = player.id\n p.pos.CopyFrom(player.pos.to_protocol())", "def color(self, value: tuple) -> None:\n if value in Color.PALETTE:\n self._color = value", "def set_colors(self, colors):\n\n # FIXME: This modifying of the global prompts.prompt_specials needs\n # to be fixed. 
We need to refactor all of the prompts stuff to use\n # proper configuration and traits notifications.\n if colors.lower()=='nocolor':\n prompts.prompt_specials = prompts.prompt_specials_nocolor\n else:\n prompts.prompt_specials = prompts.prompt_specials_color\n \n self.color_table.set_active_scheme(colors)\n self.prompt1.set_colors()\n self.prompt2.set_colors()\n self.prompt_out.set_colors()", "def test_from_palette(self):\n \n color0 = pero.Color.Red\n color1 = pero.Color.Green\n color2 = pero.Color.Blue\n color3 = pero.Color.Cyan\n color4 = pero.Color.Magenta\n color5 = pero.Color.Yellow\n color6 = pero.Color.Black\n \n colors = (color0, color1, color2, color3, color4, color5, color6)\n palette = pero.Palette(colors)\n \n picked = pero.Palette.from_palette(palette, 2)\n self.assertEqual(len(picked), 2)\n self.assertTrue(picked[0] is color0)\n self.assertTrue(picked[1] is color6)\n \n picked = pero.Palette.from_palette(palette, 3)\n self.assertEqual(len(picked), 3)\n self.assertTrue(picked[0] is color0)\n self.assertTrue(picked[1] is color3)\n self.assertTrue(picked[2] is color6)\n \n picked = pero.Palette.from_palette(palette, 4)\n self.assertEqual(len(picked), 4)\n self.assertTrue(picked[0] is color0)\n self.assertTrue(picked[1] is color2)\n self.assertTrue(picked[2] is color4)\n self.assertTrue(picked[3] is color6)", "def _style_colours(self):\n\n pass", "def switch_player(self):\n if self.playerOne:\n # sets the chip color to blue\n self.red = 0\n self.blue = 255\n # switch the player to player 2 and change the caption\n self.playerOne = False\n pygame.display.set_caption('Connect4 - Player 2')\n else:\n # sets the chip color to red\n self.red = 250\n self.blue = 0\n # switch the player to player 1 and change the caption\n self.playerOne = True\n pygame.display.set_caption('Connect4 - Player 1')", "def validPlayerColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW):\n return False\n else:\n return True", "def setColors255(self):\r\n # productive\r\n profprint()\r\n self.color255 = [[0, 0, 0] for i in range(MAXCOL)]\r\n self.color255[0] = [221, 108, 158]\r\n self.color255[1] = [128, 174, 128]\r\n self.color255[2] = [241, 214, 145]\r\n self.color255[3] = [177, 122, 101]\r\n self.color255[4] = [111, 184, 210]\r\n self.color255[5] = [216, 101, 79]\r\n self.color255[6] = [221, 130, 101]\r\n self.color255[7] = [144, 238, 144]\r\n self.color255[8] = [192, 104, 88]\r\n self.color255[9] = [220, 245, 20]\r\n self.color255[10] = [78, 63, 0]\r\n self.color255[11] = [255, 250, 220]\r\n self.color255[12] = [230, 220, 70]\r\n self.color255[13] = [200, 200, 235]\r\n self.color255[14] = [250, 250, 210]\r\n self.color255[15] = [244, 214, 49]\r\n self.color255[16] = [0, 151, 206]\r\n self.color255[17] = [183, 156, 220]\r\n self.color255[18] = [183, 214, 211]\r\n self.color255[19] = [152, 189, 207]\r\n self.color255[20] = [178, 212, 242]\r\n self.color255[21] = [68, 172, 100]\r\n self.color255[22] = [111, 197, 131]\r\n self.color255[23] = [85, 188, 255]\r\n self.color255[24] = [0, 145, 30]\r\n self.color255[25] = [214, 230, 130]\r\n self.color255[26] = [218, 255, 255]\r\n self.color255[27] = [170, 250, 250]\r\n self.color255[28] = [140, 224, 228]\r\n self.color255[29] = [188, 65, 28]\r\n self.color255[30] = [216, 191, 216]\r\n self.color255[31] = [145, 60, 66]\r\n self.color255[32] = [150, 98, 83]\r\n self.color255[33] = [250, 250, 225]\r\n self.color255[34] = [200, 200, 215]\r\n self.color255[35] = [68, 131, 98]\r\n self.color255[36] = [83, 146, 164]\r\n self.color255[37] = [162, 115, 105]\r\n 
self.color255[38] = [141, 93, 137]\r\n self.color255[39] = [182, 166, 110]\r\n self.color255[40] = [188, 135, 166]\r\n self.color255[41] = [154, 150, 201]\r\n self.color255[42] = [177, 140, 190]\r\n self.color255[43] = [30, 111, 85]\r\n self.color255[44] = [210, 157, 166]\r\n self.color255[45] = [48, 129, 126]\r\n self.color255[46] = [98, 153, 112]\r\n self.color255[47] = [69, 110, 53]\r\n self.color255[48] = [166, 113, 137]\r\n self.color255[49] = [122, 101, 38]\r\n self.color255[50] = [253, 135, 192]\r\n self.color255[51] = [145, 92, 109]\r\n self.color255[52] = [46, 101, 131]\r\n self.color255[53] = [0, 108, 112]\r\n self.color255[54] = [127, 150, 88]\r\n self.color255[55] = [159, 116, 163]\r\n self.color255[56] = [125, 102, 154]\r\n self.color255[57] = [106, 174, 155]\r\n self.color255[58] = [154, 146, 83]\r\n self.color255[59] = [126, 126, 55]\r\n self.color255[60] = [201, 160, 133]\r\n self.color255[61] = [78, 152, 141]\r\n self.color255[62] = [174, 140, 103]\r\n self.color255[63] = [139, 126, 177]\r\n self.color255[64] = [148, 120, 72]\r\n self.color255[65] = [186, 135, 135]\r\n self.color255[66] = [99, 106, 24]\r\n self.color255[67] = [156, 171, 108]\r\n self.color255[68] = [64, 123, 147]\r\n self.color255[69] = [138, 95, 74]\r\n self.color255[70] = [97, 113, 158]\r\n self.color255[71] = [126, 161, 197]\r\n self.color255[72] = [194, 195, 164]\r\n self.color255[73] = [88, 106, 215]\r\n self.color255[74] = [82, 174, 128]\r\n self.color255[75] = [57, 157, 110]\r\n self.color255[76] = [60, 143, 83]\r\n self.color255[77] = [92, 162, 109]\r\n self.color255[78] = [255, 244, 209]\r\n self.color255[79] = [201, 121, 77]\r\n self.color255[80] = [70, 163, 117]\r\n self.color255[81] = [188, 91, 95]\r\n self.color255[82] = [166, 84, 94]\r\n self.color255[83] = [182, 105, 107]\r\n self.color255[84] = [229, 147, 118]\r\n self.color255[85] = [174, 122, 90]\r\n self.color255[86] = [201, 112, 73]\r\n self.color255[87] = [194, 142, 0]\r\n self.color255[88] = [241, 213, 144]\r\n self.color255[89] = [203, 179, 77]\r\n self.color255[90] = [229, 204, 109]\r\n self.color255[91] = [255, 243, 152]\r\n self.color255[92] = [209, 185, 85]\r\n self.color255[93] = [248, 223, 131]\r\n self.color255[94] = [255, 230, 138]\r\n self.color255[95] = [196, 172, 68]\r\n self.color255[96] = [255, 255, 167]\r\n self.color255[97] = [255, 250, 160]\r\n self.color255[98] = [255, 237, 145]\r\n self.color255[99] = [242, 217, 123]\r\n self.color255[100] = [222, 198, 101]\r\n self.color255[101] = [213, 124, 109]\r\n self.color255[102] = [184, 105, 108]\r\n self.color255[103] = [150, 208, 243]\r\n self.color255[104] = [62, 162, 114]\r\n self.color255[105] = [242, 206, 142]\r\n self.color255[106] = [250, 210, 139]\r\n self.color255[107] = [255, 255, 207]\r\n self.color255[108] = [182, 228, 255]\r\n self.color255[109] = [175, 216, 244]\r\n self.color255[110] = [197, 165, 145]\r\n self.color255[111] = [172, 138, 115]\r\n self.color255[112] = [202, 164, 140]\r\n self.color255[113] = [224, 186, 162]\r\n self.color255[114] = [255, 245, 217]\r\n self.color255[115] = [206, 110, 84]\r\n self.color255[116] = [210, 115, 89]\r\n self.color255[117] = [203, 108, 81]\r\n self.color255[118] = [233, 138, 112]\r\n self.color255[119] = [195, 100, 73]\r\n self.color255[120] = [181, 85, 57]\r\n self.color255[121] = [152, 55, 13]\r\n self.color255[122] = [159, 63, 27]\r\n self.color255[123] = [166, 70, 38]\r\n self.color255[124] = [218, 123, 97]\r\n self.color255[125] = [225, 130, 104]\r\n self.color255[126] = [224, 97, 76]\r\n self.color255[127] = 
[184, 122, 154]\r\n self.color255[128] = [211, 171, 143]\r\n self.color255[129] = [47, 150, 103]\r\n self.color255[130] = [173, 121, 88]\r\n self.color255[131] = [188, 95, 76]\r\n self.color255[132] = [255, 239, 172]\r\n self.color255[133] = [226, 202, 134]\r\n self.color255[134] = [253, 232, 158]\r\n self.color255[135] = [244, 217, 154]\r\n self.color255[136] = [205, 179, 108]\r\n self.color255[137] = [186, 124, 161]\r\n self.color255[138] = [255, 255, 220]\r\n self.color255[139] = [234, 234, 194]\r\n self.color255[140] = [204, 142, 178]\r\n self.color255[141] = [180, 119, 153]\r\n self.color255[142] = [216, 132, 105]\r\n self.color255[143] = [255, 253, 229]\r\n self.color255[144] = [205, 167, 142]\r\n self.color255[145] = [204, 168, 143]\r\n self.color255[146] = [255, 224, 199]\r\n self.color255[147] = [139, 150, 98]\r\n self.color255[148] = [249, 180, 111]\r\n self.color255[149] = [157, 108, 162]\r\n self.color255[150] = [203, 136, 116]\r\n self.color255[151] = [185, 102, 83]\r\n self.color255[152] = [247, 182, 164]\r\n self.color255[153] = [222, 154, 132]\r\n self.color255[154] = [124, 186, 223]\r\n self.color255[155] = [249, 186, 150]\r\n self.color255[156] = [244, 170, 147]\r\n self.color255[157] = [255, 181, 158]\r\n self.color255[158] = [255, 190, 165]\r\n self.color255[159] = [227, 153, 130]\r\n self.color255[160] = [213, 141, 113]\r\n self.color255[161] = [193, 123, 103]\r\n self.color255[162] = [216, 146, 127]\r\n self.color255[163] = [230, 158, 140]\r\n self.color255[164] = [245, 172, 147]\r\n self.color255[165] = [241, 172, 151]\r\n self.color255[166] = [177, 124, 92]\r\n self.color255[167] = [171, 85, 68]\r\n self.color255[168] = [217, 198, 131]\r\n self.color255[169] = [212, 188, 102]\r\n self.color255[170] = [185, 135, 134]\r\n self.color255[171] = [198, 175, 125]\r\n self.color255[172] = [194, 98, 79]\r\n self.color255[173] = [255, 238, 170]\r\n self.color255[174] = [206, 111, 93]\r\n self.color255[175] = [216, 186, 0]\r\n self.color255[176] = [255, 226, 77]\r\n self.color255[177] = [255, 243, 106]\r\n self.color255[178] = [255, 234, 92]\r\n self.color255[179] = [240, 210, 35]\r\n self.color255[180] = [224, 194, 0]\r\n self.color255[181] = [213, 99, 79]\r\n self.color255[182] = [217, 102, 81]\r\n self.color255[183] = [0, 147, 202]\r\n self.color255[184] = [0, 122, 171]\r\n self.color255[185] = [186, 77, 64]\r\n self.color255[186] = [240, 255, 30]\r\n self.color255[187] = [185, 232, 61]\r\n self.color255[188] = [0, 226, 255]\r\n self.color255[189] = [251, 159, 255]\r\n self.color255[190] = [230, 169, 29]\r\n self.color255[191] = [0, 194, 113]\r\n self.color255[192] = [104, 160, 249]\r\n self.color255[193] = [221, 108, 158]\r\n self.color255[194] = [137, 142, 0]\r\n self.color255[195] = [230, 70, 0]\r\n self.color255[196] = [0, 147, 0]\r\n self.color255[197] = [0, 147, 248]\r\n self.color255[198] = [231, 0, 206]\r\n self.color255[199] = [129, 78, 0]\r\n self.color255[200] = [0, 116, 0]\r\n self.color255[201] = [0, 0, 255]\r\n self.color255[202] = [157, 0, 0]\r\n self.color255[203] = [100, 100, 130]\r\n self.color255[204] = [205, 205, 100]\r\n self.color255[205] = [255, 255, 0]\r\n\r\n return self.color255", "def check_color_card(player, color):\n for card in player.cards:\n if card.suit == color:\n return True", "def preset_colors( self, labels ):\n size_labels = len( labels )\n self.color_override = self.metadata.get('color_override', {})\n try:\n if self.color_override == {}:\n raise Exception('going to the default')\n colours = self.color_override\n size_colors = len ( 
colours )\n retval = []\n for label in labels:\n mycolour = colours[label]\n retval.append(mycolour)\n except:\n hex_colors = self.hex_colors\n size_colors = len( hex_colors )\n retval = [ hex_colors[ i % size_colors ] for i in range( size_labels ) ]\n\n retval.reverse()\n return retval", "def update_player_turn(self):\n\n if self.get_player_turn() != 'BLUE':\n\n self._player_turn = 'BLUE'\n\n else:\n\n self._player_turn = 'RED'", "def test_exist_and_change(self):\n colorList = ColorList()\n prev = colorList.pickColor()\n self.assertIsNotNone(prev)\n for i in range(100):\n color = colorList.pickColor()\n self.assertIsNotNone(color)\n self.assertTrue(color.r != prev.r or color.g != prev.g or color.b != prev.b)\n prev = color", "def mutate_color(mutated_genome):\n seed = random.randint(0,2)\n if seed == 0:\n new_color(mutated_genome)\n elif seed == 1:\n change_color(mutated_genome)\n else: #seed == 2:\n switch_colors(mutated_genome)\n #else: seed == 3: # depricated\n # shuffle_colors(mutated_genome)", "def initialize_colors(self) -> None:\n curses.init_pair(ColorPair.black_on_white.value, curses.COLOR_BLACK, curses.COLOR_WHITE)\n curses.init_pair(ColorPair.red_on_black.value, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(ColorPair.blue_on_black.value, curses.COLOR_BLUE, curses.COLOR_BLACK)\n curses.init_pair(ColorPair.green_on_black.value, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(ColorPair.white_on_black.value, curses.COLOR_WHITE, curses.COLOR_BLACK)", "def _update_color(self):\n self._vertex_list.colors[:] = self._rgba * self._num_verts", "def mutate_color(color):\n color[random.randrange(0, 3)] = random.random() % 1\n return color", "def set_color_list(self, new_list):\n self.__clr_list = itertools.cycle(new_list)", "def populate_color(cls, values):\n\n color_name = values.get(\"color_name\")\n color_index = values.get(\"color_index\")\n palette = values.get(\"palette\")\n\n # Set a default if needed\n if palette is None:\n palette = \"vtint\"\n\n if (color_name is None) and (color_index is None):\n return values\n\n elif color_name is None:\n raise ValueError(\"'color_name' can't be null when 'color_index' is not null\")\n\n elif color_index is None:\n raise ValueError(\"'color_index' can't be null when 'color_name' is not null\")\n\n values[\"color\"] = get_colors((color_name, color_index), palette=palette)\n return values", "def extract_colors(self, palette, colors):\n return [palette[i:i + 3] for i in range(0, colors * 3, 3)]", "def __init__(self):\n\n super(ColorMap, self).__init__()\n self.by_id = dict()\n\n for color in [Color.white(), Color.black()]:\n self.push_color(color)\n\n # only black and white are added ny now\n self.black_and_white = True", "def setColors255(self):\n #productive\n profprint()\n self.color255= [[0,0,0] for i in range(205)]\n self.color255[0]=[221,108,158]\n self.color255[1]=[128,174,128]\n self.color255[2]=[241,214,145]\n self.color255[3]=[177,122,101]\n self.color255[4]=[111,184,210]\n self.color255[5]=[216,101,79]\n self.color255[6]=[221,130,101]\n self.color255[7]=[144,238,144]\n self.color255[8]=[192,104,88]\n self.color255[9]=[220,245,20]\n self.color255[10]=[78,63,0]\n self.color255[11]=[255,250,220]\n self.color255[12]=[230,220,70]\n self.color255[13]=[200,200,235]\n self.color255[14]=[250,250,210]\n self.color255[15]=[244,214,49]\n self.color255[16]=[0,151,206]\n self.color255[17]=[183,156,220]\n self.color255[18]=[183,214,211]\n self.color255[19]=[152,189,207]\n self.color255[20]=[178,212,242]\n 
self.color255[21]=[68,172,100]\n self.color255[22]=[111,197,131]\n self.color255[23]=[85,188,255]\n self.color255[24]=[0,145,30]\n self.color255[25]=[214,230,130]\n self.color255[26]=[218,255,255]\n self.color255[27]=[170,250,250]\n self.color255[28]=[140,224,228]\n self.color255[29]=[188,65,28]\n self.color255[30]=[216,191,216]\n self.color255[31]=[145,60,66]\n self.color255[32]=[150,98,83]\n self.color255[33]=[250,250,225]\n self.color255[34]=[200,200,215]\n self.color255[35]=[68,131,98]\n self.color255[36]=[83,146,164]\n self.color255[37]=[162,115,105]\n self.color255[38]=[141,93,137]\n self.color255[39]=[182,166,110]\n self.color255[40]=[188,135,166]\n self.color255[41]=[154,150,201]\n self.color255[42]=[177,140,190]\n self.color255[43]=[30,111,85]\n self.color255[44]=[210,157,166]\n self.color255[45]=[48,129,126]\n self.color255[46]=[98,153,112]\n self.color255[47]=[69,110,53]\n self.color255[48]=[166,113,137]\n self.color255[49]=[122,101,38]\n self.color255[50]=[253,135,192]\n self.color255[51]=[145,92,109]\n self.color255[52]=[46,101,131]\n self.color255[53]=[0,108,112]\n self.color255[54]=[127,150,88]\n self.color255[55]=[159,116,163]\n self.color255[56]=[125,102,154]\n self.color255[57]=[106,174,155]\n self.color255[58]=[154,146,83]\n self.color255[59]=[126,126,55]\n self.color255[60]=[201,160,133]\n self.color255[61]=[78,152,141]\n self.color255[62]=[174,140,103]\n self.color255[63]=[139,126,177]\n self.color255[64]=[148,120,72]\n self.color255[65]=[186,135,135]\n self.color255[66]=[99,106,24]\n self.color255[67]=[156,171,108]\n self.color255[68]=[64,123,147]\n self.color255[69]=[138,95,74]\n self.color255[70]=[97,113,158]\n self.color255[71]=[126,161,197]\n self.color255[72]=[194,195,164]\n self.color255[73]=[88,106,215]\n self.color255[74]=[82,174,128]\n self.color255[75]=[57,157,110]\n self.color255[76]=[60,143,83]\n self.color255[77]=[92,162,109]\n self.color255[78]=[255,244,209]\n self.color255[79]=[201,121,77]\n self.color255[80]=[70,163,117]\n self.color255[81]=[188,91,95]\n self.color255[82]=[166,84,94]\n self.color255[83]=[182,105,107]\n self.color255[84]=[229,147,118]\n self.color255[85]=[174,122,90]\n self.color255[86]=[201,112,73]\n self.color255[87]=[194,142,0]\n self.color255[88]=[241,213,144]\n self.color255[89]=[203,179,77]\n self.color255[90]=[229,204,109]\n self.color255[91]=[255,243,152]\n self.color255[92]=[209,185,85]\n self.color255[93]=[248,223,131]\n self.color255[94]=[255,230,138]\n self.color255[95]=[196,172,68]\n self.color255[96]=[255,255,167]\n self.color255[97]=[255,250,160]\n self.color255[98]=[255,237,145]\n self.color255[99]=[242,217,123]\n self.color255[100]=[222,198,101]\n self.color255[101]=[213,124,109]\n self.color255[102]=[184,105,108]\n self.color255[103]=[150,208,243]\n self.color255[104]=[62,162,114]\n self.color255[105]=[242,206,142]\n self.color255[106]=[250,210,139]\n self.color255[107]=[255,255,207]\n self.color255[108]=[182,228,255]\n self.color255[109]=[175,216,244]\n self.color255[110]=[197,165,145]\n self.color255[111]=[172,138,115]\n self.color255[112]=[202,164,140]\n self.color255[113]=[224,186,162]\n self.color255[114]=[255,245,217]\n self.color255[115]=[206,110,84]\n self.color255[116]=[210,115,89]\n self.color255[117]=[203,108,81]\n self.color255[118]=[233,138,112]\n self.color255[119]=[195,100,73]\n self.color255[120]=[181,85,57]\n self.color255[121]=[152,55,13]\n self.color255[122]=[159,63,27]\n self.color255[123]=[166,70,38]\n self.color255[124]=[218,123,97]\n self.color255[125]=[225,130,104]\n 
self.color255[126]=[224,97,76]\n self.color255[127]=[184,122,154]\n self.color255[128]=[211,171,143]\n self.color255[129]=[47,150,103]\n self.color255[130]=[173,121,88]\n self.color255[131]=[188,95,76]\n self.color255[132]=[255,239,172]\n self.color255[133]=[226,202,134]\n self.color255[134]=[253,232,158]\n self.color255[135]=[244,217,154]\n self.color255[136]=[205,179,108]\n self.color255[137]=[186,124,161]\n self.color255[138]=[255,255,220]\n self.color255[139]=[234,234,194]\n self.color255[140]=[204,142,178]\n self.color255[141]=[180,119,153]\n self.color255[142]=[216,132,105]\n self.color255[143]=[255,253,229]\n self.color255[144]=[205,167,142]\n self.color255[145]=[204,168,143]\n self.color255[146]=[255,224,199]\n self.color255[147]=[139,150,98]\n self.color255[148]=[249,180,111]\n self.color255[149]=[157,108,162]\n self.color255[150]=[203,136,116]\n self.color255[151]=[185,102,83]\n self.color255[152]=[247,182,164]\n self.color255[153]=[222,154,132]\n self.color255[154]=[124,186,223]\n self.color255[155]=[249,186,150]\n self.color255[156]=[244,170,147]\n self.color255[157]=[255,181,158]\n self.color255[158]=[255,190,165]\n self.color255[159]=[227,153,130]\n self.color255[160]=[213,141,113]\n self.color255[161]=[193,123,103]\n self.color255[162]=[216,146,127]\n self.color255[163]=[230,158,140]\n self.color255[164]=[245,172,147]\n self.color255[165]=[241,172,151]\n self.color255[166]=[177,124,92]\n self.color255[167]=[171,85,68]\n self.color255[168]=[217,198,131]\n self.color255[169]=[212,188,102]\n self.color255[170]=[185,135,134]\n self.color255[171]=[198,175,125]\n self.color255[172]=[194,98,79]\n self.color255[173]=[255,238,170]\n self.color255[174]=[206,111,93]\n self.color255[175]=[216,186,0]\n self.color255[176]=[255,226,77]\n self.color255[177]=[255,243,106]\n self.color255[178]=[255,234,92]\n self.color255[179]=[240,210,35]\n self.color255[180]=[224,194,0]\n self.color255[181]=[213,99,79]\n self.color255[182]=[217,102,81]\n self.color255[183]=[0,147,202]\n self.color255[184]=[0,122,171]\n self.color255[185]=[186,77,64]\n self.color255[186]=[240,255,30]\n self.color255[187]=[185,232,61]\n self.color255[188]=[0,226,255]\n self.color255[189]=[251,159,255]\n self.color255[190]=[230,169,29]\n self.color255[191]=[0,194,113]\n self.color255[192]=[104,160,249]\n self.color255[193]=[221,108,158]\n self.color255[194]=[137,142,0]\n self.color255[195]=[230,70,0]\n self.color255[196]=[0,147,0]\n self.color255[197]=[0,147,248]\n self.color255[198]=[231,0,206]\n self.color255[199]=[129,78,0]\n self.color255[200]=[0,116,0]\n self.color255[201]=[0,0,255]\n self.color255[202]=[157,0,0]\n self.color255[203]=[100,100,130]\n self.color255[204]=[205,205,100]\n \n return self.color255", "def assigning_colors():\n rgb_colors = {}\n for name, hex in matplotlib.colors.cnames.items():\n color = []\n # So the values are from 0-255 and not 0-1\n for i in matplotlib.colors.to_rgb(hex):\n color.append(int(i * 255))\n\n color = tuple(color)\n rgb_colors[name] = color\n\n return rgb_colors", "def set_colors(self, ):\n try:\n odd = self._parent.settings.get_key('interface.odd_color')\n even = self._parent.settings.get_key('interface.even_color')\n self.dialog.instruments.set_odd_color(odd)\n self.dialog.accounts.set_odd_color(odd)\n self.dialog.instruments.set_even_color(even)\n self.dialog.accounts.set_even_color(even)\n except od_exception_config_key_error:\n pass", "def color_picker(self):\n\n\t\tnum_to_select = 4\n\t\tpeg_color_list = [] #creates the list to store the peg object\n\n\t\t#write a for loop to 
set a loop to select 4 colors from SOLUTION in mm_model\n\t\tfor i in range(num_to_select): #use i just to run the loop, variable is not used elsewhere \n\t\t\t# print(i)\n\t\t\tcolor = random.choice(MasterModel.COLORS)\n\t\t\t# print(color)\n\t\t\t#associate color with peg objects\n\t\t\tpeg = ColorPeg(color)\n\n\t\t\t#append the peg_color list to make a list of peg objects \n\t\t\tpeg_color_list.append(peg)\n\t\t\t# print (peg_color_list)\n\t\t\n\t\t#create object for solution so it can be stored in model.py\n\t\tsolution = Guess(peg_color_list)\n\n\t\t#put solution into the self.guesses dictionary in the model\n\t\tself.model.guesses[\"solution\"] = solution\n\n\n\t\t#Testing Stuff:\n\t\t# for peg in peg_color_list:\n\t\t# \tprint(peg.peg_color)\n\n\t\t# print(self.model.guesses[\"solution\"])", "def clear_colors(self):\n for r in range(0, self.maze.get_nrows()):\n for c in range(0, self.maze.get_ncols()):\n self.set_color((r, c), 'white', draw=False)\n\n self.cvs.itemconfig(self.cvs_cells[self.maze.get_start_cell()],\n fill='green')\n self.cvs.itemconfig(self.cvs_cells[self.maze.get_end_cell()],\n fill='red')\n\n self.draw()", "def test_update_team_color(self):\n league = self.create_league()\n season = self.create_season(league)\n team = self.create_team(season)\n\n WHITE = \"ffffff\"\n\n #get original color for final testing later\n response = self.get_url(\n \"api-my-team-detail\",\n url_kwargs={\"team_id\": team.id}\n )\n old_color = response.data['color']\n self.assertNotEquals(old_color, WHITE)\n\n #update team color\n data = { 'color': WHITE }\n response = self.patch_url(\n \"api-my-team-detail\",\n url_kwargs={\"team_id\": team.id},\n data=data,\n )\n\n #confirm new color\n response = self.get_url(\n \"api-my-team-detail\",\n url_kwargs={\"team_id\": team.id}\n )\n new_color = response.data['color']\n self.assertNotEquals(old_color, new_color)\n self.assertEquals(WHITE, new_color)", "def set_color(self, color, filled):\n for cell in filled:\n self.board[cell[0], cell[1]] = color", "def assign_colors(data: List[EmissionPerCapita]) -> dict:\r\n colors = {}\r\n for emission in data:\r\n r = random.randint(1, 255)\r\n g = random.randint(1, 255)\r\n b = random.randint(1, 255)\r\n color = \"rgb(\" + str(r) + \",\" + str(g) + \",\" + str(b) + \")\"\r\n dict.update(colors, {emission.name: color})\r\n\r\n return colors", "def same_color(suit):\n\tif suit == 's':\n\t\treturn 'c'\n\telif suit == 'c':\n\t\treturn 's'\n\telif suit == 'd':\n\t\treturn 'h'\n\telif suit == 'h':\n\t\treturn 'd'", "def same_player(self, other):\n return self.name == other.name \\\n and self.color == other.color", "def set_all(self, color):\n for x in range(self.width):\n for y in range(self.height):\n self.set([x,y], color)", "def highlightWinners(self,screen,frame,midpos = (800,450)):\n for i, pos in enumerate(self.winnerPos):\n \n if i > self.nseeds-1:\n break\n drawPulsatingCirlce(screen,getOffsetPos(pos,midpos),frame,colour = self.winnerColour[i],size = 20,cycle_length = 60,magnitude = 0.8,reverse_alpha = True)\n \n #pygame.draw.circle(screen, self.winnerColour[i], getOffsetPos(pos,midpos), 10,2)\n #pygame.draw.circle(screen, self.winnerColour[i], getOffsetPos(pos,midpos), 20,2)", "def updateShoesColor(self, shoesColor): \n self.avatarConfiguration[\"shoes\"] = str(shoesColor)\n self.paintShoes()", "def __init__(self, player1, player2):\n # # players of the game {player1name: {color: , red_marbles:}}\n # self._players = {player1[0]: {\"name\": player1[0], \"color\": player1[1]},\n # player2[0]: {\"name\": 
player2[0], \"color\": player2[1]}}\n # # empty board, no marbles yet\n # self._board = self.create_board()\n # # current player's turn\n # self._turn = None\n # # winner state\n # self._winner = None\n # # red marbles captured for each player, needs addition of black and white marbles\n # self._captured = {player1[0]: 0, player2[0]: 0}\n pass", "def set_pattern(colors=('green', 'blue', 'red')): # (10)\n for i in range(0, int(ceil(float(NUM_LEDS)/float(len(colors))))):\n for color in colors:\n push_color(color)", "def color():\n\tcolors = ['b', 'r', 'g', 'k']\n\twhile True:\n\t\tfor c in colors:\n\t\t\tyield c", "def resetPalette(self):\n pal = (0,0,0)\n for i in range(1,256):\n pal += (i,i,i)\n if self._displayPjt:\n self._displayPjt.setColorPalette(pal)\n if self._displayUsr:\n self._displayUsr.setColorPalette(pal)\n if self._displayVtk:\n self._displayVtk.setColorPalette(pal)", "def colors(self):\n return self._colors", "def random_color():\n return random.choice(colors)", "def colors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetHeatmapColorArgs']]]]:\n return pulumi.get(self, \"colors\")", "def _random_color(self):\n levels = range(0, 256)\n return tuple(random.choice(levels) for _ in range(3))", "def change_color(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if color_mode == 'RGB':\n color_red = random.randint(-25,25)\n color_green = random.randint(-25,25)\n color_blue = random.randint(-25,25)\n color = mutated_genome[index][0]\n newcolor = (color[0]+color_red,color[1]+color_green,color[2]+color_blue)\n else: #color_mode == 'L':\n color_diff = random.randint(-25,25)\n color = mutated_genome[index][0]\n newcolor = color+color_diff\n mutated_genome[index][0] = newcolor", "def get_victors(self):\n if self.is_game_over():\n scores = [p.get_score() for p in self.state.get_players()]\n if len(scores) == 0:\n return []\n max_score = max(scores)\n victors = []\n for p in self.state.get_players():\n if p.get_color() not in self.violators and p.get_score() == max_score:\n victors.append(self.players[p.get_color()])\n return victors\n else:\n return None", "def set_all(self, red, green, blue):\n self._set_all(red, green, blue)", "def change(widget, colors): \n\t\n new_val = '#'\n for name in ('red', 'green', 'blue'):\n new_val += colors[name].get()\n widget['bg'] = new_val", "def test_issue_269(self):\n\n c = pygame.Color(0)\n c.hsva = 360, 0, 0, 0\n self.assertEqual(c.hsva, (0, 0, 0, 0))\n c.hsva = 360, 100, 100, 100\n self.assertEqual(c.hsva, (0, 100, 100, 100))\n self.assertEqual(c, (255, 0, 0, 255))", "def randcolor():\n return (randint(0,255), randint(0,255), randint(0,255))", "def get_state(self):\n return np.append(self.game.game_board.get_board(),\n [self.game.player_1.color, self.game.player_2.color])[None, :]", "def change_colors(self, interval):\n for shape in self.shapes:\n shape.set_pen_color(choice(Color.PALETTE)).set_fill_color(\n choice(Color.PALETTE)\n )", "def setColourLevels(self):\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n brightness = self.brightnessSlider.value()\n contrast = self.contrastSlider.value()\n colourStart = (brightness / 100.0 * contrast / 100.0) * (maxsg - minsg) + minsg\n colourEnd = (maxsg - minsg) * (1.0 - contrast / 100.0) + colourStart\n for btn in self.picbuttons:\n btn.stopPlayback()\n btn.setImage(self.lut, colourStart, colourEnd, False)\n btn.update()", "def set_colors(cls, font, background, foreground) -> None:\n\n for index in range(256):\n color = blend_color(background, 
foreground, font[NRM][index])\n font[BMP].set_palette_at(index, color)\n\n font[BFC] = (background, foreground)" ]
[ "0.6983501", "0.661778", "0.65605044", "0.6559854", "0.65476024", "0.65333056", "0.64143264", "0.613175", "0.6107858", "0.60999984", "0.6061564", "0.59373385", "0.58590215", "0.58403534", "0.58192056", "0.57921976", "0.5781316", "0.5771787", "0.5770856", "0.5764907", "0.57404304", "0.5736701", "0.5667599", "0.5636704", "0.5633197", "0.5618592", "0.5616111", "0.56148463", "0.5591083", "0.5584227", "0.557936", "0.55669665", "0.5566862", "0.5565828", "0.5565828", "0.5552508", "0.55464476", "0.55434746", "0.554153", "0.5527679", "0.55176437", "0.5512932", "0.5512891", "0.55125606", "0.5510945", "0.5509823", "0.54989374", "0.5489773", "0.5484984", "0.54792124", "0.54680943", "0.5466759", "0.54629725", "0.5457748", "0.54535085", "0.5451428", "0.54512125", "0.545049", "0.5432485", "0.5428812", "0.5419856", "0.541888", "0.5417253", "0.54112387", "0.5409022", "0.54072976", "0.5405613", "0.5401837", "0.5397934", "0.5397869", "0.5392659", "0.5368894", "0.5361293", "0.53515095", "0.53432524", "0.53432506", "0.533221", "0.5331021", "0.5322097", "0.532075", "0.53158456", "0.5311905", "0.5307991", "0.52973586", "0.5291605", "0.52908355", "0.528141", "0.5280638", "0.5274651", "0.52735096", "0.5267975", "0.52646965", "0.52638763", "0.52627575", "0.52611005", "0.52586156", "0.5246342", "0.523956", "0.5239183", "0.52384317" ]
0.848916
0
Remove a player of a given color from the game along with his penguins and add him to the violator list.
def __remove_player(self, color): self.state.remove_player(color) self.violators.append(self.players[color])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removePlayer(self, color):\n if type(color) not in (player.Player, int):\n raise TypeError(\"Input to removePlayer must be of type int or Player.\")\n if type(color) is player.Player:\n color = color.getColor()\n if color not in self.__colordict__:\n raise IndexError(\"The input color/player was not found in the Rotator.\")\n # Empty the Rotator if this is the last node\n if len(self.__colordict__.keys()) <= 1:\n self.__colordict__ = {}\n self.__currentnode__ = None\n return\n if self.__currentnode__.getPlayer().getColor() == color:\n self.__currentnode__ = self.__currentnode__.getNext()\n node_to_remove = self.__colordict__[color]\n # Connect the previous node to the next node\n previous_node = node_to_remove.getPrevious()\n next_node = node_to_remove.getNext()\n previous_node.connectForward(next_node)\n next_node.connectBackward(previous_node)\n self.__colordict__.pop(color, None)", "def remove_player_from_game(self, player):\n if player in self.players:\n cards = player.cards\n for card in cards:\n self.cards.append(card)\n\n self.__shuffle_cards()\n player.cards = []\n if player == self.current_player:\n self.choose_next_player()\n\n self.players.remove(player)", "def remove_player(self, player):\n\t\tself.players.remove(player)", "def _remove_player(self, player, player_party, other_party):\n\n party = vars(self)[player_party][:]\n party.remove(player)\n vars(self)[player_party].remove(player)\n for other in vars(self)[other_party]:\n if player in other.prefs:\n other.forget(player)", "def removePlayer(self, index):\n\n self.eloList.pop(index)\n self.idList.pop(index)", "def removePlayer(self, index):\n serial = self.seats[index]\n self.seats[index]=0\n if serial in self.players:\n del self.players[serial]", "def cull(self) -> None:\n for player in self.players:\n to_remove = [creature for creature in player.battle_line if creature.damage_taken >= creature.power()]\n for creature in to_remove:\n player.battle_line.remove(creature)\n to_remove.destroyed(self, creature)", "def removeFromPlayerList(self):\n\t\tfor x in self.playerRemoveList:\n\t\t\tself.removePlayer(x)", "def updatePlayer(self, _player):\n if _player.color == 'black': self.players['black'] = _player\n else: self.players['white'] = _player", "def removePlayer(self, userid):\r\n userid = int(userid)\r\n if self.__contains__(userid):\r\n del self.players[userid].command \r\n # we have to manually delete the underlying object so we have no other references to PlayerObject class.\r\n del self.players[userid] # calls deconstructor on PlayerObject class\r", "def removePlayer(self, player):\n\t\tfor i in range(len(self.playerList)):\n\t\t\tif self.playerList [i] == player:\n\t\t\t\tself.playerList[i] = None\n\t\t\t\treturn", "def delete_gkeeper(alist):\n\n res = [player for player in alist if player[2] != ['Por']]\n\n return res", "async def deluser(self, ctx, member: discord.Member):\r\n for k, v in player.items():\r\n if k == member.name:\r\n del player[k]\r\n cur.execute(\"DELETE FROM players WHERE name=%s\", [k])\r\n conn.commit()\r\n await ctx.send(k + ' has been removed from the player-base')\r\n break", "def remove_player(self, player):\r\n print(\"REMOVING\", player)\r\n player_index = self.get_player_index(player)\r\n\r\n # if we are the current player, move back the index once\r\n if self.current_player == player_index:\r\n self.current_player -= 1\r\n if self.current_player < 0:\r\n self.current_player = len(self.player_list) - 2\r\n\r\n self.player_positions.pop(player_index)\r\n 
self.player_list.pop(player_index)\r\n\r\n # TODO: put any cards owned by the player back in to the cards list\r", "def delPlayer(self, idx):\n self.players[idx:idx + 1] = []", "def eat_fruit(self):\r\n self.app.fruit.remove(self.grid_pos)\r\n self.current_score += 5", "def remove_from_hand(self):\n pass", "def end_game(self,player,color):\n black = (0, 0, 0)\n font = pygame.font.Font(os.path.join(os.path.dirname(os.path.realpath(__file__)),'TRON.TTF'), 25)\n label1= font.render(player + \" WINS!\", 1, color)\n label2 = font.render(\"Press Space to Restart\", 1, (255,255,255))\n self.screen.fill(black)\n self.screen.blit(label1,(185,100))\n self.screen.blit(label2,(43,200))\n pygame.display.flip()\n self.game_over = True\n for player in self.players:\n player.dir = \"None\"", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "def hit(hand=bj.player1.hand):\r\n hand.append(bj.deck.remove_card())", "def removePlayer(self, player):\n self.players.remove(player)\n for observer in self.observers:\n observer.playerRemoved(player)", "def __delitem__(self, userid):\r\n self.removePlayer(userid)", "def remove_player(lst,player):\n print(\"Removing\",player)\n cursor=lst.head\n while cursor.data!=player:\n cursor=cursor.next\n if cursor==lst.head:\n cursor.next.prev=lst.tail\n cursor.prev.next=cursor.next\n lst.head=cursor.next\n if cursor==lst.tail:\n cursor.next.prev=cursor.prev\n cursor.prev.next=lst.head\n lst.tail=cursor.prev\n cursor.prev.next=cursor.next\n cursor.next.prev=cursor.prev\n lst.size-=1", "def removePlayer(self, player):\n #if (not self.__configuring) and (player in self.__players):\n if (player in self.__players):\n self.__players.remove(player)\n for event in self.__events:\n if player in event:\n del event[player]\n player.unbind(self)", "def _remove_joker(hand, joker):\n\n hand.remove(joker)", "def remove(self, pair):\n\n for plug in self.plugleads:\n if plug.pair == pair:\n self.plugleads.remove(plug)", "async def tod_remove(self, ctx, *args):\n if \"all\" in args:\n for user in self.players:\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await user.remove_roles(role)\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"truth-or-dare\"):\n await channel.delete()\n break\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"secret-voice\"):\n await channel.delete()\n break\n self.players = []\n message = \"All players removed from the game!\"\n await ctx.send(message)\n return\n\n for name in args:\n message = \"\"\n size = len(self.players)\n for user in self.players:\n if name == user.mention:\n self.players.remove(user)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await user.remove_roles(role)\n message = f\"{name} removed from the game!\"\n if size == len(self.players):\n message = \"Player not in the game! 
Check command syntax.\"\n await ctx.send(message)", "def remove_player(self, seat_id):\n player_id = seat_id\n try:\n idx = self._seats.index(self._player_dict[player_id])\n self._seats[idx] = Player(0, stack=0, emptyplayer=True)\n del self._player_dict[player_id]\n self.emptyseats += 1\n except ValueError:\n pass", "def remove_card_from_player(self, card, player):\n if player in self.players:\n if card in player.cards:\n player.cards.remove(card)", "def remove_player(self, room_code: str, player_name: str) -> GameInfo:\n game = self.read_game(room_code)\n new_player_list = game.players.copy()\n\n matched_players = [\n player for player in game.players if player.name == player_name\n ]\n if len(matched_players) == 0:\n return game\n (player,) = matched_players\n\n new_player_list.remove(player)\n\n turn_player_name = game.turn_player_name\n if turn_player_name == player.name:\n # Pass to the next player\n player_index = game.players.index(player)\n if len(new_player_list) == 0:\n turn_player_name = None\n else:\n turn_player = new_player_list[player_index % len(new_player_list)]\n turn_player_name = turn_player.name\n\n self.games_table.update_item(\n Key={\"room_code\": room_code},\n UpdateExpression=(\"set turn_player_name=:t, players=:p\"),\n ExpressionAttributeValues={\n \":t\": turn_player_name,\n \":p\": [player.dict() for player in new_player_list],\n },\n ConditionExpression=Attr(\"players\").eq(game.dict()[\"players\"]),\n )\n\n self._determine_winner(room_code)\n\n return self.read_game(room_code)", "def player_card(self):\n card = random.choice(self.hand.cards)\n self.hand.cards.remove(card)\n print(f\"{self.name}:{card!r:<3} \", end=\"\")\n return card", "async def remove(ctx, pkmn_id: int):\n res = database.remove_from_party(ctx.message.author, pkmn_id)\n if not res:\n ctx.send(\"**Oak**: Make sure you actually have that pokemon or if your party is not full ya scrub.\")\n return await show_party(ctx.message.author)", "def remove(self, game_obj):\r\n self.game_objects_for_removal.append(game_obj)", "def remove(self):\r\n game_ref.remove(self)", "def func(self):\n char = self.character\n clothing = char.search(self.args, candidates=char.contents)\n if not clothing:\n return\n if not clothing.db.worn:\n char.msg(\"You're not wearing that!\")\n return\n if clothing.db.covered_by:\n char.msg(\"You have to take off %s first.\" % clothing.db.covered_by.name)\n return\n clothing.remove(char)", "def mine_remove(x, y):\n click.echo('Removed mine at %s,%s' % (x, y))", "async def remove(self, ctx, game):\n\n user = ctx.message.author\n\n if remove(game, user.id):\n await self.bot.say(\"{}, {} was removed from your library.\".format(user.mention, game))\n else:\n await self.bot.say(\"{}, you do not have this game in your library.\".format(user.mention))", "def removePlayerMoney(self, player, amount):\n\t\tplayer.removeMoney(amount)\n\t\tif player.money == 0:\n\t\t\tself.allinQ.append(player.name)", "def remove_item(player, name):\n for index, gear in enumerate(player[\"inventory\"]):\n if gear[\"type\"] == name:\n if 'quantity' in gear.keys():\n gear['quantity'] -= 1\n if gear['quantity'] < 1:\n del player['inventory'][index]\n else:\n del player['inventory'][index]\n\n return True\n\n return False", "def drop(self):\n Game.instance.area_map.entities.append(self.owner)\n Game.instance.inventory.remove(self.owner)\n self.owner.x = Game.instance.player.x\n self.owner.y = Game.instance.player.y\n message('You dropped a ' + self.owner.name + '.', palette.yellow)", "async def removefrom(self, 
ctx, game, user):\n\n if remove(game, user.id):\n await self.bot.say(\"{} was removed from {}'s' library.\".format(game, user.nick))\n else:\n await self.bot.say(\"{} does not have this game in their library.\".format(user.nick))", "def remove_colors(ingredient):\n colors = [\"yellow\", \"purple\", \"green\", \"black\",\n \"purple\", \"white\", \"red\"]\n no_colors = [gram for gram in ingredient.split(\" \") if gram not in colors]\n colorless_string = \" \".join(no_colors)\n return colorless_string", "def remove_player(self, player):\n super().remove_player(player)\n if self._waiting_for_players:\n if self.__current_player == player:\n self.player_done(player)", "def _onRemove(self, event):\n index = self.colorlist.GetSelection()\n del self.graphColors[index]\n self._tupleListToStrings()\n if len(self.graphColors) > 0:\n self.colorlist.SetSelection(0)\n self._updateButtons(None)", "def __removeLured(self, suitId):\n if self.__suitIsLured(suitId):\n del self.currentlyLuredSuits[suitId]", "def remove_judge(contest, user):\n _remove_role(contest, user, pcm.Judge)", "def remove_piece(self, piece):\n if piece.color not in (PieceColor.BLACK, PieceColor.WHITE):\n raise InternalErrorException(\"cant remove a piece with no color\")\n x, y = piece.position.x, piece.position.y\n # set placeholder in its place\n self._rubrics[x][y] = PlaceHolder(piece.position)\n\n # keep track of which pieces were removed\n if piece.name in self._removed_pieces[piece.color]:\n raise InternalErrorException(\"cant remove piece %s - already removed\" % piece.name)\n self._removed_pieces[piece.color][piece.name] = piece\n self._pieces[piece.color].pop(piece.name)", "def hit():\r\n new_card = deck[random.randint(1, len(deck))]\r\n deck.remove(new_card)\r\n return new_card", "def remove_matches(game: List[int], row: int, matches: int) -> None:\n game[row] -= matches", "def drop(self, pitem):\n\n #if the item is not inside the item list, can't drop it \n if pitem not in self.items:\n print('The player does not carry the item')\n\n #if not, remove the item \n else:\n self.items.remove(pitem)", "def colorWipe(self, color):\r\n #color = Color(R,G,B)\r\n for i in range(self.strip.numPixels()):\r\n self.strip.setPixelColor(i, color)\r\n self.strip.show()", "def play(self, player, game):\n player.get_hand().get_cards().remove(self)\n card = game.pick_card()\n player.get_hand().add_cards(card)\n game.set_action(\"NO_ACTION\")", "def take_remove_tile_turn(self, remove_tile_fxn):\n tilesAroundOpponents = []\n for player in self.board.players:\n if not player == self.player:\n x, y = player.x, player.y\n nearbyTiles = self.board.get_removable_tiles_around(x, y)\n tilesAroundOpponents.extend(nearbyTiles)\n tilesAroundOpponents = set(tilesAroundOpponents)\n x, y = self.player.x, self.player.y\n tilesAroundMe = set(self.board.get_removable_tiles_around(x, y)) # tiles around controlled player (me)\n safelyAroundOpponents = list(tilesAroundOpponents - tilesAroundMe) # tiles around opponents but not around me\n removableTiles = set(self.board.get_all_open_removable_tiles()) # all removable tiles\n safelyRemovable = list(removableTiles - tilesAroundMe) # all removable tiles except those around me\n try:\n if safelyAroundOpponents:\n target = random.choice(safelyAroundOpponents)\n elif tilesAroundOpponents: # likely that I'm next to other player. 
I'll have to remove a tile available for both of us\n target = random.choice(list(tilesAroundOpponents))\n else: # no open spots to remove around players can only happen if solid unremovable tiles exist\n target = random.choice(safelyRemovable)\n except IndexError: # this error will catch if last else statement possibly triggered it\n super(TileRemoveBot, self).take_remove_tile_turn(remove_tile_fxn)\n return\n remove_tile_fxn(target.x, target.y)", "def remove():", "def point_assigner_loss(self, group, player_sprites):\n # Grab playersprite\n if group != {}:\n for player in group:\n player.reset()\n player_sprites.add(player)", "def pick(self, inv, pl, group):\r\n if self.rect.colliderect(pl):\r\n group.remove(self)\r\n if inv.count('key') == 0:\r\n inv += ['key']\r\n music_acceptor.activatedPortalSound()", "def remove_uredjaj(self, naziv):\n self.uredjaji.pop(naziv, 0)", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "async def tod_leave(self, ctx, *args):\n try:\n self.players.remove(ctx.author)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.remove_roles(role)\n except ValueError:\n pass\n message = f\"{ctx.author.mention} has been removed from the game!\"\n await ctx.send(message)", "def change_PU(self, timer):\n for i in self.get_PU():\n i.y+=i.get_velocity()\n if i.y<=powerup_HEIGHT:\n self.get_PU().remove(i)", "def delete_habit():\n analytics.remove_habit('Play Piano')", "def removeIfDead(self):\n global HP, winColor, FPS, kills\n if self.health <= 0:\n if self.rank == \"firerate\":\n if P.boosts == 1:\n P.timer = 600\n else:\n P.boosts += 1\n\n if self.rank == \"healer\":\n if P.medkits == 1:\n HP = 100\n else:\n P.medkits += 1\n\n if self.rank == \"quadshot\":\n P.quadshot = True\n P.quadshottimer = 300\n FPS = 100\n\n if self.rank == \"helper\":\n if self.firsttime:\n self.image = pygame.transform.rotate(self.image, 180)\n self.firsttime = False\n self.y -= self.vel*3\n if self.y <= 0:\n del enemies[findPlace(self, enemies)]\n if yn(Frame, 3):\n projectiles.append(projectile(self.x+self.w+2, self.y+self.h//2, 8, yvel=0, r=True, l=False))\n projectiles.append(projectile(self.x-42, self.y+self.h//2, -8, yvel=0, r=False, l=True))\n else:\n del enemies[findPlace(self, enemies)]\n kills += 1", "def play(self, player, game):\n player.get_hand().get_cards().remove(self)\n card = game.pick_card()\n player.get_hand().add_cards(card)\n game.next_player()\n game.set_action(\"NO_ACTION\")", "def __remove_card(self, from_col, from_row) -> None:\n self.solitaire[from_col, from_row] = 0", "def addPlayer(self, name, color):\n logging.info(\"New player [%s, %s]\", name, color)\n self.players.append(player.Player(name, color))", "def newLogoff(self, playerID):\n if playerID in self.idList:\n playerIndex = self.idList.index(playerID)\n self.removePlayer(playerIndex)", "def remove_hero(self, name):\n if self.heroes != []:\n for hero in self.heroes:\n if name in hero.name:\n self.heroes.remove(hero)\n else:\n return 0\n return 0", "async def remove_player(ctx, group_name: str, player_name: str, owner: str=None):\n\n if owner and owner != ctx.message.author.name:\n if ctx.message.author.id != bot.owner_id:\n await ctx.send(\"Sorry, you don't have permission to modify that group. 
Nerd.\")\n else:\n owner = ctx.message.author.name\n \n if owner in bg_bot.manager.groups:\n for group in bg_bot.manager.groups[owner]['groups']:\n if group.name == group_name:\n if group.remove_member(player_name):\n response = f'Removed {player_name} from {group_name} successfully!'\n break\n else:\n response = \"Error removing player!\"\n break\n\n else:\n response = \"No groups exist that match the input criteria.\"\n \n await ctx.send(response)", "def dealPlayer(self, player):\n self.send(Message(text=\"You have been dealt: \" + str(player.hand)), thread_id=player.fbid, thread_type=ThreadType.USER)", "def appendPlayer(self, new_player):\n if type(new_player) is not player.Player:\n raise TypeError(\"Can only append objects of type Player.\")\n # Insert the player with the input color into __colordict__\n color = new_player.getColor()\n if color in self.__colordict__:\n raise IndexError(\"The color of the appended player (\" + cell.getColorString(color) +\n \") is already in the Rotator.\")\n newnode = __rotationnode__(new_player)\n self.__colordict__[color] = newnode\n # Link the node\n if self.__currentnode__ is None:\n self.__currentnode__ = newnode\n newnode.connectForward(newnode)\n newnode.connectBackward(newnode)\n self.__initial_color__ = new_player.getColor()\n else:\n former_prevnode = self.__currentnode__.getPrevious()\n newnode.connectForward(self.__currentnode__)\n newnode.connectBackward(former_prevnode)\n self.__currentnode__.connectBackward(newnode)\n former_prevnode.connectForward(newnode)", "def detached(self, mind):\n self.remote = None\n players.remove(self)", "def remove_player_from_pending(self, player_email=None, session=None):\n self.pending_players.remove(player_email)\n session.query(Game).filter(Game.id == self.game_id).update(\n {'pending_players': json.dumps({'pendingPlayers': self.pending_players})}\n )\n session.commit()", "def penalisePlayer(player,active_players,rounds):\r\n ghost_players = {}\r\n active_players[player] += 1\r\n if active_players[player] == len('ghost'):##\r\n ghost_players[player] = ghost_players.get(player,rounds)\r\n del active_players[player]\r\n print \" Boom! 
you are now a ghost\"\r\n return ghost_players\r\n print \" Your status is: \" + 'ghost'[:active_players[player]]\r\n return None", "def drop(self, command):\n \n for item in self.inventory:\n if item.name == command[1]:\n self.location.inventory.append(item)\n self.inventory.remove(item)\n print(\"You dropped a\", item.name)\n return \n print(command[1] + \" is not here!\")", "def get_victors(self):\n if self.is_game_over():\n scores = [p.get_score() for p in self.state.get_players()]\n if len(scores) == 0:\n return []\n max_score = max(scores)\n victors = []\n for p in self.state.get_players():\n if p.get_color() not in self.violators and p.get_score() == max_score:\n victors.append(self.players[p.get_color()])\n return victors\n else:\n return None", "def get_player_squares(self, player: PlayerColor) -> List[Square]:\r\n return [square for square in self.squares.values() if\r\n square.state == SquareState.OCCUPIED\r\n and square.occupant.owner == player]", "def player_discard(self, inpt):\n \n if inpt.isdigit() == False:\n return 0\n if int(inpt) > len(self.player_hand):\n print(\"\\nNumber of card entered is greater than number of cards\")\n print(\"Please try again \\n\")\n return 0\n if self.player_hand[int(inpt)-1][1] == '8':\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n self.new_suit = ''\n while self.new_suit not in ['h','d','s','c']:\n self.new_suit = input(\"Please enter new suit: h, d, s, c\\n\")\n print(\"\\nNew suit is: \", self.new_suit)\n return 1\n if self.new_suit != '':\n if self.player_hand[int(inpt)-1][0] == self.new_suit:\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n self.new_suit = ''\n return 1\n else:\n print(\"\\nYou need to match new suit\")\n print(\"Please try again\\n\")\n return 0\n if self.new_suit == '':\n if self.player_hand[int(inpt)-1][0] == self.discard_pile[0] or \\\n self.player_hand[int(inpt)-1][1] == self.discard_pile[1]:\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n return 1\n else:\n print(\"\\nYou need to match discard pile card suit or rank\")\n print(\"Please try again\\n\")\n return 0", "def get_opponent_color(self, mycolor):\n if mycolor == ChessGame.BLACK:\n return ChessGame.WHITE\n elif mycolor == ChessGame.WHITE:\n return ChessGame.BLACK\n else:\n raise NotImplementedError()", "def on_removeuser(self, username):\n self.users.remove(username)\n print ('%s left the room.' 
% username)", "def remove_from_winning(game: List[int]) -> None:\n while True:\n game_copy = game.copy()\n row = randint(1, len(game_copy))\n if game_copy[row-1] < 1:\n continue\n matches = randint(1, game_copy[row-1])\n remove_matches(game_copy, row - 1, matches)\n if not is_winning(game_copy):\n remove_matches(game, row - 1, matches)\n break\n print(\"{} matches on row {} have been removed.\".format(matches, row))", "def clearList(self):\r\n self.players.clear()", "def dropObject(player):\n for treasure in Treasure.List:\n if player.treasureCaptured:\n player.treasureCaptured = False\n treasure.x = player.x\n treasure.y = player.y\n treasure.img = pygame.image.load(Treasure.treasure_img[0])", "def remove_guest(self, key: int):\r\n if self.has_guest(key):\r\n self.guests.pop(key)", "async def _kill_player(self, ctx: Context, *, user: discord.Member):\n\n guild = ctx.guild\n\n player_id = await self.config.guild(guild).player_id()\n player_role = discord.utils.get(guild.roles, id=player_id)\n\n if player_role not in user.roles:\n return await ctx.send(_(\"User doesn't have player role.\"))\n\n try:\n await user.remove_roles(player_role)\n except discord.Forbidden:\n return await ctx.send(\n _(\n \"I either don't have permissions to manage\"\n \" roles or the `{}` role is above my highest role!\"\n ).format(player_role.name)\n )\n\n dead_id = await self.config.guild(guild).dead_id()\n dead_role = discord.utils.get(guild.roles, id=dead_id)\n\n await user.add_roles(dead_role)\n\n await ctx.message.add_reaction(CHECK_MARK)", "def opponent(self, player):\r\n # player = core.BLACK (can do this for any static var)\r\n if player == core.BLACK:\r\n return core.WHITE\r\n else:\r\n return core.BLACK", "async def delete_player_status(user_id):\n await ex.conn.execute(\"DELETE FROM blackjack.currentstatus WHERE userid = $1\", user_id)", "def remove(self, idx_):\n self._dice.pop(idx_)", "def _decrease_lives(self, player):\n player.lives -= 1\n if player.lives:\n self.dead_player = True\n player.is_alive = False\n else:\n self.game_over = True", "def opponent(player):\n return BLACK if player is WHITE else WHITE", "def remove_enemy_from_list(name):\n\n def write_to_file(data):\n with open(\"data.json\", \"w\") as file2:\n json.dump(data, file2, indent=4)\n\n with open(\"data.json\", \"r\") as file:\n data = json.load(file)\n for _name in data[\"enemy_data\"][\"names\"]:\n if name == _name:\n data[\"enemy_data\"][\"names\"].remove(name)\n write_to_file(data)\n return True\n \n return False", "def remove(self, user_id):\n pass", "def player_left(self, player):\n # might not be able to use player as key in dict\n if player in self.player_scores:\n del self.player_scores[player]\n\n if player in self.player_guessed:\n self.player_guessed.remove(player)\n\n if player == self.player_drawing:\n self.chat.update_chat(\"Round has been skipped because the drawer left.\")\n self.end_round(\"Drawing player leaves\")", "def remove(self, egg):", "def init_players(self):\n complain = \"\"\n players_turn = random.sample(range(self.n_players), self.n_players)\n players_created = {}\n picked_colors = []\n for x in range(self.n_players):\n while True:\n clear_output()\n try:\n color = input(\n f\"{complain}Player {x+1}, please type in one of the following colors: ({', '.join([x.capitalize() for x in self.world.player_colors if x not in picked_colors])}):\\n\").lower()\n if color in self.world.player_colors and color not in picked_colors:\n picked_colors.append(color)\n players_created[players_turn[x]] = Player(\n 
color.capitalize(), self.start_troops)\n break\n else:\n complain = \"Please enter a valid color\\n\"\n except:\n pass\n\n self.players = [players_created[y] for x in range(\n self.n_players) for y in players_created.keys() if int(y) == x]", "def remove(self, xcord, ycord, g_board):\n for i in range(xcord, xcord + 2):\n for j in range(ycord, ycord + self.size):\n g_board[i][j] = ' '", "def replace_card(player_name,cards_to_replace):\t\t#player_name= card list of player; cards_to_list= list of cards, player wants to replace\n\tburn_card()\t#remove 1st card from deck\n\tfor i in range(len(cards_to_replace)):\t\n\t\tif cards_to_replace[i] in player_name:\n\t\t\tplayer_name.remove(cards_to_replace[i])\n\t\t\tplayer_name.append(my_deck.draw())\n\treturn player_name", "def pick(self, inv, pl, group, sc):\r\n if self.rect.colliderect(pl) and not self.used:\r\n group.remove(self)\r\n inv += ['score {}'.format(id(self))]\r\n sc += [sc[len(sc) - 1] + 100]\r\n self.used = True", "def join_player(self, data, user):\n self.remove(user)\n\n user.room = \"100\"\n user.x = \"0\"\n user.y = \"0\"\n user.frame = \"0\"\n\n self.add(user)", "def eat_coin(self):\r\n self.app.coins.remove(self.grid_pos)\r\n self.current_score += 1", "def removePlayer(df, name):\n if name in getPlayerList(df):\n df = df[df.name != name]\n return df" ]
[ "0.7096801", "0.6246487", "0.6242192", "0.59200877", "0.5919842", "0.5817426", "0.57871246", "0.57752347", "0.57319325", "0.57220024", "0.57121086", "0.5706811", "0.5705507", "0.5691792", "0.56409585", "0.564011", "0.5619549", "0.5613825", "0.55753464", "0.5562794", "0.55148274", "0.55026793", "0.54680395", "0.5456231", "0.5450543", "0.54256123", "0.5417046", "0.53862697", "0.5357478", "0.5320001", "0.53069156", "0.53068614", "0.525895", "0.52522177", "0.52307403", "0.52188253", "0.52033097", "0.5201436", "0.51858103", "0.51781625", "0.5164771", "0.51588804", "0.5148796", "0.5143083", "0.51348895", "0.5133951", "0.5125295", "0.511719", "0.5115508", "0.50947315", "0.5094373", "0.508944", "0.50815314", "0.507846", "0.5074211", "0.5051577", "0.50476164", "0.503434", "0.50341326", "0.5026009", "0.50217146", "0.50204355", "0.501793", "0.5015645", "0.50111824", "0.499753", "0.49940932", "0.49910703", "0.49904275", "0.49755815", "0.4971113", "0.49671587", "0.49668267", "0.49611846", "0.49565184", "0.49546066", "0.49486858", "0.49448976", "0.49446413", "0.49419078", "0.49406132", "0.49326235", "0.4932421", "0.49301594", "0.49273205", "0.4925613", "0.4922672", "0.4922339", "0.4920815", "0.49082878", "0.49080923", "0.49066326", "0.49053758", "0.48984042", "0.48925635", "0.48843524", "0.4883125", "0.48804635", "0.48726013", "0.48697612" ]
0.8311631
0
Helper to put threaded function return value in a queue. Puts None in the queue if an exception occurs.
def __player_thread(self, func, arg, queue): try: queue.put(func(arg)) except Exception as exc: #print(exc) queue.put(None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReturnWrapper(queue, fn):\n queue.put(fn())", "def queue_wrapper(result_queue, wid,\n func, args):\n result_queue.put((wid, func(*args)))", "def putting_on_queue(*args):\n results.put(main_func(*args))", "def run(self, arg: _T) -> queue.Queue[Tuple[Optional[_R], Optional[Exception]]]:\n self.cancel()\n\n result_queue: queue.Queue[\n Tuple[Optional[_R], Optional[Exception]]\n ] = queue.Queue(1)\n\n def inner() -> None:\n try:\n result = self.target(self.__cancel, arg)\n\n # This is only going to happen if the callable never checks the cancelation Event\n if self.__cancel.is_set():\n raise CancelledException()\n\n result_queue.put((result, None))\n except Exception as exception:\n result_queue.put((None, exception))\n\n thread = threading.Thread(name=self.name, target=inner, daemon=True)\n with self._lock:\n self.__thread = thread\n thread.start()\n\n return result_queue", "def _process_run(queue: Queue, func: Callable[[Any], Any] = None,\n *args, **kwargs):\n queue.put(func(*args, **kwargs))", "def queue(self, func, *args, **kwargs):\n return self.event_queue.put((func, args, kwargs))", "def wrapper(func, retvals_queue, chunk):\n return_value = func(*chunk['args'], **chunk['kwargs'])\n retvals_queue.put(dict(indices=chunk['indices'],\n return_value=return_value))", "def enqueue(self, func):\n self.queue.put(func)", "def ex(self, fn, *args, **kwargs):\n if len(args) == 0 and len(kwargs) == 0:\n self.down_queue.put(fn)\n else:\n\n def closure():\n return fn(*args, **kwargs)\n\n self.down_queue.put(closure)", "def _process_worker(call_queue, result_queue):\n while True:\n call_item = call_queue.get(block=True)\n if call_item is None:\n # Wake up queue management thread\n result_queue.put(os.getpid())\n return\n try:\n r = call_item.fn(*call_item.args, **call_item.kwargs)\n except BaseException as e:\n exc = _ExceptionWithTraceback(e, e.__traceback__)\n result_queue.put(_ResultItem(call_item.work_id, exception=exc))\n logger.exception(e) # 主要是直接显示错误。\n else:\n result_queue.put(_ResultItem(call_item.work_id,\n result=r))", "def handler(obj, testcase, queue):\n try:\n queue.put((None, testcase(obj)))\n except:\n queue.put((sys.exc_info(), None))", "def rec_from_queue( self, ):\r\n try:\r\n action, function, function_args = self.queue_fr_helper.get_nowait()\r\n except queue.Empty:\r\n action = \"\"\r\n function = None\r\n function_args = None\r\n\r\n return ( action, function, function_args )", "def get_nowait(self) -> _T:\n self._consume_expired()\n if self._putters:\n assert self.full(), \"queue not full, why are putters waiting?\"\n item, putter = self._putters.popleft()\n self.__put_internal(item)\n future_set_result_unless_cancelled(putter, None)\n return self._get()\n elif self.qsize():\n return self._get()\n else:\n raise QueueEmpty", "def _thread_run_for_result(future, func, *args):\n result = func(future, *args)\n future._set_result(result)", "def apply(self, external_callable, *args, **kwargs):\n self.work_request_queue.put((external_callable, args, kwargs))\n return self.result_queue.get()", "def test_execution(self):\r\n a_thread = workerthread.WorkerThread(exception_queue=self.exception_queue,\r\n return_queue=self.message_queue,\r\n target=self.sample_function, args=(1, 2))\r\n a_thread.start()\r\n a_thread.join()\r\n self.assertEqual(self.sample_function(1, 2), self.message_queue.get())", "def dequeue_loop():\n while True:\n result = dequeue_function()\n if not result:\n break\n print(result)", "def _work_function(job_q, result_q, error_q):\r\n # type: (Queue, Queue, Queue) 
-> None\r\n while True:\r\n job = job_q.get()\r\n\r\n if isinstance(job, _ThreadPoolSentinel):\r\n # All the work is done, get out\r\n result_q.put(_ThreadPoolSentinel())\r\n error_q.put(_ThreadPoolSentinel())\r\n job_q.task_done()\r\n break\r\n\r\n function = job[0]\r\n args = job[1]\r\n try:\r\n result = function(*args)\r\n except Exception as e:\r\n error_q.put((job, e))\r\n else:\r\n result_q.put((job, result))\r\n finally:\r\n job_q.task_done()", "def thread_func(*args, **kwargs):\n exception, res = None, None\n try:\n res = func(*args, **kwargs)\n except Exception as e:\n exception = e\n return callback(exception, res)", "def _handle_thread(self, func):\n @wraps(func)\n def handle_thread(*args, **kwargs):\n \"\"\"Wrapped function\"\"\"\n try:\n ident = threading.get_ident()\n # Execute the function\n result = func(*args, **kwargs)\n except:\n error = sys.exc_info()\n with self.lock:\n # Record the error\n if (self.thread.ident == ident):\n self.result = None\n self.error = error\n raise error[1].with_traceback(error[2])\n else:\n with self.lock:\n # Record the result\n if (self.thread.ident == ident):\n self.result = result\n self.error = None\n return handle_thread", "def do(self, f, *args, **kwArgs):\n self.queue.put((f, args, kwArgs))", "def threadWorker(self):\n while True:\n row = self.queue.get() #get a row of data\n if row is None: #ending criterium\n break\n self.similarityQuestions(row) #the actual working function\n self.queue.task_done() #inform the queue one task is done", "def queue(self, queue_, value):\n while not self.closed:\n try:\n queue_.put(value, block=True, timeout=1)\n return\n except queue.Full:\n continue", "def wrapper(*args, **kwargs):\n ret_val = []\n thread = Thread(target=wrapped_func, args=(ret_val,)+args, kwargs=kwargs)\n thread.ret_val = ret_val\n thread.start()\n return thread", "def run(self):\n try:\n if self._target:\n retval = self._target(*self._args, **self._kwargs)\n self._queue.put(retval)\n except Exception: # pylint: disable=broad-except\n self.err = sys.exc_info()\n logger.debug(\"Error in thread (%s): %s\", self._name,\n self.err[1].with_traceback(self.err[2]))\n finally:\n self._complete.set()\n # Avoid a ref-cycle if the thread is running a function with\n # an argument that has a member that points to the thread.\n del self._target, self._args, self._kwargs", "def non_blocking_get(self):\n try:\n return self.q.get(block=False)\n except queue.Empty:\n time.sleep(0)\n return None", "def putting_task(self, func, *args, **kws):\n if self.is_join:\n raise self.TreadPoolException('Thread pool is closed.')\n result_id = kws.pop('_result_id', self.NULLKEY)\n task = self.setup_func(result_id, func, *args, **kws)\n # mark one position in queue has been taken.\n self.task_queue.put(True)\n self.execute_task(task)", "def _process_data(f, work_queue, results_queue):\n for element in iter(work_queue.get, FINISHED):\n try:\n results_queue.put(f(element))\n except Exception, work_error:\n LOG.critical('parallel_pc Error: {0}\\n\\n\\tconfig settings {1}\\n'.format(work_error, element))\n results_queue.put(FINISHED)", "def test_dequeue_returns_value():\n queue = Queue()\n queue.enqueue('a')\n assert queue.dequeue() is 'a'", "def thread_output(*args, **kwargs):\n return_queue = Queue()\n kwargs[\"args\"] = (kwargs[\"args\"] or tuple()) + (return_queue, )\n thread = threading.Thread(*args, **kwargs)\n thread.start()\n yield return_queue\n thread.join()", "def test_mp_queue():\n def f(q):\n q.put([42, None, 'hello'])\n # print(\"blah\") # prints to 
screen\n\n q = mp.Queue()\n p = mp.Process(target=f, args=(q,))\n p.start()\n rval = q.get()\n print(type(rval), rval)\n \n # kills process\n p.join()", "def get_page_queue(url, queue):\n queue.put(urlopen(url).read())\n return None", "def __add__(self, value):\n self.queue.append(value)", "def process(q, results, iolock, func, args, kwargs):\n\n kwargs[\"iolock\"] = iolock\n\n while True:\n\n line = q.get()\n\n if line is None:\n break\n\n result = func(line, *args, **kwargs)\n results.put(result)\n\n return", "def maybe_enqueue(self):\n if len(self._vals) > 0:\n self.enqueued = True\n return self._queue.enqueue(self._vals)\n else:\n return None", "def enqueue(Q, x):\n # Q.append(x)\n Q.put_nowait(x)\n if debug: \n print(\"enqueue\", x, \":\", end=\" \")\n show_queue(Q)\n return Q", "def worker(queue, run):\n for args in iter(queue.get, None):\n try:\n run(*args)\n except Exception as e: # catch exceptions to avoid exiting the thread prematurely\n print('{} failed: {}'.format(args, e), file=sys.stderr)", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def get(self):\n with self.__lock:\n while True:\n try:\n job = self.__queue.get(False)\n self.__lock.notify_all()\n return job\n except Queue.Empty:\n self.__lock.wait()", "def setup_func(self, key, func, *args, **kws):\n\n def func_wrap():\n try:\n self.result_dict[key] = func(*args, **kws)\n except:\n raise\n finally:\n # mark one position in queue is available.\n self.task_queue.get()\n self.task_queue.task_done()\n\n def func_origin():\n try:\n func(*args, **kws)\n except:\n raise\n finally:\n self.task_queue.get()\n self.task_queue.task_done()\n\n if key is not self.NULLKEY:\n return func_wrap\n else:\n return func_origin", "def blockingCallOnMainThread(func, *args, **kwargs):\n def blockingCallFromThread(f, *a, **kw):\n queue = Queue.Queue()\n def _callFromThread():\n result = defer.maybeDeferred(f, *a, **kw)\n result.addBoth(queue.put)\n reactor.callFromThread(_callFromThread)\n\n result = None\n while True:\n try:\n result = queue.get(True, 30)\n except Queue.Empty as qe:\n if True: #not reactor.running: # reactor.running is only False AFTER shutdown, we are during.\n raise ValueError(\"Reactor no longer active, aborting.\")\n else:\n break\n\n if isinstance(result, failure.Failure):\n result.raiseException()\n return result\n\n if currentThread().getName() == 'MainThread':\n return func(*args, **kwargs)\n else:\n return blockingCallFromThread(func, *args, **kwargs)", "def monitoredQueue(self, monitorFunc):\n\t\tm_queue = q.Queue()\n\t\tm_thread = t.Thread(target=monitorFunc, args=[m_queue])\n\t\tm_thread.setDaemon(True)\n\t\tm_thread.start()\n\t\treturn m_queue", "def enqueue(self, fn):\n self.queue.put(fn)", "def dequeue(self):\n try:\n temp = self.front\n self.front = self.front.next\n temp.next = None\n return temp.value\n except Exception:\n return \"the queue is empty\"", "def 
request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()", "def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()", "def _retrieve_output(thread, timeout, queue, thread_error):\n # Try to join the thread on failure abort\n thread.join(timeout)\n if thread.is_alive():\n # Join should have killed the thread. This is unexpected\n raise TimeoutWaitingFor(thread_error + \". Unexpected error\")\n\n # Thread died so we should have output\n try:\n # data = (stdout, stderr, exitcode)\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor(\"streams from TaskWarrior\")\n\n return data", "def _safe_dequeue(self):\n @retry(\n stop_max_attempt_number=self.max_sequential_errors,\n # Wait 2^n * 1 seconds between retries, up to 10 seconds.\n wait_exponential_multiplier=1000, wait_exponential_max=10000,\n retry_on_exception=lambda e: not isinstance(e, KeyboardInterrupt))\n def inner():\n return self.queue.dequeue()\n return inner()", "def Worker(queue, out_queue):\n while not queue.empty() and Worker.running:\n item = queue.get(False)\n if not item:\n break\n results = RunGCC(item[0], item[1])\n out_queue.put(results)", "def use_queue():\n q = queue.Queue()\n for i in range(10):\n q.put_nowait(i)\n while q.qsize() > 0:\n element = q.get_nowait()\n sys.stdout.write(\"poping out from queue: {0}\\n\".format(element))", "def put(\n self, item: _T, timeout: Optional[Union[float, datetime.timedelta]] = None\n ) -> \"Future[None]\":\n future = Future() # type: Future[None]\n try:\n self.put_nowait(item)\n except QueueFull:\n self._putters.append((item, future))\n _set_timeout(future, timeout)\n else:\n future.set_result(None)\n return future", "def get(self):\n\t\ttry:\n\t\t\tself.logger.debug('Im trying to get item from queue')\n\t\t\titem = self.queue.get()\n\t\t\tself.logger.debug('Recevie item from queue %s'%(item))\n\t\t\treturn True, item\n\t\texcept Exception, e:\n\t\t\tself.logger.error('Error method get, error: %s'%(e),exc_info=True)\n\t\t\treturn False, None", "def _getqueue(self):\n go = self.tickqueue.get()\n for index in range(len(self.outqueues)):\n if not self.outqueues[index].empty(): return self.outqueues[index]", "def crawl_queue(q, result_set):\n _log = logging.getLogger(crawl_queue.__name__)\n while not q.empty():\n worker = q.get() #get an itme from the queue\n\n try:\n req = requests.get(worker[1], verify = False, timeout = (30,30), headers = create_fakeheader(ua,browser))\n cont = req.content\n result_set[worker[0]] = cont\n except:\n _log.warning(f' couldnt find a request for index {worker[0]}')\n result_set[worker[0]] = ''\n if q.qsize() % 100 == 0:\n _log.info(f'things left to process {q.qsize()}')\n q.task_done()\n return True", "def maybe_dequeue(self):\n if self._queue.enqueued:\n return self._queue.dequeue()\n else:\n return None", "def call(self):\n current_thread = threading.current_thread() # get current thread·\n event = self.q.get() # get task from queue\n while event != self.StopEvent: # Determine whether task is a terminator\n\n func, arguments, callback = event # get funcname,params,callback name\n try:\n result = func(*arguments)\n func_excute_status = True # set func executed status success\n except Exception as e:\n func_excute_status = False # set func executed status failure\n result = None\n print('{} executed error:'.format(func.__name__), e)\n\n if func_excute_status: #\n if callback is not None: # determine whetherif callback is 
None\n try:\n callback(result)\n except Exception as e:\n print(callback.__name__, e)\n\n with self.worker_state(self.free_list, current_thread):\n if self.terminal:\n event = self.StopEvent\n else:\n event = self.q.get()\n\n else:\n self.created_list.remove(current_thread)", "def get_queue():\n\n return multiprocessing.Queue()", "def submit( # type: ignore[override]\n self, fn: Callable[..., T], *args: Any, **kwargs: Any\n ) -> FutureType[T]:\n future = cast(FutureType[T], Future())\n try:\n result = fn(*args, **kwargs)\n except BaseException as exc_info:\n future.set_exception(exc_info)\n else:\n future.set_result(result)\n return future", "def queue_iter(queue: Queue) -> Generator[T, None, None]:\n while True:\n val = queue.get()\n yield val", "def _put_nowait(self, value):\n while True:\n if self._waiting_consumers:\n consume_wish = self._waiting_consumers.pop(0)\n with consume_wish.group.lock:\n if not consume_wish.group.fulfilled:\n consume_wish.fulfill(value)\n return\n elif self._buf is not None and not self._buf.full:\n self._buf.push(value)\n return\n else:\n raise Full()", "def enqueue_call(self, func, args=None, kwargs=None, timeout=None, result_ttl=None): #noqa\n timeout = timeout or self._default_timeout\n job = Job.create(func, args, kwargs, connection=self.connection,\n result_ttl=result_ttl, status=Status.QUEUED)\n yield self.enqueue_job(job, timeout=timeout)\n defer.returnValue(job)", "def get_result(self):\n if not self._complete.is_set():\n logger.warning(\"Aborting attempt to retrieve result from a LongRunningTask that is \"\n \"still running\")\n return None\n if self.err:\n logger.debug(\"Error caught in thread\")\n self._config.set_cursor_default(widget=self._widget)\n raise self.err[1].with_traceback(self.err[2])\n\n logger.debug(\"Getting result from thread\")\n retval = self._queue.get()\n logger.debug(\"Got result from thread\")\n self._config.set_cursor_default(widget=self._widget)\n return retval", "def filter_results(extQueue, timeout_value, url):\n from Queue import Empty\n try:\n LOG.debug('Timeout value in filter_result :%d' % timeout_value)\n nodes = extQueue.get(True, timeout=timeout_value)\n \n except Empty as e:\n LOG.info('Did not receive any results from FF plugin for %s' % url)\n nodes = None\n finally:\n while not extQueue.empty():\n extQueue.get()\n return nodes", "def test_exception_execution(self):\r\n a_thread = workerthread.WorkerThread(exception_queue=self.exception_queue,\r\n return_queue=self.message_queue,\r\n target=self.sample_exception_function, args=(1, 2))\r\n a_thread.start()\r\n a_thread.join()\r\n exc_type, exc = self.exception_queue.get()\r\n self.assertTrue(isinstance(exc, Exception))", "def test_valueInQueue(self):\n genFn = Mock(return_value=None)\n expected = 123\n \n wrapper = KaoGenerator(genFn)\n wrapper.queue(expected)\n actual = wrapper.pop()\n self.assertEqual(expected, actual)", "def _retrieve_output(thread, timeout, queue, thread_error):\n # Try to join the thread on failure abort\n thread.join(timeout)\n if thread.isAlive():\n # Join should have killed the thread. This is unexpected\n raise TimeoutWaitingFor(thread_error + \". 
Unexpected error\")\n\n # Thread died so we should have output\n try:\n # data = (stdout, stderr, exitcode)\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor(\"streams from program\")\n\n return data", "def test_the_queue_dequeue(the_queue):\n the_queue.enqueue(2)\n assert the_queue.dequeue() == 2", "def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2", "def _put(self, item, queue):", "def _getqueue(self):\n\n go = self.tickqueue.get()\n for index in range(len(self.outqueues)):\n if not self.outqueues[index].empty():\n return self.outqueues[index]", "def test_MultiThreaded(self):\n\n q = Queue(self.path)\n def producer():\n for i in range(1000):\n q.put('var%d' % i)\n\n def consumer():\n for i in range(1000):\n q.get()\n q.task_done()\n\n c = Thread(target = consumer)\n c.start()\n p = Thread(target = producer)\n p.start()\n c.join()\n p.join()\n with self.assertRaises(Empty):\n q.get_nowait()", "def add_task(self, func, *args, **kwargs):\n self.queue.put((func, args, kwargs))", "def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except Queue.Empty:\n return None\n return item", "def small_queue():\n queue = Queue()\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n queue.enqueue(4)\n return queue", "def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except queue.Empty:\n return None\n return item", "def put_nowait(self, item: _T) -> None:\n self._consume_expired()\n if self._getters:\n assert self.empty(), \"queue non-empty, why are getters waiting?\"\n getter = self._getters.popleft()\n self.__put_internal(item)\n future_set_result_unless_cancelled(getter, self._get())\n elif self.full():\n raise QueueFull\n else:\n self.__put_internal(item)", "def progress_wrapper(user_defined_function: Callable, master_workers_queue: multiprocessing.Queue, index: int, chunk_size: int) -> Callable:\n ...", "def _wait_for_results(self) -> RemoteCallableResult:\n if (\n self.subscriber is None or\n self.started is None or\n self.process is None\n ):\n raise dbt.exceptions.InternalException(\n '_wait_for_results() called before handle()'\n )\n\n try:\n msg = self.subscriber.dispatch_until_exit(\n started=self.started,\n timeout=self.timeout,\n )\n except dbt.exceptions.Exception as exc:\n raise dbt_error(exc)\n except Exception as exc:\n raise server_error(exc)\n if isinstance(msg, QueueErrorMessage):\n raise RPCException.from_error(msg.error)\n elif isinstance(msg, QueueTimeoutMessage):\n if not self._single_threaded:\n self.process.terminate()\n raise timeout_error(self.timeout)\n elif isinstance(msg, QueueResultMessage):\n return msg.result\n else:\n raise dbt.exceptions.InternalException(\n 'Invalid message type {} (result={})'.format(msg)\n )", "def Task(func, *args, **kwargs):\n future = Future()\n\n def handle_exception(typ, value, tb):\n if future.done():\n return False\n future.set_exc_info((typ, value, tb))\n return True\n\n def set_result(result):\n if future.done():\n return\n future.set_result(result)\n with stack_context.ExceptionStackContext(handle_exception):\n func(*args, callback=_argument_adapter(set_result), **kwargs)\n return future", "def enqueue(self, f, *args, **kwargs):\n if not isinstance(f, basestring) and f.__module__ == '__main__':\n raise ValueError(\n 'Functions from the __main__ module cannot be processed '\n 'by workers.')\n\n # Detect explicit invocations, i.e. 
of the form:\n # q.enqueue(foo, args=(1, 2), kwargs={'a': 1}, timeout=30)\n timeout = None\n result_ttl = None\n if 'args' in kwargs or 'kwargs' in kwargs:\n assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs.' # noqa\n timeout = kwargs.pop('timeout', None)\n args = kwargs.pop('args', None)\n result_ttl = kwargs.pop('result_ttl', None)\n kwargs = kwargs.pop('kwargs', None)\n\n job = yield self.enqueue_call(func=f, args=args, kwargs=kwargs,\n timeout=timeout, result_ttl=result_ttl)\n defer.returnValue(job)", "def call_queue_closure(data, call_queue):\n result = data.copy()\n for func, f_args, f_kwargs in call_queue:\n try:\n result = func(result, *f_args, **f_kwargs)\n except Exception as err:\n self.call_queue = []\n raise err\n return result", "def maybe_future(x):\n if is_future(x):\n return x\n else:\n fut = Future()\n fut.set_result(x)\n return fut", "def enqueue(self, val):\r\n self.queue.append(val)", "def results_q_get(self):\n while not self.stopped():\n try:\n return self.results_queue.get(timeout=self.heart_beat)\n except queue.Empty:\n pass\n raise StopIteration()", "def do(self, fun):\n with self.mutex:\n self.value = fun(self.value)\n return self.value", "def test_dequeue_removes_value():\n queue = Queue()\n queue.enqueue('a')\n queue.dequeue()\n assert queue._queue.last_node is None\n assert queue._queue.first_node is None" ]
[ "0.77862006", "0.6819761", "0.63104945", "0.62345034", "0.61928016", "0.6103177", "0.6083798", "0.6009036", "0.593441", "0.5919444", "0.59155446", "0.5880543", "0.58595", "0.58402306", "0.5773026", "0.5739314", "0.57106704", "0.5703303", "0.56909806", "0.5687329", "0.5681928", "0.5662713", "0.5658153", "0.56541455", "0.5614836", "0.5600179", "0.55782765", "0.5569699", "0.5568735", "0.5536511", "0.5511734", "0.5498045", "0.5453433", "0.5444592", "0.54374814", "0.5418565", "0.53736025", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.53709483", "0.5362895", "0.53563017", "0.5327342", "0.53209406", "0.53185326", "0.52992827", "0.5298519", "0.5298519", "0.5291552", "0.52907735", "0.52797794", "0.52685976", "0.526822", "0.52660185", "0.5263525", "0.5239609", "0.52387035", "0.52313", "0.5223563", "0.52227", "0.5215953", "0.52146363", "0.5214031", "0.521242", "0.520991", "0.5206993", "0.52045035", "0.5200765", "0.51958203", "0.5194385", "0.5187323", "0.5185319", "0.5181308", "0.5181168", "0.5180389", "0.51743084", "0.51730895", "0.51703393", "0.51552504", "0.5147169", "0.51437163", "0.51419705", "0.51316124", "0.51208216", "0.5119783", "0.5115629", "0.5112932", "0.51124567" ]
0.7133484
1
Perform a single turn of the game of fish. If the game is over, the function exits before the referee makes any calls to the game state.
def run_turn(self): all_placed = self.state.all_avatars_placed() color = self.__get_next_turn(all_placed) if color is None: return if not all_placed: # placement round func = self.players[color].make_placement else: # movement round func = self.players[color].make_move queue = Queue() thread = Thread(target=self.__player_thread, args=[func, deepcopy(self.state), queue]) thread.daemon = True thread.start() thread.join(self.timeout) if thread.is_alive(): #print("The " + str(color) + " player timed out and will be removed.") self.__remove_player(color) return action = queue.get() if action == None: #print("The " + str(color) + " player crashed and will be removed.") self.__remove_player(color) return if not all_placed: if self.state.valid_placement(action, color): self.state.place_avatar(action, color) else: #print("The " + str(color) + " player has attempted an invalid placement and will be removed.") self.__remove_player(color) else: if self.state.valid_move(*action): self.state.move_avatar(*action) else: #print("The " + str(color) + " player has attempted an invalid move and will be removed.") self.__remove_player(color)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n while True:\n if self.is_game_over():\n break\n self.run_turn()", "def run(self):\n while not self.turn_over:\n self.go()", "def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()", "def doTurn(self, gamestate):\n raise NotImplementedError(\"Please Implement this method\")", "def turn(self):\r\n # 1 throw the dice\r\n if (\r\n not self.player_list[self.current_player].is_in_jail()\r\n or self.try_leave_jail()\r\n ):\r\n thr = Throw()\r\n while thr is not None:\r\n # check in where the current player will land\r\n new_position = self.compute_new_position_from_dice(\r\n self.current_player, thr\r\n )\r\n self.move_player_to(self.current_player, new_position, thr=thr)\r\n\r\n if thr.is_double():\r\n thr = Throw()\r\n else:\r\n thr = None\r\n print(\"------------------------------\")\r\n\r\n self.player_observable_variables()\r\n\r\n # move turn to next player\r\n self.current_player += 1\r\n if self.current_player >= len(self.player_list):\r\n self.current_player = 0\r\n self.full_turn_count += 1\r\n print(\"**********************\")\r\n print(\r\n \"Full turn:\",\r\n self.full_turn_count,\r\n \"\\n\",\r\n \"\\n\".join(map(lambda x: x.full(), self.player_list)),\r\n )\r\n print(\"**********************\")", "def start_game(self):\n while self.can_deal:\n self.take_turn()", "def GAMEOVER_LOOP():\n pass", "def play_game():\n # Display board.\n display_board()\n # While game is still going.\n while game_still_going:\n # Handle a single turn of an arbitrary player.\n handle_turn(current_player)\n # Flip to another player.\n flip_player()\n # Check weather game is over or not.\n check_if_game_over()", "def take_turn(self, opponent):\n\n # --------- BEGIN YOUR CODE ----------\n\n # 1.) Guess a random space that has not been guessed (or be more clever!)\n\n # Steps 2-4 are the same as Human.take_turn\n\n # 2.) Call opponent.guess() to check whether the guess is a hit or miss\n\n # 3.) Update my_hits, my_misses, and sunk_ships accordingly\n\n # 4.) If the sunk_ships array has 5 ships in it set self.complete to True\n\n # --------- END YOUR CODE ----------\n\n # enforce a short delay to make the computer appear to \"think\" about its guess\n time.sleep(0.5)", "def play_Feeder(hand, battlefield, graveyard, library):\n\thand['Carrion Feeder'] -= 1\n\tbattlefield['Carrion Feeder'] += 1\n\tlog(\"We play a Carrion Feeder.\")\n\tdescribe_game_state(hand, battlefield, graveyard, library)", "def play_game(self):\n player = Player(input(\"What is your name?\"))\n while player.health > 0:\n input(\"Press t to start another turn\")\n n = random.randint(0, 3)\n if n == 0:\n if self.monster_attack(player):\n break\n elif n == 1:\n self.find_gold(player)\n else:\n print(\"Nothing happened!\")", "def play(self):\n\n player1_turn = True\n\n while True:\n if player1_turn:\n self.player_turn(self.player1, self.player2)\n if self.lost(self.player2):\n print(\"Game Over!! You sank {}'s ships!\".format(\n self.player2.name))\n break\n player1_turn = False\n else:\n self.player_turn(self.player2, self.player1)\n if self.lost(self.player1):\n print(\"Game Over!! 
You sank {}'s ships!\".format(\n self.player1.name))\n break\n player1_turn = True", "def play_game(self):\r\n\r\n print('Welcome to a game of Concentration!!')\r\n if self.who_goes_first():\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n\r\n while True:\r\n if self.match:\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n self.check_game_end()", "def go(self):\n roll = self.die.roll()\n self.record_roll(roll)\n self.player.record_roll(roll)\n # print(\"{} you rolled a {} and your turn score is {}\".format(self.player.name, roll, self.score))\n if not self.turn_over:\n self.turn_over = not self.player.go_again()", "def step(self):\n if not self._is_game_over:\n self._move_snake()\n self._is_game_over = self.is_snake_collides()", "def fainted(self):\n self.pkmn.faint()\n messages = self.effect.attemptAfterTurn(self.pkmn)\n assert messages == [], \"Should receive no messages since nothing was performed\"", "def turn(self):\n pass", "def oneGame():\n playOneGame()", "def restartGame(self):\n\t\tself.state = [[0 for x in range(3)] for y in range(3)]\n\t\tself.turn = self.whoGoesFirst()\n\t\tself.win = 0", "def main():\r\n turn_left()\r\n move_three_times()\r\n turn_right()\r\n move_three_times()\r\n turn_right()\r\n move_three_times()\r\n turn_left()", "def computer_fire(self):\n\n # Check tracker to see if previous attempt was a hit\n # If yes, continue to bomb rest of the ship first\n for shipID, size in self.tracker.items():\n if (size != 0) and (self.counter_copy[shipID] != size):\n for n in range(len(self.hit)):\n if self.hit[n] == shipID:\n self.bomb(n)\n return\n\n # Else, randomly fire on a new location\n n = random.randrange(0, len(self.hit))\n while self.hit[n] == 5:\n n = random.randrange(0, len(self.hit))\n self.bomb(n)", "def __do_turn(self, time_left_ms):\n if not self.data.hand:\n self.bot.log(\"No hand, killing ourselves. Data={d}\"\n .format(d=self.data))\n self.bot.fold()\n return\n\n hand = self.data.hand\n stack = self.our_stack()\n to_call = self.to_call(silent=False)\n pot_odds = self.pot_odds()\n equity = 0\n self.update_fear(to_call)\n preflop_fear = self.data.preflop_fear\n hand_fear = self.data.hand_fear\n\n # preflop, no big raises. 
safe to use our precalculated win %\n if not self.data.table_cards and preflop_fear == -1:\n equity = self.preflop_equity[hand.simple()]\n source = \"preflop\"\n else:\n simulator = HandSimulator(hand, self.data.table_cards,\n self.preflop_equity)\n best_hand, score = simulator.best_hand()\n equity = self.__run_simulator(simulator, time_left_ms,\n preflop_fear, hand_fear)\n source = \"sim\"\n\n self.bot.log(\" hand: {h}, table: {t}\"\n .format(h=hand, t=[str(t) for t in self.data.table_cards]))\n if self.data.table_cards:\n self.bot.log(\" best 5: {b} score: {s}\"\n .format(b=[str(c) for c in best_hand], s=str(score)))\n self.bot.log(\" win: {e:.2f}% ({s}), pot odds: {p:.2f}%, stack={m}\"\n .format(e=equity, s=source, p=pot_odds, m=stack))\n self.bot.log(\" pre-fear={pf}, hand-fear=({hf})\"\n .format(pf=preflop_fear, hf=hand_fear))\n\n self.pick_action(equity, to_call, pot_odds)", "def play_land(hand, battlefield, graveyard, library):\n\thand['Gemstone Mine'] -= 1\n\tbattlefield['Gemstone Mine'] += 1\n\tlog(\"We played Gemstone Mine.\")\n\tdescribe_game_state(hand, battlefield, graveyard, library)", "def play(the_game):\n\n print('-' * 50)\n print('')\n player = the_game.player_list[the_game.turn]\n print(' Turn {0} as {1}'.format(player.name, player.piece.name))\n print(' Rolling...\\n')\n die1, die2, roll = the_game.roll()\n\n print(' {0} + {1} = {2}!'.format(die1, die2, roll))\n\n if the_game.dice.doubles:\n print('** D O U B L E S ! **\\n')\n if player.in_jail:\n print('*** GET OUT OF JAIL ***')\n player.leave_jail()\n player.doubles = 0\n\n if player.doubles == 2:\n player.doubles = 0\n player.go_to_jail()\n print('*** DOUBLES THIRD TIME. GO TO JAIL! ***\\n')\n the_game.next_turn()\n else:\n player.doubles += 1\n if player.doubles == 1:\n print('Doubles First time')\n elif player.doubles == 2:\n print('Doubles Second time')\n else:\n player.doubles = 0\n\n if player.in_jail:\n player.position = 10\n\n if player.passed_go and not (player.doubles == 2 and the_game.dice.doubles):\n print('\\n $$$ {0} Passed GO! $$$\\n'.format(player.name))\n player.passed_go = False\n player.receive(200)\n\n print(' {0} Landed on {1}.'.format(\n player.name, the_game.board.location(player.position).name))", "def play_game():\n pass", "def main():\n\n print(\"Welcome to ToBe. Enjoy your stay.\")\n print(\"\\n\")\n\n player = Player()\n game = GameState()\n\n while not game.game_over:\n\n print(\"You are \" + player.return_age() + \" years old.\")\n\n game.play_turn(player)\n\n if player.age == 5:\n\n game.game_over = True\n \n input(\"Press ENTER to continue: \")\n print()\n\n print(\"You have died! 
You were \" +\n player.return_age() + \" years old.\")", "def GAME_LOOP():\n pass", "def callback_game_loop(self) -> None:\n self._goal_generate()\n self._update()\n self.reset()\n\n while self._player != self._goal:\n self._update()\n action = self._action_callback(\n self._player.np,\n self._goal.np,\n *self._action_callback_args,\n )\n if action == \"QUIT\":\n break\n self._player_erase()\n self.FUNCMAP[action]()\n self._update()\n\n if self._display:\n time.sleep(0.1)\n try:\n if chr(cv2.waitKey(5)) in self.KEYMAP[\"QUIT\"]:\n break\n except ValueError:\n pass\n\n if self._display:\n print(f\"Steps taken: {self._routes[self._current_route_key]}\")\n\n if self._display:\n cv2.waitKey(0)", "def take_turn(self):\n \n self.card_1 = self.get_card()\n self.display_card_1()\n guess = self.player.higher_lower()\n self.card_2 = self.get_card()\n self.display_card_2()\n self.compare_cards(guess)\n self.player.print_score()\n if self.player.score > 0:\n self.can_deal = self.player.keep_playing()\n print(\"\\n\")\n else:\n self.can_deal = False\n print(\"Game overThanks for playing!\")", "def turn(self, player):\n player.turn_status = 1\n print 'It is {}\\'s turn.'.format(player.name)\n while player.turn_status == 1 and player.totscore < 100:\n roll = self.die.roll()\n if roll == 1:\n print ('Sorry {}! You rolled a 1 and forfeit all '\n 'points this turn. Your total score is {}. Pass die '\n 'to next player.').format(player.name, player.totscore)\n player.turnscore = 0\n self.next_player()\n else:\n print '{} rolled a {}.'.format(player.name, roll)\n player.turnscore += roll\n print ('Your current point total '\n 'for this turn is {}. Your total '\n 'score is {}').format(player.turnscore, player.totscore)\n self.turn_choice(player)\n print ('{} score is {} and'\n 'has won the game!').format(player.name, player.totscore)", "def main():\r\n turn_left()\r\n while front_is_clear():\r\n move()\r\n turn_right()\r\n while front_is_clear():\r\n move()\r\n turn_right()\r\n while front_is_clear():\r\n move()\r\n turn_left()", "def begin_turn(self):\n pass", "def test_simple():\n game = Game(3, [0, 0], -1, 5, -5, 10, 1, [[0, 1]], [0.0])\n\n print(f\"Check the baby exists\\n{game.baby}\")\n\n print(\"\\nCheck the berry exists\")\n for berry in game.get_berries():\n print(berry)\n\n print(f\"\\nHere is the board\\n{game.get_board()}\")\n\n print(\"First let's perform an illegal move Northwards\")\n board, reward, done = game.step(\"N\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow let's perform a legal move which does NOT eat the berry\")\n board, reward, done = game.step(\"E\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow we will move back to the original place and then eat the berry\")\n board, reward, done = game.step(\"W\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow let's perform a legal move which does NOT eat the berry\")\n board, reward, done = game.step(\"S\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")", "def run_turn(self):\n # <<-- Creer-Merge: runTurn -->> - Code you add between this comment and the end 
comment will be preserved between Creer re-runs.\n checker = True\n\n while checker:\n checker, tile = self.find_move() or (None, None)\n\n if checker:\n checker.move(x=tile['x'], y=tile['y'])\n\n return True # as we are done with our turn\n # <<-- /Creer-Merge: runTurn -->>", "def play(self):\n\n input(\"\"\"\nWelcome to Angry Dice! Roll the two dice until you get thru the 3 Stages!\nStage 1 you need to roll 1 & 2\nStage 2 you need to roll ANGRY & 4\nStage 3 you need to roll 5 & 6\nYou can lock a die needed for your current stage\nand just roll the other one, but beware!\nIf you ever get 2 ANGRY's at once, you have to restart to Stage 1!\nAlso, you can never lock a 6! That's cheating!\n\nTo rol the dice, simply input the name of the die you want to roll.\nTheir names are a and b.\n\nPress ENTER to start!\n \"\"\")\n self.cheating = self.roll_parse(\"ab\")\n done = False\n while not done:\n self.print_hand()\n decision = input(\"Roll dice: \")\n self.cheating = self.roll_parse(decision)\n done = self.advance_check()\n self.print_hand()\n print(\"You've won! Calm down!\")", "def run(self):\n self.running = True\n \n for player in self.game.players:\n self.runPlayerTurn(player)\n if self.game.over:\n return", "def move(self, game):\n state = game.state\n game.post(\"It's now {}'s' turn\".format(self.name))\n state.status(self, game)\n selected = game.get(state.prompt_str)\n while selected not in state.moves or not state.moves[selected].canPerform(game, self):\n game.post(\"Move cannot be performed. Try again.\")\n selected = game.get(state.prompt_str)\n state.moves[selected].perform(game, self)", "def play_game():\n pass", "def RunTurn( lobound=1, hibound=20 ):\n\tpass", "def turn():\n \n robottype = get_type()\n if robottype == RobotType.PAWN:\n pawn_turn()\n else:\n overlord_turn()\n bytecode = get_bytecode()", "def turn():\n \n robottype = get_type()\n if robottype == RobotType.PAWN:\n pawn_turn()\n else:\n overlord_turn()\n bytecode = get_bytecode()", "def play_game() -> None:\n board = tuple(tuple(0 for _ in range(i, i + 16))\n for i in range(0, 64, 16))\n state = GameState(board, 1)\n while state.util is None:\n # human move\n print(state.display)\n state = state.traverse(int(input(\"Move: \")))\n if state.util is not None:\n break\n # computer move\n find_best_move(state)\n move = (state.selected if state.selected != -1\n else random.choice(state.moves))\n state = state.traverse(move)\n print(state.display)\n if state.util == 0:\n print(\"Tie Game\")\n else:\n print(f\"Player {state.util} Wins!\")", "def comp_turn():\n global red_turn,board_array,die_1_num,die_2_num\n roll()\n red_turn = False\n value,move = backgammon_AI.choose_move(board_array,die_1_num,die_2_num,doubles)\n print value,move\n if(value != -1000):\n for sub_move in move:\n board_array[sub_move[0]][1] -= 1\n board_array[sub_move[1]][1] += 1\n if(board_array[sub_move[1]][0] == 1): #Handle hits\n board_array[sub_move[1]][0] -= 1\n board_array[0][0] += 1\n die_1_num = 0\n die_2_num = 0\n update_dice()\n draw_draughts()\n red_turn = True", "def play_game():\n\tstate = Coinche(verbose=True)\n\tbeliefs = [Belief(i, state) for i in range(4)]\n\n\twhile state.get_moves():\n\t\tprint(state)\n\t\tm = ismcts(rootstate=state, itermax=2000, verbose=False, belief=beliefs[state.player_to_move])\n\t\tprint(\"Best Move: \" + str(m) + \"\\n\")\n\t\tstate.do_move(m)\n\n\tfor p in range(state.number_of_players):\n\t\tprint(\"Player \" + str(p), state.get_result(p))", "def play_step(self, action):\n self.players[0].moving_left = False\n 
self.players[0].moving_right = False\n if action == MOVE_LEFT:\n self.players[0].moving_left = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_left = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == MOVE_RIGHT:\n self.players[0].moving_right = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_right = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == SHOOT:\n if self.dead_player or not self.players[0].is_alive:\n self.update(is_a_star=True)\n return\n if not self.players[0].weapon.is_active:\n self.players[0].shoot()\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n if self.dead_player or not self.players[0].is_alive:\n return", "def next_turn(self):\n if self.turn == BLUE and self.ai:\n self.ai_turn = True\n self.turn = RED\n elif self.turn == BLUE:\n self.turn = RED\n else:\n self.turn = BLUE\n\n self.selected_piece = None\n self.selected_legal_moves = []\n self.check_game_over()", "async def fish(ctx):\n global fish_now\n r = random.random()\n if len(str(fish_now)) > 1500:\n fish_now = round(pow(fish_now, 0.5))\n if fish_now == 69: fish_now = 70\n return await ctx.send(\"Woah! Bear's fish is a little too high, so it unfortunately has to be square rooted.\")\n if r > 0.9:\n fish_now += 10\n if fish_now == 69: fish_now = 70\n return await ctx.send(f\"Wow, you gave bear a super fish! Added 10 fish! Bear now has {fish_now} fish!\")\n if r > 0.1:\n fish_now += 1\n if fish_now == 69: \n fish_now = 70\n return await ctx.send(f\"You feed bear two fish. Bear now has {fish_now} fish!\")\n else:\n return await ctx.send(f\"You feed bear one fish. Bear now has {fish_now} fish!\")\n if r > 0.02:\n fish_now += 0\n return await ctx.send(f\"You can't find any fish... and thus can't feed bear. Bear still has {fish_now} fish.\")\n else:\n fish_now = round(pow(fish_now, 0.5))\n if fish_now == 69: fish_now = 70\n return await ctx.send(f\":sob:\\n:sob:\\n:sob:\\nAww, bear's fish was accidentally square root'ed. Bear now has {fish_now} fish. \\n:sob:\\n:sob:\\n:sob:\")", "def step(self):\n self.game.step()", "def run():\n game = Game()\n i = 0\n while True:\n print(i, \"\\n\\n\" + str(game))\n i += 1\n actions = game.possible_moves()\n if actions == []:\n return game.score()\n else:\n game_state = replace_none(np.array(game.state))\n action = h_min_max(game_state)[0]\n if action == UP:\n game.up()\n elif action == DOWN:\n game.down()\n elif action== LEFT:\n game.left()\n elif action== RIGHT:\n game.right()\n else:\n print(\"Didn't move\")\n return game", "def stand(self):\n self.endgame()", "def start(self):\n while self.turns <= 7:\n # print()\n # print(\"This is turn {}.\".format(self.turns))\n turn = Turn(self.current_player, self.die)\n turn.run()\n self.current_player.score += turn.score\n # print(\"{}'s score is now {}\".format(self.current_player, self.current_player.score))\n self.turns += 1\n # print()\n # print(\"You have reached 7 turns. 
Game over.\")\n # print(\"Your total score is {}.\".format(self.current_player.score))", "def firing(self) -> None:\n self.shooter.fire()\n # self.next_state(\"tracking\")\n self.state = self.tracking", "def go(self):\r\n self.__last_word = ''\r\n result = 'first_turn'\r\n random.seed()\r\n self.__current_player = random.randint(0, 1)\r\n\r\n while True: \r\n if result == 'first_turn':\r\n answer = self.__notify_user(self.__players[self.__current_player], 'first_turn', self.__last_word)\r\n elif result == 'rejected':\r\n self.__current_player = 1 - self.__current_player\r\n self.__last_word = self.__previous_word\r\n answer = self.__notify_user(self.__players[self.__current_player], 'repeat_turn', self.__last_word)\r\n elif result == 'wrong_letter':\r\n answer = self.__notify_user(self.__players[self.__current_player], 'wrong_letter', self.__last_word)\r\n elif result == 'already_used':\r\n answer = self.__notify_user(self.__players[self.__current_player], 'already_used', self.__last_word)\r\n elif result == 'not_exist':\r\n answer = self.__notify_user(self.__players[self.__current_player], 'not_exist', self.__last_word)\r\n elif result == 'validated':\r\n self.__previous_word = self.__last_word\r\n self.__last_word = answer\r\n self._save_state()\r\n self.__current_player = 1 - self.__current_player\r\n answer = self.__notify_user(self.__players[self.__current_player], 'your_turn', self.__last_word)\r\n elif result == 'stop_game':\r\n return\r\n\r\n result = self.__validate_word(answer)", "def run(self, GameState):\n pass", "def play_game():\n display_board()\n while ongoing_game:\n handle_turn(current_player)\n check_if_game_over()\n swap_player()\n global board\n if winner == \"X\" or winner == \"O\":\n print(\"<-------- Congratulations \" +\n winner + \", you win. 
-------->\")\n play_again()", "def play_game(self):\n self.welcome()\n while (self.winner is None) and (not self.exit_flag) and (not self.board.full()):\n self.play_round()\n self.exit_game()", "def defender(self):\n step = None\n other_card = self.enemy.enemy_step(self.other_hand.get_hand())\n s = 0\n while True:\n self.other_hand.give(other_card, self.table)\n #print()\n print(\"TABLE\")\n print(self.table)\n print(\"#\" * 100)\n my_card = self.player.player_repel(self.table.get_hand()[s], self.my_hand.get_hand())\n if my_card != None:\n self.my_hand.give(my_card, self.table)\n print(self.table)\n print(\"#\" * 100)\n\n step = 0\n else:\n\n step = 1\n for i in range(len(self.table.get_hand())):\n # self.table.give(self.table.get_hand()[i], self.my_hand)\n self.my_hand.add(self.table.get_hand()[i])\n break\n print(\"Your hand\")\n print(self.my_hand)\n other_card = self.enemy.toss(self.table.get_hand(), self.other_hand.get_hand())\n if other_card == None:\n break\n s = s + 2\n if step == 0:\n #print()\n print(\"Successful defense\")\n else:\n print(\"To abandon the defense\")\n\n self.table.get_hand().clear()\n\n return step", "def playGame(self, verbose=False) -> int:\n curPlayer = 0\n board = self.game.getInitBoard()\n it = 0\n log.info('GAME START')\n while self.game.getGameEnded(board) == 0:\n it += 1\n # if verbose:\n # assert self.display\n # print(\"Turn \", str(it), \"Player \", str(curPlayer))\n # self.display(board)\n player_func = self.landlord if curPlayer == 0 else self.farmers\n action = player_func(self.game.getCanonicalForm(board, curPlayer))\n log.info(action)\n valids = self.game.getValidMoves(self.game.getCanonicalForm(board, curPlayer), 1)\n\n if valids[action] == 0:\n log.error(f'Action {action} is not valid!')\n log.debug(f'valids = {valids}')\n assert valids[action] > 0\n board, curPlayer = self.game.getNextState(board, curPlayer, action)\n # if verbose:\n # assert self.display\n # print(\"Game over: Turn \", str(it), \"Result \", str(self.game.getGameEnded(board, 1)))\n # self.display(board)\n return 1 if curPlayer == 1 else -1", "def ninja_turn():\r\n\tglobal men\r\n\tl = [chop, fly, firebreath]\r\n\tx = randint(0,3)\r\n\tif men >= 85 and x == 3:\r\n\t\tx = randint(0,2)\r\n\tif x != 3 and men - l[x][5] >= 0:\r\n\t\treturn ninja.hit(*l[x])\r\n\telse:\r\n\t\tmen += ninja.sleep(*nsleep)\r\n\t\treturn 0", "def next_turn(self): \n if (self.moves):\n self.board = self.select_move() \n self.moves = []\n self.roll = self.roll_dice()\n self.player = not self.player\n self.generate_valid_moves()", "def fold(self, player):\n\t\tplayer.isHandLive = False\n\t\tself.setNextTurn()\n\t\tself.setState()", "def player_turn(user, deck):\n print(f\"\\n======== PLAYER'S TURN ========\\n\\n\"\n f\"Your current hand is \\033[36m{user.total}\\033[0m.\\n\")\n while deck.cards and not bust(user) and user.total != GOAL_TOTAL():\n if player_draw():\n draw_card(user, deck)\n else:\n print(f\"\\nYou've chosen to Stand, this ends the round with your hand of \\033[36m{user.total}\\033[0m.\\n\")\n break\n time.sleep(1)", "def play_game(difficulty):\r\n lives = 3\r\n directions = Direction(difficulty=difficulty)\r\n drawer.bgcolor = Colors.BLUE\r\n drawer.fill_screen()\r\n\r\n # Starting Countdown\r\n drawer.display_countdown(3, 'Starting in ')\r\n\r\n # ---------- Main Program Loop ------------\r\n lost = False\r\n # Each game is 20 rounds long.\r\n turn = 10\r\n while turn > 0:\r\n time.sleep(0.1)\r\n if lost:\r\n if lives > 0:\r\n drawer.display_option('use a life and continue?')\r\n 
drawer.display_lives(lives)\r\n drawer.refresh()\r\n\r\n while (input_direction := gamepad.direction_input()) is None:\r\n pygame.event.get()\r\n\r\n if input_direction == 'LEFT':\r\n lives -= 1\r\n lost = False\r\n time.sleep(0.5)\r\n\r\n else:\r\n print('round lost')\r\n return -1\r\n\r\n else:\r\n drawer.bgcolor = Colors.ORANGE\r\n drawer.display_lose()\r\n time.sleep(1)\r\n return -1\r\n\r\n # User did something.\r\n for event in pygame.event.get():\r\n # If user clicked close.\r\n if event.type == pygame.QUIT:\r\n pass\r\n\r\n # Choose a random direction either up right left or down\r\n # target_direction = random.choice(directions)\r\n directions.pick_direction()\r\n\r\n prev_input_direction = None\r\n ball_pos = [drawer.width // 2, drawer.height // 2]\r\n\r\n drawer.bgcolor = Colors.BLUE\r\n\r\n for angle in (a / 10 for a in range(63, -1, -1)):\r\n time.sleep(speed)\r\n\r\n # display the information\r\n drawer.fill_screen()\r\n drawer.display_text(directions.target_direction, Colors.GREY)\r\n\r\n drawer.display_text(f'{turn}', offset_x=320, offset_y=-200)\r\n\r\n # draw the ball in the proper place\r\n drawer.display_ball(ball_pos)\r\n drawer.display_lives(lives)\r\n drawer.display_timer(stop_angle=angle)\r\n drawer.refresh()\r\n\r\n # If the ball reached the end.\r\n if not drawer.ball_in_border(ball_pos):\r\n\r\n # The player chose correct.\r\n if directions.correct_direction(input_direction):\r\n # Leave the for; go on to the next turn.\r\n turn -= 1\r\n break\r\n\r\n # The player chose wrong.\r\n else:\r\n drawer.bgcolor = Colors.RED\r\n drawer.fill_screen()\r\n\r\n drawer.display_text(\"You chose wrong!\")\r\n drawer.display_lives(lives)\r\n drawer.refresh()\r\n time.sleep(0.3)\r\n # prompt to use a life and play again above\r\n lost = True\r\n break\r\n\r\n # makes it easier to get controller input\r\n pygame.event.get()\r\n # capture the controller input\r\n input_direction = gamepad.direction_input()\r\n\r\n # Initialize the previous input\r\n # We need prev_input_direction otherwise\r\n # input_direction is None most of the time.\r\n # prev_ lets the ball continue to update after\r\n # choosing a direction.\r\n #\r\n # Need to update later to be able to correct a wrong move in time.\r\n # But for now it works good enough.\r\n if prev_input_direction is None:\r\n prev_input_direction = input_direction\r\n else:\r\n input_direction = prev_input_direction\r\n\r\n # get the input\r\n if input_direction is not None:\r\n # update the balls position\r\n if input_direction == 'LEFT':\r\n ball_pos[0] -= ball_speed\r\n\r\n elif input_direction == 'RIGHT':\r\n ball_pos[0] += ball_speed\r\n\r\n elif input_direction == 'UP':\r\n ball_pos[1] -= ball_speed\r\n\r\n else:\r\n ball_pos[1] += ball_speed\r\n\r\n # The ball didn't reach the end.\r\n # The player was too slow and time ran out.\r\n else:\r\n\r\n drawer.bgcolor = Colors.RED\r\n drawer.fill_screen()\r\n drawer.display_text('Out of Time! 
You were too slow.')\r\n drawer.display_lives(lives)\r\n drawer.refresh()\r\n time.sleep(1)\r\n if lives > 0:\r\n drawer.display_option('use a life and continue?')\r\n drawer.display_lives(lives)\r\n drawer.refresh()\r\n\r\n while (input_direction := gamepad.direction_input()) is None:\r\n pygame.event.get()\r\n\r\n time.sleep(0.5)\r\n\r\n if input_direction == 'LEFT':\r\n lives -= 1\r\n time.sleep(0.5)\r\n continue\r\n\r\n # End the game\r\n drawer.bgcolor = Colors.ORANGE\r\n drawer.display_lose()\r\n time.sleep(1)\r\n return -1\r\n\r\n\r\n # The player completed the round successfully.\r\n else:\r\n drawer.bgcolor = Colors.GREEN\r\n drawer.fill_screen()\r\n drawer.display_text('Congratulations', Colors.WHITE)\r\n drawer.refresh()\r\n time.sleep(2)\r\n return lives", "def selfplay():\n agent2 = Agent(0.99, 0.1, 0.003, 42, train_games, 7, eps_dec)\n agent2.load_checkpoint()\n global win_cntr\n global done\n g = Game()\n turn = random.choice([PLAYER, AI])\n done = False\n transitions_agent = []\n transitions_agent2 = []\n while done == False:\n g.printBoard()\n if turn == PLAYER:\n # row = input('{}\\'s turn: '.format('Red'))\n # g.insert(int(row), turn)\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n action = agent2.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Red') % action)\n g.insert(action, PLAYER_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Red') % action)\n g.insert(action, PLAYER_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent2 += [(observation, action, observation_, done)]\n else:\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n action = agent.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = AI if turn == PLAYER else PLAYER\n if g.getWinner() == Tie:\n reward_agent = 0\n else:\n winner = AI if turn == PLAYER else PLAYER\n if winner == AI:\n win_cntr += 1\n if vertical_win:\n reward_agent = 5\n else:\n reward_agent = 20\n\n else:\n reward_agent = -20\n\n for i in range(len(transitions_agent)):\n agent.store_transition(transitions_agent[i][0], transitions_agent[i][1], reward_agent, transitions_agent[i][2],\n transitions_agent[i][3])\n agent.learn()\n return", "def play_game(self):\n while self.over is False:\n self.board.print_board()\n winner = self.check_over()\n if winner != -1:\n return winner\n self.p1.get_move(self.board)\n self.board.print_board()\n winner = self.check_over()\n if winner != -1:\n return winner\n self.p2.get_move(self.board)", "def play(self):\n\n while self.board.board[self.board.target_location()[0]]\\\n [self.board.target_location()[1]] == 
\"E\": # the car didn't\n # arrive the exit\n self.__single_turn()\n print(\"you won!\")", "def flee(self, tile):\r\n available_moves = tile.adjacent_moves()\r\n r = random.randint(0, len(available_moves) - 1)\r\n self.do_action(available_moves[r])", "def flee(self, tile):\r\n available_moves = tile.adjacent_moves()\r\n r = random.randint(0, len(available_moves) - 1)\r\n self.do_action(available_moves[r])", "def plant_food(self):\n self.phase.set(0)\n #self.broadcast_phase()\n self.players[self.first_player].take_turn()", "def on_turn(self, turn_state):\n game_state = gamelib.GameState(self.config, turn_state)\n gamelib.debug_write('Performing turn {} of your custom algo strategy'.format(game_state.turn_number))\n #game_state.suppress_warnings(True) #Uncomment this line to suppress warnings.\n\n self.fortress(game_state)\n\n game_state.submit_turn()", "def run_game(player_board, user_guess, computer_board, computer_guess):\n player_turn = 0 # Ensures player goes first\n computer_turn = 1 # Computer can only go once player score is equal\n # Life counter decrements each time a ship is hit\n player_lives = 15\n computer_lives = 15\n while True:\n if player_turn < computer_turn:\n user_guess.print_board()\n column, row = player_board.attack_input()\n if user_guess.board[row][column] == GUESSED:\n print('\\nYOU HAVE ALREADY GUESSED THIS CO-ORDINATE\\n')\n elif user_guess.board[row][column] == HITSHIP:\n print('\\nYOU HAVE ALREADY HIT A SHIP IN THIS CO-ORDINATE\\n')\n elif computer_board.board[row][column] == SHIP:\n print(' ')\n print(PHASE)\n print('\\nCONGRATULATIONS, YOU HIT A SHIP!\\n')\n user_guess.board[row][column] = HITSHIP\n player_turn += 1\n user_guess.lives_counter()\n user_guess.print_board()\n computer_lives -= 1\n print(\"COMPUTER'S TURN TO ATTACK!\")\n time.sleep(3)\n if computer_lives == 0:\n print('\\nTHE COMPUTER HAS NO LIVES LEFT!')\n print('YOU WIN!')\n print(' ')\n print(PHASE)\n break\n else:\n print(' ')\n print(PHASE)\n print('\\nYOU MISSED!\\n')\n user_guess.board[row][column] = GUESSED\n player_turn += 1\n user_guess.print_board()\n print(\"COMPUTER'S TURN TO ATTACK!\")\n time.sleep(3)\n if computer_turn == player_turn:\n row, column = computer_guess.attack_input()\n if computer_guess.board[row][column] == GUESSED:\n pass\n elif computer_guess.board[row][column] == HITSHIP:\n pass\n elif player_board.board[row][column] == SHIP:\n print('THE COMPUTER HIT YOUR SHIP!\\n')\n computer_turn += 1\n player_lives -= 1\n computer_guess.column_arry.append(column)\n computer_guess.row_arry.append(row)\n computer_guess.board[row][column] = HITSHIP\n player_board.board[row][column] = HITSHIP\n player_board.lives_counter()\n player_board.print_board()\n computer_guess.attk_arry.append(0)\n time.sleep(3)\n if player_lives == 0:\n print('\\nYOU HAVE NO LIVES LEFT!')\n print('YOU LOSE!')\n print(' ')\n print(PHASE)\n break\n else:\n print('COMPUTER MISSED!\\n')\n computer_guess.board[row][column] = GUESSED\n computer_turn += 1\n player_board.print_board()\n computer_guess.attk_arry.append(1)\n computer_guess.check_miss_count()\n time.sleep(3)", "def run(self):\r\n \r\n if not self.gameOver:\r\n screen.fill(COLOR3)\r\n self.board.drawBoard()\r\n self.handleEvents()\r\n for piece in self.board.pieces.values():\r\n piece.update()\r\n else:\r\n self.resetGame()\r\n pygame.display.update()", "def autochess(self):\n print(\"Player {}: depth = 2\".format(self.player.getCurrentPlayer()))\n self.player.changeCurrentPlayer()\n\n red_start = time.time()\n result = self.alphabeta(2, True, -9999, 
9999, 2)\n red_end = time.time()\n red_elapse = red_end - red_start\n print(\"red elapse: \", red_elapse)\n self.red_count += 1\n self.timetable_red.append((self.red_count, red_elapse))\n\n # movePiece using result piece and position:\n print(\"Result: \", result)\n self.movePiece(self.pieceList, result['Piece'], result['Pos'])\n self.player.UpdatePieceList(self.pieceList)\n self.playGUI.drawBoard(self.player)\n\n check = self.setNextMove_AB()\n\n if (self.EndGame()) or (check == 0):\n print(self.timetable_black)\n return 0\n else:\n pass", "def play():\n global done\n done = False\n g = Game()\n turn = random.choice([PLAYER, AI])\n transitions_agent = []\n agent.epsilon = agent.eps_min\n while done == False:\n g.printBoard()\n if turn == PLAYER:\n row = input('{}\\'s turn:'.format('Red'))\n g.insert(int(row), PLAYER_PIECE)\n else:\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n action = agent.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = AI if turn == PLAYER else PLAYER\n winner = AI if turn == PLAYER else PLAYER\n if winner == AI:\n reward = 20\n else:\n reward = -20\n for i in range(len(transitions_agent)):\n agent.store_transition(transitions_agent[i][0], transitions_agent[i][1], reward, transitions_agent[i][2],\n transitions_agent[i][3])\n agent.learn()\n return", "def act(self, action):\n assert action in self.options()\n if self._state == GameState.PLAY_OR_DRAW:\n if isinstance(action, NopAction):\n self._advance_turn()\n elif isinstance(action, DrawAction):\n self._state = GameState.PLAY_DRAWN\n self._current_hand().append(self._draw())\n else:\n self._play_card(action)\n elif self._state == GameState.PLAY:\n if isinstance(action, NopAction):\n self._state = GameState.PLAY_OR_DRAW\n self._advance_turn()\n else:\n self._play_card(action)\n elif self._state == GameState.PLAY_DRAWN:\n if isinstance(action, NopAction):\n self._state = GameState.PLAY_OR_DRAW\n self._advance_turn()\n else:\n self._play_card(action)\n elif self._state == GameState.PICK_COLOR or self._state == GameState.PICK_COLOR_INIT:\n disc = self._discard[-1]\n disc.color = action.color\n if self._state == GameState.PICK_COLOR:\n last_disc = self._discard[-2]\n if disc.card_type == CardType.WILD:\n self._state = GameState.PLAY_OR_DRAW\n elif any(x.color == last_disc.color for x in self._current_hand()):\n self._state = GameState.CHALLENGE_INVALID\n else:\n self._state = GameState.CHALLENGE_VALID\n self._advance_turn()\n else:\n self._state = GameState.PLAY\n elif self._state == GameState.CHALLENGE_VALID or self._state == GameState.CHALLENGE_INVALID:\n if isinstance(action, NopAction):\n for _ in range(4):\n self._current_hand().append(self._draw())\n self._advance_turn()\n elif self._state == GameState.CHALLENGE_INVALID:\n self._advance_turn(by=-1)\n for _ in range(4):\n self._current_hand().append(self._draw())\n self._advance_turn(by=2)\n else:\n for _ in range(6):\n 
self._current_hand().append(self._draw())\n self._advance_turn()\n self._state = GameState.PLAY_OR_DRAW", "def take_turn(self):\r\n self._choose_best_option()\r\n self._do_draw()", "def run_game(self) -> None:\n decision = 0\n if self._initial:\n self._initial = False\n while decision != 1:\n try:\n display_no_combat_init(self.hero)\n decision = get_user_input([1, 2, -1])\n if decision == -1:\n self._quit()\n elif decision == 2:\n self._show_bag()\n else:\n break\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")\n\n while not self.hero.is_dead:\n try:\n self._load_map()\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")", "def take_turn(self):\n if self.fired:\n return None\n\n self.tick_needs()\n # TODO: Currently dropping Trash, stuff that doesn't satisfy, where ever\n # May want to look for Trash Can at some point\n # Dropping first Trash item found when inventory full\n if self.inventory_full():\n trash = filter(lambda x: any(s not in self.needs for s in x.satisfies), self.inventory)\n for t in trash:\n print(f\"{self.name} dropped {t.name}\")\n self.drop_item(t)\n break\n\n # If not preoccupied, check needs and do stuff\n if not self.occupied:\n self.check_needs()\n self.move_to_target()", "def fight(self):\r\n\t\tif self.death():\r\n\t\t\treturn 0\r\n\t\tif self.ctime < 1:\r\n\t\t\tself.ctime += 0.05\r\n\t\telse:\r\n\t\t\tself.ctime = 0\r\n\t\t\tself.hit()", "def play_game(self):\n print('Welcome to Tetris! To play, press \"j\" to move Left, \"l\" to move Right, and \"k\" to '\n 'Invert the piece.')\n raw_input('Press any key to acknowledge.')\n board.add_piece()\n board.display_piece()\n board.display_board()\n while True:\n over = board.update_board_and_check_for_eog()\n if over:\n print over\n break\n board.display_board()\n start = time.time()\n while time.time() - start < self.refresh_rate:\n direction = board.get_input() # right, left\n if direction:\n board.display_piece(clear=True)\n board.move_piece(direction=direction)\n board.display_board()\n time.sleep(0.1)\n print 'You got {} points!'.format(board.points)\n return", "def frog_update(self, player):\n self.default_update_idle(player)\n if not self.can_update(): return\n #if self.onGround: \n # TODO: make the frog try to land on the player.\n # figure out the frog's distance from the player, and calculate the necessary xvel.\n # jump with min(self.max_speed/2, target_speed)\n if self.onGround and self.ai_counter <= 0: #TODO: change the way this works\n self.faceTowards(player.current_tile())\n self.changeAnimation('idle', self.direction_id)\n self.jump(self.direction_val*self.max_speed/2, self.max_speed)\n self.ai_counter = 110\n self.wait()", "def _play(self, player):\n\n # Request the current player's desired action\n action = player.request_action()\n\n # Player chose to roll\n if action == \"r\":\n # Roll the die and add to roll total for the turn\n roll = self._die.roll()\n player.update_total_rolls()\n player.update_last_roll(roll)\n # If the player rolls 1, reset the current\n # score and commit the current rolls count\n # to the player's Player object, and exit\n # the loop.\n if roll == 1:\n player.reset_turn_stats()\n player.commit_score()\n print(\"Ouch {}, you rolled a {} and lost all points you accumulated during this turn. Your score for this turn is {}. 
Your total score is {}.\".format(\n player.get_name(), roll, player.get_current_score(), player.get_total_score()))\n self._active_turn = False\n # If the player rolled other than a 1, update the\n # current score to the value of the roll, check\n # to see if the player's total is >= 100 and end\n # the game, otherwise ask the player for their\n # next action\n else:\n player.update_turn_score(roll)\n if (player.get_current_score() + player.get_total_score()) >= 100:\n player.commit_score()\n player.reset_turn_stats()\n self._end_game, self._active_turn = True, False\n else:\n print(\"Nice {}! You rolled a {}. Your current score for this turn is {}. Your total score is {}\".format(\n player.get_name(),\n roll,\n player.get_current_score(),\n player.get_current_score() + player.get_total_score()\n )\n )\n # Player chose to hold, commit their current score and\n # roll count to their Player object and exit the loop\n elif action == \"h\":\n player.commit_score()\n print(\"{}, you held. Your score for this turn is {}. Your total score is {}.\".format(\n player.get_name(), player.get_current_score(), player.get_total_score()))\n player.reset_turn_stats()\n self._active_turn = False\n # The player entered an invalid action\n else:\n print(\"You entered an invalid action.\")", "def step(self, action):\n self.game.play_single_turn([action])\n next_obs = self.get_state()\n reward = self.game.player_1.score - self.game.turn_count\n done = self.game.check_for_end_of_game() or self.game.turn_count > 25\n\n if done:\n if self.game.player_1.score > self.game.player_2.score:\n reward += 25\n elif self.game.player_2.score > self.game.player_1.score:\n reward -= 25\n\n if self.game.save_images_suffix:\n image_suffix = f'{self.game.save_images_suffix}_{self.game.turn_count+1}'\n self.game.game_board.graphical_output(save=True, display=False, image_suffix=image_suffix)\n\n return next_obs, reward/100, done", "def run_game(self):\n game = Poker()\n AI_win = game.play_round(self.name)\n self.update_scores(AI_win)\n message = 'Would you like to play another round? 
Y(es) or N(o): '\n answer = InputHandler.input_bool(message)\n if answer:\n self.run_game()", "def player_turn():\n # while the player has not stopped\n # Roll the die\n # If is a 1\n # Set score to 0 and stop the turn\n # else\n # Add that to the score\n # Ask the player whether to continue\n # Return the score", "def perform(self):\n\t\tif self.turns_remaining <= 0:\n\t\t\tself.engine.message_log.add_message(\n\t\t\t\tf\"The {self.entity.name} is no longer confused.\",\n\t\t\t)\n\t\t\tself.entity.ai = self.previous_ai\n\t\telse:\n\t\t\t# Pick a random direction\n\t\t\tdir_x, dir_y = random.choice(\n\t\t\t\t[\n (-1, -1), # Northwest\n (0, -1), # North\n (1, -1), # Northeast\n (-1, 0), # West\n (1, 0), # East\n (-1, 1), # Southwest\n (0, 1), # South\n (1, 1), # Southeast\n ]\n\t\t\t)\n\n\t\t\tself.turns_remaining -= 1\n\t\t\t# The actor will either try to move or attack in the chosen random direction.\n\t\t\t# It's possible the actor will just bump into the wall, wasting a turn.\n\t\t\treturn BumpAction(self.entity, dir_x, dir_y,).perform()", "def play_move(self, move):\n color, move_tuple = convert_from_GTP(move, self.size)\n result = self.game.move_stone(move_tuple[0], move_tuple[1], color)\n if result.status != gogame.VALID:\n raise PlayError, result.status\n self.board.PlaceMove(move_tuple, color)\n self.board.ErasePieces(result.prisoners)\n self.board.SwitchCurColor()\n\n self.curWatcher.play(move)\n\n #if there are consecutive passes, end the game, else reset pass count\n if move == \"pass\":\n self.isPass += 1\n if self.isPass == 2:\n self.end_game()\n else:\n self.isPass = 0\n\n #switch the players\n temp = self.curPlayer\n self.curPlayer = self.curWatcher\n self.curWatcher = temp\n \n if self.isPass < 2:\n self.call_for_move()\n else:\n self.end_game()", "def fighter(mob):\r\n\tglobal player\r\n\trestore()\r\n\top = op_set(mob)\r\n\tt = None\r\n\tplayer = engine('you', fo)\r\n#\tprint fo\r\n\twhile mhp > 0 and php > 0:\r\n\t\tt = turn(t, op)\r\n\t\tprompt()\r\n\tif mhp <= 0 and php > 0:\r\n\t\treturn 'winner'\r\n\telif php <= 0 and mhp > 0:\r\n\t\treturn 'game_death'\r\n\telse:\r\n\t\tprint \"You both seemed to have died...\"\r\n\t\treturn 'game_death'", "def play_game(game_agent, iter):\n global WIN\n window = WIN\n clock = pygame.time.Clock()\n reward = 0\n\n car = Car(200, random.randint(200,1000), random.randint(0,90))\n goal = Goal(700, 350)\n obstacles = []\n obstacles.append(Obstacle(800,550,90))\n for i in range(1, 3):\n\n obstacles.append(Obstacle(700,100 + i * 180,0))\n\n old_state = np.array(game_agent.get_game_states(car, goal, [150] * 12))\n #next_action = [1,0,0,0,0,0]\n next_action = [1,0,0,0,0,0]\n run = True\n while run:\n\n clock.tick(30)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n pygame.quit()\n quit()\n break\n\n\n car.next_action(next_action)\n car.move()\n #print(\"car x\",car.x)\n #print(\"car y\", car.y)\n #print(\"car speed\", car.speed)\n\n car_crash = False\n car_parked = False\n\n # check if car is out of bounds\n if car.x < 0 or car.x > 1200 or car.y < 0 or car.y > 1200:\n car_crash = True\n\n # check for car crash\n for obstacle in obstacles:\n collision = obstacle.collide(car)\n if(collision):\n print(collision)\n print(\"car crash\")\n car_crash = True\n\n # check for parking\n collision = goal.collide(car)\n num_park = 0\n if(collision):\n print(collision)\n print(\"car parked\")\n car_parked = True\n num_park += 1\n\n if car_parked:\n print(\"num_park: \", num_park)\n from datetime import datetime\n 
parked_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')", "def playerForfeit(self):\n self.handleWin(self.currentplayer*-1)", "def on_turn(self, turn_state):\n game_state = gamelib.GameState(self.config, turn_state)\n #gamelib.debug_write('Performing turn {} of your custom algo strategy'.format(game_state.turn_number))\n #game_state.suppress_warnings(True) #Uncomment this line to suppress warnings.\n\n self.starter_strategy(game_state)\n\n game_state.submit_turn()", "def step(self, action): # action is nb-cops-sized or 1-sized\n reward = 0\n done = False\n\n action = np.array(action)\n\n def old_pos(set=None):\n if set is None:\n return self.cops_pos if self.is_cops_turn else self.rob_pos\n else:\n if self.is_cops_turn:\n self.cops_pos = action\n else:\n self.rob_pos = action\n\n invalids = []\n\n if self.is_first_turn:\n self.graph.set_cr(action, self.is_cops_turn)\n else:\n edges = self.graph.get_rep()[old_pos(), action]\n invalids = edges != 1\n invalids[action == old_pos()] = False\n invalids = np.where(invalids == True)[0]\n if invalids.shape[0] != 0:\n action[invalids] = old_pos()[invalids] # correct action\n self.graph.set_cr(action, self.is_cops_turn)\n\n old_pos(action)\n if not self.is_cops_turn and self.is_first_turn:\n self.is_first_turn = False\n self.is_cops_turn = not self.is_cops_turn\n if self.rob_pos is not None and self.rob_pos[0] in self.cops_pos:\n print(\"Cops won\")\n done = True\n reward += (1 if self.is_cops_turn else -1) * REWARD_END_WL\n\n reward += (-1 if self.is_cops_turn else +1) * REWARD_STEP_WL\n reward -= len(invalids) * REWARD_INVALID\n\n observation = self.graph.get_attr()\n\n if self.is_cops_turn:\n self.cops_rew += reward\n else:\n self.rob_rew += reward\n\n if not done:\n if self.is_cops_turn and self.cops is not None:\n observation, _, done, _ = self.step(self.cops.act(observation))\n elif not self.is_cops_turn and self.robber is not None:\n observation, _, done, _ = self.step(self.robber.act(observation))\n return observation, reward, done, {}", "def ProcessGame(self):\n\n #self.CreateWeapon(line_number=0, weapon_type=self.MakeSunFlowerWeapon)\n while self.InputEvents(pygame.event.get()):\n self.clock.tick(self.speed)\n \n if self.IsGameOver():\n painter.DisplayGameOver()\n else:\n self.UpdateAll()\n self.DrawAll()", "def play_against_random(self, action, display_game=False):\n state, status, done = self.step(action)\n if display_game: env.render()\n if not done and self.turn == 2:\n state, s2, done = self.random_step()\n if display_game: env.render()\n if done:\n if s2 == self.STATUS_WIN:\n status = self.STATUS_LOSE\n elif s2 == self.STATUS_TIE:\n status = self.STATUS_TIE\n else:\n raise ValueError(\"???\")\n return state, status, done", "def playGameMove(self, printReward):\n\n # make a copy the current state used for determining reward\n state = self.currentState()\n\n # train the GameEnvironment and make a move, get the the action taken\n action = self.trainMove()\n\n # if no action is found, then the game is over, end the game\n if action is None:\n return None\n # otherwise, perform the action by training with it\n else:\n # find the reward for the action\n reward = self.rewardFunc(state, action)\n if printReward:\n print(\"Reward this turn: \" + str(reward))\n\n return reward", "def env_step(self, action):\n if action == 0: # Hit\n\n new_state = deepcopy(self.current_state)\n reward = 0\n terminal = False\n \n new_card = min(self.random.randint(1,14), 10)\n # print('new card:', new_card)\n \n if new_card == 1:\n self.player_ace_count += 1\n 
new_state['player_sum'] = self.current_state['player_sum'] + 11 \n else:\n new_state['player_sum'] = self.current_state['player_sum'] + new_card\n\n while new_state['player_sum'] > 21 and self.player_ace_count > 0:\n self.player_ace_count -= 1\n new_state['player_sum'] -= 10\n\n new_state['usable_ace'] = int(self.player_ace_count > 0)\n\n if new_state['player_sum'] > 21: # Goes bust\n reward = -1\n terminal = True\n\n elif action == 1: # Stick\n\n new_state = deepcopy(self.current_state)\n terminal = True\n\n if self.current_state['dealer_card'] == 1:\n dealer_ace = 1\n dealer_sum = 11\n else:\n dealer_ace = 0\n dealer_sum = self.current_state['dealer_card']\n\n first_two_cards = True\n while dealer_sum < self.dealer_sticks or first_two_cards:\n first_two_cards = False\n # new_card = self.random.choice(range(1,11), p=self.card_probs)\n new_card = min(self.random.randint(1,14), 10)\n if new_card == 1:\n dealer_sum += 11\n dealer_ace += 1\n else:\n dealer_sum += new_card\n\n while dealer_sum > 21 and dealer_ace > 0:\n dealer_sum -= 10\n dealer_ace -= 1\n dealer_ace = int(dealer_ace > 0)\n # print('dealer:', new_card)\n\n # print('dealer sum:', dealer_sum)\n if dealer_sum > 21:\n reward = 1\n else:\n if new_state['player_sum'] > dealer_sum:\n reward = 1\n elif new_state['player_sum'] < dealer_sum:\n reward = -1\n else:\n reward = 0\n # reward = int(new_state['player_sum'] > dealer_sum) - int(new_state['player_sum'] < dealer_sum)\n\n else:\n raise Exception(\"Invalid action.\")\n\n self.current_state = new_state\n\n self.reward_obs_term = (reward, self.observation(self.current_state), terminal)\n\n return self.reward_obs_term", "def runGame(self):\n for player in self.game_state.player_list:\n self.player_hand_dict[player.name] = []\n\n \"\"\" Deal the hand out starting with the player after the Dealer \"\"\"\n print \"Dealing cards...\"\n self.dealCards(self.dealer)\n self.printPlayersHands()\n self.playHand()\n dealer = (self.dealer+1)%self.num_players\n\n \"\"\" Play until the termination conditions are met \"\"\"\n if self.isGameFinished() == False:\n \"\"\" Increment dealer \"\"\"\n self.dealer = (self.dealer+1)%self.num_players\n \"\"\" Reset the game state \"\"\"\n self.game_state.newGameState()\n self.decision_list = []\n self.runGame()\n else:\n print \"\"\n print \"Game over!\"", "def _step(self):\n \n self._game.step()\n\n self._view.delete('laser')\n\n\n self.refresh_view()\n\n return not self._won", "def play(self):\n while True:\n self.round()\n if not(self.player.playing) and (self.dealer.value >= 17):\n break\n self.update_state()\n return self.end_game()", "def run_game(self):\n self.food.randomize_position(self.grid)\n while True:\n self.clock.tick(8)\n self.handle_keys()\n if not self.snake.move(self.grid):\n if self.end_game_dialog() == 0:\n end_game()\n else:\n self.score = 0\n self.grid = deepcopy(self.base_grid)\n self.snake.initialize_snake_on_grid(self.grid)\n self.food.randomize_position(self.grid)\n\n self.draw_grid()\n\n if self.snake.get_head_position() == self.food.position:\n self.snake.length += 1\n self.score += 2\n self.food.randomize_position(self.grid)\n if self.score > self.maxScore:\n self.maxScore = self.score\n\n self.screen.blit(self.surface, (0, 0))\n score = self.font.render(\"Score {0}\".format(self.score), True, (0, 0, 0))\n self.screen.blit(score, (5, 10))\n pygame.display.update()", "async def process_turn(self, game):\r\n\r\n # Check if the player is an AI\r\n if self.is_ai:\r\n\r\n # Determine the best place to go and return the location\r\n # 
Use a sleep function to simulate decision making\r\n await sleep(1)\r\n self.determine_best_move(game.board)\r\n return None\r\n\r\n # The player is not an AI\r\n else:\r\n\r\n # Wait for the player to react with the spot they want to go\r\n def check_reaction(reaction, user):\r\n return (\r\n reaction.message.id == game.message.id and\r\n user.id == self.member.id and\r\n str(reaction) in game.get_valid_reactions()\r\n )\r\n done, pending = await wait([\r\n game.bot.wait_for(\"reaction_add\", check = check_reaction),\r\n game.bot.wait_for(\"reaction_remove\", check = check_reaction)\r\n ], return_when = FIRST_COMPLETED)\r\n reaction, user = done.pop().result()\r\n for future in pending:\r\n future.cancel()\r\n\r\n # Check if the player wants to QUIT the ConnectFourGame\r\n if str(reaction) == QUIT:\r\n return ConnectFourPlayer.QUIT\r\n\r\n # The player does not want to quit, make their requested move\r\n else:\r\n\r\n # Check if the column is full\r\n if game.board.is_column_full(CONNECT_FOUR_REACTIONS.index(str(reaction))):\r\n return ConnectFourPlayer.COLUMN_FULL\r\n\r\n # The column is not full, let the player go there\r\n else:\r\n game.board.add_piece(CONNECT_FOUR_REACTIONS.index(str(reaction)), is_challenger = game.challenger_turn)\r\n return None" ]
[ "0.70215684", "0.6675281", "0.6568159", "0.6531183", "0.6502724", "0.6469548", "0.6381622", "0.63657147", "0.6328124", "0.6289508", "0.62504184", "0.62501085", "0.6250065", "0.6247987", "0.62212396", "0.61608124", "0.6129432", "0.6125374", "0.6121029", "0.6068085", "0.60574883", "0.60526377", "0.60351706", "0.6031425", "0.6027868", "0.60052973", "0.5989123", "0.5973123", "0.59718865", "0.5965738", "0.59519047", "0.594463", "0.5938905", "0.59289056", "0.5928803", "0.59280616", "0.59263927", "0.59245425", "0.59216547", "0.59206945", "0.59206945", "0.5914906", "0.5899594", "0.5896566", "0.5896248", "0.58888406", "0.5880607", "0.5856188", "0.58529794", "0.5847475", "0.5844857", "0.58354765", "0.58253", "0.5823212", "0.5818345", "0.5815819", "0.58038217", "0.5803334", "0.58023906", "0.579989", "0.57955396", "0.57889295", "0.57875293", "0.578572", "0.5782336", "0.57819873", "0.5770604", "0.5770604", "0.5751741", "0.5751068", "0.5749856", "0.57464933", "0.57462883", "0.5728509", "0.57253516", "0.5720432", "0.57181287", "0.5715675", "0.5712713", "0.5710709", "0.57054716", "0.5704299", "0.57033247", "0.57018423", "0.570102", "0.56919473", "0.568651", "0.5686446", "0.56786406", "0.56767136", "0.5675569", "0.56753004", "0.56732756", "0.56665564", "0.5665462", "0.5662365", "0.5660999", "0.5659716", "0.5658272", "0.5658067", "0.56550306" ]
0.0
-1
Pass the turn in the current game state to the next player. Return the color of the player, or None if the game is over.
def __get_next_turn(self, all_placed):
    game_over = self.is_game_over()
    if all_placed:
        if game_over:
            return None
        else:
            self.state.pass_turn_if_applicable()
    color = self.state.whose_turn().get_color()
    return color
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTurn(self):\r\n return self.players[self.getCurrentPlayer()].getColor()", "def user_to_move_in_game(game):\n if game.finished:\n return None\n black_or_white = go.next_color(game.sgf)\n next_in_game = {go.Color.black: game.black,\n go.Color.white: game.white}[black_or_white]\n return next_in_game", "def player(self):\n return self._color", "def update_player_turn(self):\n\n if self.get_player_turn() != 'BLUE':\n\n self._player_turn = 'BLUE'\n\n else:\n\n self._player_turn = 'RED'", "def __get_whose_turn_in_history(self, time_index: int) -> chess.Color:\n\n # get player from history\n side = self.history[time_index].split(\" \")[1]\n\n if side == \"w\":\n return chess.WHITE\n elif side == \"b\":\n return chess.BLACK", "def get_opponent_color(self, mycolor):\n if mycolor == ChessGame.BLACK:\n return ChessGame.WHITE\n elif mycolor == ChessGame.WHITE:\n return ChessGame.BLACK\n else:\n raise NotImplementedError()", "def opponent(player):\n return BLACK if player is WHITE else WHITE", "def get_opponent_color(self, self_color):\r\n return abs(self_color - 1)", "def getCurrentColor(self):\n if self.__currentnode__ is None:\n return None\n else:\n return self.__currentnode__.getPlayer().getColor()", "def next_turn(self):\n if self.turn == BLUE and self.ai:\n self.ai_turn = True\n self.turn = RED\n elif self.turn == BLUE:\n self.turn = RED\n else:\n self.turn = BLUE\n\n self.selected_piece = None\n self.selected_legal_moves = []\n self.check_game_over()", "def opponent(self, player):\r\n # player = core.BLACK (can do this for any static var)\r\n if player == core.BLACK:\r\n return core.WHITE\r\n else:\r\n return core.BLACK", "def switch_player(self):\n if self.playerOne:\n # sets the chip color to blue\n self.red = 0\n self.blue = 255\n # switch the player to player 2 and change the caption\n self.playerOne = False\n pygame.display.set_caption('Connect4 - Player 2')\n else:\n # sets the chip color to red\n self.red = 250\n self.blue = 0\n # switch the player to player 1 and change the caption\n self.playerOne = True\n pygame.display.set_caption('Connect4 - Player 1')", "def best_move(self, state, curr_player):\n\t\t# determine opponent's color\n\t\tif curr_player == self.colors[0]:\n\t\t\topp_player = self.colors[1]\n\t\telse:\n\t\t\topp_player = self.colors[0]\n\n\t\treturn self.value(state, curr_player)", "def handle_game_start(self, color, board):\n\n\n self.color = color\n self.current_board = board\n pass", "def draw_colored_player(self, id):\n if id == self.id:\n pygame.draw.rect(self.screen, self.color_1, pygame.Rect(self.first_player_x, self.first_player_y, 20, 140))\n else:\n pygame.draw.rect(self.screen, self.color_2, pygame.Rect(self.second_player_x, self.second_player_y, 20, 140))\n return", "def is_a_player_in_check(self, color):\n\n current_player = color\n if current_player == 'red':\n opposing_player = 'blue'\n else:\n opposing_player = 'red'\n\n if self.can_checkmate(opposing_player):\n return self.get_general(current_player)\n\n if self.can_checkmate(current_player):\n return self.get_general(opposing_player)\n\n return False", "def get_state(self):\n return np.append(self.game.game_board.get_board(),\n [self.game.player_1.color, self.game.player_2.color])[None, :]", "def has_winner(self):\n if self.color_check_mate(ChessGame.BLACK):\n return ChessGame.WHITE\n elif self.color_check_mate(ChessGame.WHITE):\n return ChessGame.BLACK\n else:\n return None", "def get_color(rank):\n if rank == 1:\n color = int(0xffd700)\n elif rank == 2:\n color = int(0xc0c0c0)\n elif rank == 
3:\n color = int(0xcd7f32)\n else:\n color = random.randint(1, 16777215)\n\n return discord.Color(color)", "def _next_turn(self):\n return self.TURNS[self._turn is self.BLACK]", "def process_color(self, color):\n self.controller.game.receive_color(color)\n self.parent.parent.update_stat_frame()\n self.parent.parent.update_table_frame()\n self.parent.parent.end_turn()", "def GoTo(self):\n if self.state == 'normal':\n return self.backUser()\n \n print(r\"\"\"Please enter a specific color to reach the desired room:\n\n - blue -> entrance\n - red -> closet\n - green -> living room\n - yellow -> kitchen\n - magenta -> bathroom\n - black -> bedroom\n \"\"\")\n\n color = raw_input('Color: ')\n if color in self.color:\n self.msg_play.play = False\n self.msg_play.color = color\n self.play_pub.publish(self.msg_play)\n rospy.loginfo(\"color sent\")\n self.state = ''\n else:\n print('Command Unknown') \n return self.GoTo()", "def isMyTurn(self, gameID, playerID):\n if gameID in self.games:\n match = self.games[gameID]\n myColor = match.getColorOfPlayer(playerID)\n if myColor is None:\n return (False, {\"error\": \"Not a player in the game\"})\n else:\n return (True, {\"isMyTurn\": match.whoseTurn() == myColor})\n else:\n return (False, {\"error\": \"Invalid game ID\"})", "def next_player(self):\n if self.player1.turn_status == 1:\n self.player1.turn_status = 0\n self.turn(self.player2)\n else:\n self.player2.turn_status = 0\n self.turn(self.player1)", "def check_game_over(self):\n red, blue = self.board.count_piece()\n if blue == 0:\n self.ui.show_result(\"RED WIN!\")\n self.turn = RED\n elif red == 0:\n self.ui.show_result(\"BLUE WIN!\")\n self.turn = BLUE\n elif red == blue == 1:\n self.ui.show_result(\"DRAW!\")", "def get_color(self):\r\n if self.color:\r\n return \"RED\"\r\n else:\r\n return \"BLACK\"", "def select_player(self, player, color):\n module_globals = {}\n execfile(player, module_globals)\n return module_globals[module_globals.keys()[len(module_globals.keys()) - 1]](color)", "def piece_color(self, piece):\n if piece == None:\n return None\n if ord(ChessPiece.W_KING) <= ord(piece) <= ord(ChessPiece.W_PAWN):\n return \"white\"\n return \"black\"", "def play_single_turn(self, action=None):\n self.turn_count += 1\n if self.save_images_suffix:\n self.game_board.graphical_output(save=True, display=False,\n image_suffix=f'{self.save_images_suffix}_{self.turn_count}')\n if self.game_type == self.game_types['human']:\n self.game_board.graphical_output()\n\n self.player_1.play_turn(action if action else self.get_color_options())\n self.player_2.play_turn(self.get_color_options())\n\n if self.game_type == self.game_types['vs_ai']:\n self.game_board.graphical_output(save=True, image_suffix=self.turn_count)\n\n if self.game_type != self.game_types['r_l']:\n print(f\"player 1 played {self.player_1.color}: {self.player_1.score}\")\n print(f\"player 2 played {self.player_2.color}: {self.player_2.score}\")\n print()", "def take_comp_turn(self, deck, pile):\n matches = [card for card in self.hand if card.is_match(pile.top_card() != 0)]\n if len(matches) > 0: # can play\n choice = random.randrange(len(matches))\n self.play_card(matches[choice-1], pile)\n if matches[choice - 1].kind == 'wild' or matches[choice - 1].kind == 'wild4':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n matches[choice - 1].color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n print(str(self.name) + \" played \" + str(matches[choice-1]))\n\n else: # comp can't play\n # check if deck is 
empty -- if so, reset it\n if deck.is_empty():\n deck.reset_deck(pile)\n # draw a new card from the deck\n newcard = self.draw_card(deck)\n print(\"The computer drew: \" + str(newcard))\n if newcard.is_match(pile.top_card()): # can be played\n self.play_card(newcard, pile)\n if newcard.kind == 'wild':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n newcard.color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n else: # still can't play\n print(\"Sorry, you still can't play.\")\n print(str(self.name) + \" played \" + str(newcard))\n return", "def finish_turn(game_ID):\n state = get_state(game_ID)[\"playerState\"]\n\n if state[\"redPoints\"] == NUM_RED_WORDS:\n return set_winner(game_ID, \"red\")\n elif state[\"bluePoints\"] == NUM_BLUE_WORDS:\n return set_winner(game_ID, \"blue\")\n\n if state[\"attemptsLeft\"] == 0 and state[\"action\"] == \"chooser\":\n r.hset(\n \"state:\" + game_ID,\n mapping={\"turn\": opposite(state[\"turn\"]), \"action\": \"spymaster\"},\n )", "def updatePlayer(self, _player):\n if _player.color == 'black': self.players['black'] = _player\n else: self.players['white'] = _player", "def print_turn(board: Connect4Board) -> None:\r\n\r\n if board.get_player_turn() == board.get_red():\r\n print('\\nRED\\'s Turn.')\r\n else:\r\n print('\\nYELLOW\\'s Turn.')", "def next_color(self):\n if self._color_cycle is None:\n return self._theme.color\n return next(self._color_cycle)['color']", "def get_current_player(player_one_turn):\n \n # Get appropriate player whether the parameter is True or False\n if player_one_turn == True:\n return 'Player One'\n return 'Player Two'", "def player_play(self, color, x, y):\r\n self.tr.bd.disks[x][y].color,\r\n self.tr.bd.disks[x][y].display_on = color, True\r\n self.tr.bd.disks[x][y].chain()\r\n self.tr.board_scan_reset()\r\n # Checks for computer move, if none, then checks for another move\r\n if self.tr.board_scan():\r\n self.delay = frameCount\r\n return\r\n else:\r\n self.tr.board_scan_reset()\r\n if not self.tr.board_scan():\r\n return\r\n # If none, ends game.\r\n else:\r\n self.tr.board_scan_reset()\r\n self.tr.scanner()\r\n self.tr.game_over = True\r\n self.tr.run_game_is_over = frameCount", "def player(self, state, current_player):\r\n\r\n new_piece, player = self.new_or_old_piece(state)\r\n\r\n if new_piece:\r\n return player\r\n else:\r\n return current_player", "def _advance_turn(self):\n\n self.__turn_info['turn'] = ChessGame.BLACK if\n self.__turn_info['turn'] == ChessGame.WHITE else ChessGame.WHITE", "def next_round(self):\n if self.finish_game == 3:\n self.restart_game()\n return\n\n atual_color = self.atual_player.color\n if self.board.valid_moves(atual_color).__len__() > 0:\n self.board.play(self.atual_player.play(self.board.get_clone()), atual_color)\n self.view.atualizar_discos()\n self.finish_game = 0\n else:\n self.finish_game += 1\n self.atual_player = self._opponent(self.atual_player)\n\n self.view.atualizar_jogador_atual(self.atual_player.color)\n\n if self.finish_game == 2:\n self._end_game()", "def getInitialPlayer(self):\n return self.__colordict__[self.__initial_color__]", "def play_game(self):\n while True:\n\n for player in self.players:\n print(self.board)\n print(\"Your turn player {}\".format(player))\n\n self.play_turn_for_player(player)\n\n if self.board.is_draw():\n print(\"Its a draw!\")\n return \"draw\"\n\n elif self.board.is_victory(player.icon):\n print(self.board)\n print(\"{} Wins! 
Congrats!\".format(player.icon))\n return player.name", "def currentScore(self, playerColor):\n total = 0\n for col in range(0, 8):\n for row in range(0, 8):\n if self.board[col][row].color == playerColor:\n total+=1\n return total", "def getCurrentString(self):\r\n if self.getTurn() == RED:\r\n return 'red'\r\n return 'white'", "def genmove(self, color, game) -> Move:\n # print(color)\n # print(game.play_history)\n # print(self.mc.states)\n if not len(game.play_history) == (len(self.mc.states) - 1):\n # Last play not yet in our states:\n last_player, last_move = game.play_history[-1]\n # pprint(game.play_history)\n # print(last_player, last_move)\n missing_state = self.mc.board.next_state(\n self.mc.states[-1], last_move)\n self.mc.update(missing_state)\n\n # print('Current board in our mc:')\n # _b = self.mc.states[-1][0]\n # _b = self.mc.board.from_tuple(_b)\n # print(_b)\n\n move = self.mc.get_play()\n\n # Update our saved states\n resulting_state = self.mc.board.next_state(\n self.mc.states[-1], move)\n self.mc.update(resulting_state)\n\n return move", "def recognize_color(self):\n x = (self.x + DIRECTIONS[(self.facing_direction - self.config) % 8][0]) % (self.image.shape[0] - 1)\n y = (self.y + DIRECTIONS[(self.facing_direction - self.config) % 8][1]) % (self.image.shape[1] - 1)\n color_left = self.image[x, y]\n if abs(self.luminance(color_left) - self.luminance_fcolor) <= self.lum_threshold:\n return self.turn_left\n x = (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1)\n y = (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)\n color_forward = self.image[x, y]\n if abs(self.luminance(color_forward) - self.luminance_fcolor) <= self.lum_threshold:\n return self.move_forward\n x = (self.x + DIRECTIONS[(self.facing_direction + self.config) % 8][0]) % (self.image.shape[0] - 1)\n y = (self.y + DIRECTIONS[(self.facing_direction + self.config) % 8][1]) % (self.image.shape[1] - 1)\n color_right = self.image[x, y]\n if abs(self.luminance(color_right) - self.luminance_fcolor) <= self.lum_threshold:\n return self.turn_right\n return None", "def play_against_random(self, color, game_count):\n\n q_player = tournament.QNetPlayer(self.target_network)\n random_player = tournament.RandomPlayer()\n score = tournament.play_one_color(game_count, q_player, color, random_player)\n return score", "def run_turn(self):\n\n all_placed = self.state.all_avatars_placed()\n color = self.__get_next_turn(all_placed)\n if color is None:\n return\n\n if not all_placed:\n # placement round\n func = self.players[color].make_placement\n else:\n # movement round\n func = self.players[color].make_move\n\n queue = Queue()\n thread = Thread(target=self.__player_thread, args=[func, deepcopy(self.state), queue])\n thread.daemon = True\n thread.start()\n thread.join(self.timeout)\n if thread.is_alive():\n #print(\"The \" + str(color) + \" player timed out and will be removed.\")\n self.__remove_player(color)\n return\n\n action = queue.get()\n if action == None:\n #print(\"The \" + str(color) + \" player crashed and will be removed.\")\n self.__remove_player(color)\n return\n\n if not all_placed:\n if self.state.valid_placement(action, color):\n self.state.place_avatar(action, color)\n else:\n #print(\"The \" + str(color) + \" player has attempted an invalid placement and will be removed.\")\n self.__remove_player(color)\n else:\n if self.state.valid_move(*action):\n self.state.move_avatar(*action)\n else:\n #print(\"The \" + str(color) + \" player has attempted an invalid 
move and will be removed.\")\n self.__remove_player(color)", "def next_color():\n global _color\n char = BG_COLORS[_color]\n _color = (_color + 1) % 8\n return char", "def play(self, black_strategy, white_strategy):\n self.restart()\n strategy = lambda who: black_strategy if who == BLACK else white_strategy\n while self.__player is not None:\n move = Othello.get_move(strategy(self.__player), self.__player, self.__board)\n Othello.make_move(move, self.__player, self.__board)\n self.__player = Othello.next_player(self.__board, self.__player)\n return self.__board, Othello.score(BLACK, self.__board)", "def playing(player, grid):\n\n\t# Determine the current player and define the colors to use to fill the spots of the grid he chose\n\tif player == Data.current_player['Activator']:\n\t\tcase = colors.GREEN + '[A]' + colors.STOP\n\t\tno_case = colors.RED + '[I]' + colors.STOP\n\t\tc_player = colors.GREEN + player + colors.STOP\n\t\tprint('Joueur actuel : ' + colors.GREEN + player + colors.STOP)\n\n\telse:\n\t\tcase = colors.RED + '[I]' + colors.STOP\n\t\tno_case = colors.GREEN + '[A]' + colors.STOP\n\t\tc_player = colors.RED + player + colors.STOP\n\t\tprint('Joueur actuel : ' + colors.RED + player + colors.STOP)\n\n\tend = check_end(case, no_case, grid)\n\n\tif end == False:\n\t\tcoordXY, grid, taken_cases = check_position(c_player, case, no_case, grid)\n\n\t\t# Modifies grid with the informations given by the player\n\t\tgrid[coordXY] = case\n\t\tupdate_grid(grid, taken_cases, case)\n\n\treturn grid, end", "def getHealthColor(player):\r\n \r\n if player.stats[HP] < ((float(player.stats[MAXHP]) / 100) * 25):\r\n hpcolor = LRED\r\n elif player.stats[HP] < ((float(player.stats[MAXHP]) / 100) * 50):\r\n hpcolor = YELLOW\r\n elif player.stats[HP] < ((float(player.stats[MAXHP]) / 100) * 75):\r\n hpcolor = LGREEN\r\n elif player.stats[HP] < ((float(player.stats[MAXHP]) / 100) * 85):\r\n hpcolor = WHITE\r\n elif player.stats[HP] < ((float(player.stats[MAXHP]) / 100) * 95):\r\n hpcolor = WHITE\r\n else:\r\n hpcolor = WHITE\r\n \r\n # If player.hp is higher than maxhp, make it blue (only a buff can do this)\r\n if player.stats[HP] > player.stats[MAXHP]:\r\n hpcolor = BLUE \r\n \r\n return hpcolor", "def board_status(board):\n if terminal(board):\n victor = winner(board)\n if victor is not None:\n emit(\"game_over\", \"winner: \" + victor)\n else:\n emit(\"game_over\", \"Draw\")", "def get_trump_color(self):\n color_counter = collections.Counter()\n for card in self.hand:\n color = card.color\n if color == \"White\":\n continue\n color_counter[color] += 1\n if not color_counter.most_common(1):\n return super().get_trump_color()\n else:\n return color_counter.most_common(1)[0][0]", "def get_color(self, coord):\n return self.board[coord[0], coord[1]]", "def play_against_minimax(self, color, game_count):\n\n q_player = tournament.QNetPlayer(self.network)\n minimax_player = tournament.MinimaxPlayer()\n score = tournament.play_one_color(game_count, q_player, color, minimax_player)\n return score", "def getState(self, playerID, gameID):\n\n if gameID in self.games:\n g = self.games[gameID]\n\n # Determine which player the client is\n youAreColor = g.getColorOfPlayer(playerID)\n if youAreColor is None:\n return (False, {\"error\": \"You are not a player in this game\"})\n\n # Serialize layout and history\n serialLayout = TournamentSystem.__getState_serializeLayout(g.board)\n serialHst = TournamentSystem.__getState_serializeHistory(g.history)\n\n board = {\"layout\": serialLayout,\n \"enPassantFlags\": 
g.board.flag_enpassant,\n \"canCastleFlags\": g.board.flag_canCastle}\n\n return (True, {\"youAreColor\": youAreColor,\n \"isWhitesTurn\": (g.whoseTurn() == ChessBoard.WHITE),\n \"board\": board,\n \"history\": serialHst})\n else:\n return (False, {\"error\": \"Invalid game ID\"})", "def transition(board, player, action):\n if _ENV.is_valid((board, player), action):\n new_board, __ = _ENV.get_next_state((board, player), action)\n return new_board\n return None", "def has_winner(self):\n\n if self.num_black_pieces == 0 or len(self.get_all_valid_moves(Player.black)) == 0:\n return Player.white\n elif self.num_white_pieces == 0 or len(self.get_all_valid_moves(Player.white)) == 0:\n return Player.black\n elif self.repetition_happened() or self.passive_game():\n return \"Tie\"\n else:\n return None", "def set_game_state(self,winner):\r\n if winner == 'b':\r\n self._game_state = \"BLACK_WON\"\r\n else:\r\n self._game_state = \"RED_WON\"", "def game_over(screen, black, red, brown, player):\r\n screen.fill(black)\r\n\r\n f1 = pygame.font.Font(None, 102)\r\n s1 = f1.render(\"GAME OVER\", True, red)\r\n screen.blit(s1, [100, 100])\r\n\r\n f2 = pygame.font.Font(None, 70)\r\n s2 = f2.render(\"score: %d\" % player.score, True, brown)\r\n screen.blit(s2, [200, 240])\r\n\r\n pygame.display.update()\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()", "def apply_laser(self, color):\n if self.winner is not None:\n raise RuntimeError(\"Game is already complete \" + str(self.winner.value) + \" won\")\n results = {}\n path, piece = self._apply_laser(color)\n\n if piece is not None:\n results[\"destroyed\"] = piece\n if piece.type is PieceType.pharaoh:\n self.winner = TeamColor.opposite_color(piece.color)\n results[\"winner\"] = self.winner\n\n results[\"path\"] = path\n return results", "def get_card_color(team_cards, game_dict, team):\n\n # Loop through cards by team.\n for j, card in enumerate(team_cards):\n # Loop through all cards of respective team.\n try:\n # Get player name and minute of card.\n player = card.a.text\n minute = card.find(\"span\", {\"class\": \"klammerzahl\"}).text[:-1]\n\n game_dict[\"{}_card_plyr_{}\".format(team, j)] = player\n game_dict[\"{}_card_min_{}\".format(team, j)] = int(minute)\n\n # Scrape card colors and count cards.\n if team_cards[j].div[\"style\"] == \"color:#FBDB04;\":\n game_dict[\"{}_card_clr_{}\".format(team, j)] = 1 # yellow = 1\n game_dict[\"{}_card_yllw\".format(team)] += 1\n\n elif team_cards[j].div[\"style\"] == \"color:#D7110C;\":\n game_dict[\"{}_card_clr_{}\".format(team, j)] = 2 # red = 2\n game_dict[\"{}_card_red\".format(team)] += 1\n\n elif team_cards[j].div[\"class\"][0] == \"icon_gelbrot\":\n game_dict[\"{}_card_clr_{}\".format(team, j)] = 3 # yellow/red=3\n # Counted as two yellow.\n game_dict[\"{}_card_yllw\".format(team)] += 2\n\n else:\n game_dict[\"{}_card_clr_{}\".format(team, j)] = np.nan\n\n except AttributeError:\n pass\n\n return game_dict", "def get_player_colors() -> List[Tuple[float, float, float]]:\n return PLAYER_COLORS", "def get_player_turn(self):\r\n return self._player_turn", "def get_color(self):\n return self.color", "def game():\n color_list = createColors() #Calls function to create 2D list of colors for game window\n colorPick = createPickColor() #Calls function to create list for colors to pick\n gameWindow = createGameWindow() #Calls function to create game window\n 
colorWindow = createColorWindow() #Calls funkcion to create color window\n figurelist = createGame(color_list) #Calls function to create 2D list with positions\n\n drawGame(gameWindow, figurelist) #Calls function to draws the game on game window\n drawColorPick(colorPick,colorWindow) #Calls function to draw window to pick colors\n\n rounds = 1 #Sets round to 1 for the first round\n updateWindow(gameWindow, figurelist, rounds) #Update window to ensure it is show right\n\n playing = True #Bool that is true whiles game is running\n\n lastColor = \"\" #String to remember what color was last picked \n\n while playing: #Loop that is the game\n\n colorToSet = pickColor(colorWindow, colorPick) #Gets color from color pick window\n if ((colorToSet != lastColor) and colorToSet != (color_list[0][0])):\n #Checks if the picked color is the same as picked last round and if it is the same as on position [0][0] to avoid picking the same on round 1\n floodFill(0, 0, color_list[0][0], colorToSet, color_list) #Calls floodfill function to update list of colors\n rounds = rounds + 1\n updateFigure(figurelist,color_list) #Update figure list with new color list\n updateWindow(gameWindow, figurelist, rounds) #Update window with new list\n \n if checkWin(color_list): #Calls function to see if game is won\n playing = False\n #Save score to a new list with the old scores score is 100 devided by rounds/2 and rounded to 2 decimals\n new_list = add_score(import_list(), get_new_score_name(), round(100/(rounds/2), 3))\n gameWindow.getMouse() #waits for click and then closes al windows\n gameWindow.close()\n colorWindow.close()\n return new_list #Returns new list to save\n\n lastColor = colorToSet #Change last color\n\n gameWindow.getMouse() #To avoid unintended skips", "def colour_press(self):\n global last_button\n if last_button is None:\n # If there is no \"last button press\", set this as the latest one\n last_button = self\n else:\n # Another button has been pressed before. 
Switch the colours of the two\n last_button.background_color, self.background_color = self.background_color, last_button.background_color\n # Set their states back to normal and reset the last button pressed\n last_button.state = 'normal'\n self.state = 'normal'\n last_button = None\n # Check if the switch removed any blocks\n points = self.screen.check_removal()\n if points == 0:\n # If nothing has been removed, the player gets one step closer to losing\n self.screen.misses += 1\n else:\n # Give the player the points\n self.screen.points += points\n if self.screen.misses > 3:\n # Player has lost, leave the game\n self.screen.leave()", "def next_player(board, prev_player):\n opp = Othello.opponent(prev_player)\n if Othello.any_legal_move(opp, board):\n return opp\n elif Othello.any_legal_move(prev_player, board):\n return prev_player\n return None", "def make_move(self, move_to_play, color_to_move, return_capture=False):\r\n captures = 0\r\n if move_to_play == 'PASS':\r\n board_copy = Board(self.state, self.previous_state, self.to_move)\r\n if self.to_move == 1:\r\n board_copy.to_move = 2\r\n else:\r\n board_copy.to_move = 1\r\n if return_capture:\r\n return board_copy, captures\r\n else:\r\n return board_copy\r\n\r\n current_state = np.array(self.state)\r\n ptemp_state = np.array(current_state)\r\n\r\n for p in ORTHOGONAL_POSITIONS[move_to_play]:\r\n if self.board[p[0]][p[1]].chain_liberty == 1 and self.board[p[0]][p[1]].color != color_to_move:\r\n captures += len(self.chains[(self.board[p[0]][p[1]].chain_num, self.board[p[0]][p[1]].color)])\r\n current_state = self.remove_chain(self.board[p[0]][p[1]].chain_num, self.board[p[0]][p[1]].color,\r\n current_state)\r\n\r\n elif self.board[p[0]][p[1]].liberty == 1 and self.board[p[0]][p[1]].color != color_to_move:\r\n captures += 1\r\n current_state[p[0]][p[1]] = 0\r\n\r\n current_state[move_to_play[0]][move_to_play[1]] = color_to_move\r\n if color_to_move == 1:\r\n temp_board = Board(current_state, ptemp_state, 2)\r\n else:\r\n temp_board = Board(current_state, ptemp_state, 1)\r\n if return_capture:\r\n return temp_board, captures\r\n else:\r\n return temp_board", "def update(self, opponent_action, player_action):\n\n if self.colour == 'upper':\n self.game_in_head.update(player_action, opponent_action)\n else:\n self.game_in_head.update(opponent_action, player_action)", "def get_next_turn(game):\n return game['next_turn']", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def play(self,position):\n (x, y) = position\n if self.board[x][y] != 0:\n print('Error, ' + str(x) + ',' + str(y) + ' is not a possible state')\n return\n else:\n self.board[x][y] = self.color\n self.color = self.color * -1\n (terminated, winner) = self.judge_terminal()\n if terminated:\n self.ended = True\n self.winner = winner\n return winner", "def change_color():\n return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)", "async def process_turn(self, game):\r\n\r\n # Check if the player is an AI\r\n if self.is_ai:\r\n\r\n # Determine the best place to go and return the location\r\n # Use a sleep function to simulate decision making\r\n await sleep(1)\r\n self.determine_best_move(game.board)\r\n return None\r\n\r\n # The player is not an AI\r\n else:\r\n\r\n # Wait for the player to react with the spot they want to go\r\n def check_reaction(reaction, user):\r\n return (\r\n reaction.message.id == game.message.id 
and\r\n user.id == self.member.id and\r\n str(reaction) in game.get_valid_reactions()\r\n )\r\n done, pending = await wait([\r\n game.bot.wait_for(\"reaction_add\", check = check_reaction),\r\n game.bot.wait_for(\"reaction_remove\", check = check_reaction)\r\n ], return_when = FIRST_COMPLETED)\r\n reaction, user = done.pop().result()\r\n for future in pending:\r\n future.cancel()\r\n\r\n # Check if the player wants to QUIT the ConnectFourGame\r\n if str(reaction) == QUIT:\r\n return ConnectFourPlayer.QUIT\r\n\r\n # The player does not want to quit, make their requested move\r\n else:\r\n\r\n # Check if the column is full\r\n if game.board.is_column_full(CONNECT_FOUR_REACTIONS.index(str(reaction))):\r\n return ConnectFourPlayer.COLUMN_FULL\r\n\r\n # The column is not full, let the player go there\r\n else:\r\n game.board.add_piece(CONNECT_FOUR_REACTIONS.index(str(reaction)), is_challenger = game.challenger_turn)\r\n return None", "def play_turn(self, cur_board):\n pass", "def playGame(self, printReward=False):\n\n self.game.resetGame()\n\n redTotal, blackTotal, redMoves, blackMoves = 0, 0, 0, 0\n\n # play the game until it's over\n while self.game.win == E_PLAYING:\n turn = self.game.redTurn\n reward = self.playGameMove(printReward)\n if reward is None:\n break\n else:\n if turn:\n redTotal += reward\n redMoves += 1\n else:\n blackTotal += reward\n blackMoves += 1\n\n return redTotal, blackTotal, redMoves, blackMoves", "def current_state(self):\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n for i in range(8):\n for j in range(8):\n if self.board_value[i][j]==self.current_player:\n square_state[0][i][j]=1\n elif self.board_value[i][j]!=self.current_player and self.board_value[i][j]!= 0:\n square_state[1][i][j]=1\n # indicate the last move location\n square_state[2][self.last_move // self.width, self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]", "def player(board):\n #X ALWAYS gets first move, alternates with each additional move\n curr_moves = actions(board)\n if (board == initial_state()):\n return X\n if(len(curr_moves) % 2 == 0):\n return O\n else:\n return X", "def __getColors(self):\n colors = {\"leftSideHighColor\" : \"\", \"leftSideDownColor\" : \"\",\\\n \"rightSideHighColor\" : \"\", \"rightSideDownColor\" : \"\"}\n for team, nestedDict in self.playerPositions.items():\n for player, position in nestedDict.items():\n if 1 == position:\n colors[\"leftSideHighColor\"] = self.playerColors[team][player]\n elif 2 == position:\n colors[\"leftSideDownColor\"] = self.playerColors[team][player]\n elif 3 == position:\n colors[\"rightSideDownColor\"] = self.playerColors[team][player]\n elif 4 == position:\n colors[\"rightSideHighColor\"] = self.playerColors[team][player]\n for key, color in colors.items():\n colors[key] = color.capitalize()\n return colors", "def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # 
indicate the colour to play\n return square_state[:, ::-1, :]", "def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]", "def _turn(self, next_player=False):\n\n # Get the player for the current turn\n player = self._players.get_current_player(\n ) if not next_player else self._players.get_next_player()\n\n # Reset the _active_turn attribute to True\n self._active_turn = True\n\n # Let the players know who's turn it is\n print(\"\\n{}, it's your turn. Your current score is {}\".format(\n player.get_name(), player.get_total_score()))\n\n # Keep the current player's turn until they roll a 1,\n # win the game, or hold.\n while self._active_turn and not self._end_game:\n self._play(player)\n\n # Check to see if the game is over, if not go to the next player,\n # otherwise call the protected _game_over function to trigger the\n # leaderboard display\n if not self._end_game:\n self._turn(True)\n else:\n self._accounce_winner()\n self._game_over()", "def _get_color(self, r, g, b):\n clr = (r, g, b)\n return clr", "def return_color(self,tank):\n self.color_queue.append(tank.color)", "def winner(self, board):\n if self.any_legal_move(BLACK, board) or self.any_legal_move(WHITE,board):\n return None\n scoreBlack = self.score(BLACK, board)\n scoreWhite = self.score(WHITE, board)\n if scoreBlack > scoreWhite: return PLAYERS[BLACK]\n elif scoreBlack < scoreWhite: return PLAYERS[WHITE]\n else: return TIE", "def drive_to_color(self, color):\n while not self.color_sensor.color == color:\n self.right_motor.run_forever(speed_sp=150)\n self.left_motor.run_forever(speed_sp=150)\n self.right_motor.stop(stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.left_motor.stop(stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n if color == ev3.ColorSensor.COLOR_RED:\n ev3.Sound.play(\"/home/robot/csse120/assets/sounds/Enter_Sandman.wav\").wait()\n elif color == ev3.ColorSensor.COLOR_BLUE:\n ev3.Sound.play(\"/home/robot/csse120/assets/sounds/Luke_Bryan_-_That_s_My_Kind_Of_Night_with_Lyrics_.wav\").wait()\n elif color == ev3.ColorSensor.COLOR_BLACK:\n ev3.Sound.play(\"/home/robot/csse120/assets/sounds/Semi-Charmed_Life_1_.wav\").wait()", "def _switch_turn(self, cur_player):\n if cur_player == \"W\":\n self._turn = \"B\"\n else:\n self._turn = \"W\"", "def color(piece):\n return Color.BLACK if piece in {Piece.BP, Piece.BN, Piece.BB, Piece.BR, Piece.BQ, Piece.BK} else Color.WHITE", "def get_move(self, board, color_to_play):\n move = self.MCTS.get_move(board, color_to_play, self.n_simualtions_per_move, self.exploration)\n self.update(move)\n return move", "def __init__(self, action, pre_state, action_player=2, next_player=1, has_color=True):\n\n self.action = action\n # Initial state\n if pre_state is None:\n self.action_player = action_player\n self.player = next_player\n self.available_moves = set()\n for x in range(1, 16):\n for y in range(1, 16):\n if (x, y) != action:\n self.available_moves.add((x, y))\n self.occupied = {}\n if 
next_player == 1:\n self.occupied[action] = 2\n else:\n self.occupied[action] = 1\n self.top = action[0]\n self.bottom = action[0]\n self.left = action[1]\n self.right = action[1]\n global use_color\n use_color = has_color\n else:\n self.action_player = pre_state.player\n if pre_state.player == 1:\n self.player = 2\n else:\n self.player = 1\n self.available_moves = set(pre_state.available_moves)\n self.available_moves.remove(action)\n self.occupied = dict(pre_state.occupied)\n self.occupied[action] = pre_state.player\n # Set the most top, bottom, left, and right index for the state.\n if action[0] < pre_state.top:\n self.top = action[0]\n else:\n self.top = pre_state.top\n if action[0] > pre_state.bottom:\n self.bottom = action[0]\n else:\n self.bottom = pre_state.bottom\n if action[1] < pre_state.left:\n self.left = action[1]\n else:\n self.left = pre_state.left\n if action[1] > pre_state.right:\n self.right = action[1]\n else:\n self.right = pre_state.right\n self.pre_state = pre_state\n if self.action_player == 1:\n self.value = evaluate_state(self)\n else:\n self.value = -evaluate_state(self)", "def __set_colors(self, players):\n\n colors = set()\n for p in players:\n if p.get_color() is None:\n continue\n colors.add(p.get_color())\n if len(colors) != 0 and len(colors) != len(players):\n raise ValueError(\"Each player does not have a unique assigned color.\")\n \n if len(colors) == 0:\n for i, p in enumerate(players):\n p.set_color(BoardPlayer.POSSIBLE_COLORS[i])", "def color_in_check(self, mycolor):\n\n opponent = self.__players[self.get_opponent_color(mycolor)]\n\n x, y = None, None\n for (u, v) in self.__players[mycolor]:\n piece = self.get_piece(u, v)\n if not piece:\n raise ValueError()\n\n if self.get_piece(u, v).name == 'king':\n x, y = u, v\n break\n\n for (u, v) in opponent:\n if (x, y) in self._get_piece_moves(u, v):\n return True\n\n return False", "def compute_utility(board, color):\n player1_score = 0\n player2_score = 0\n\n score = get_score(board)\n if color == 1:\n return score[0] - score[1]\n else:\n return score[1] - score[0]", "def end_game(self,player,color):\n black = (0, 0, 0)\n font = pygame.font.Font(os.path.join(os.path.dirname(os.path.realpath(__file__)),'TRON.TTF'), 25)\n label1= font.render(player + \" WINS!\", 1, color)\n label2 = font.render(\"Press Space to Restart\", 1, (255,255,255))\n self.screen.fill(black)\n self.screen.blit(label1,(185,100))\n self.screen.blit(label2,(43,200))\n pygame.display.flip()\n self.game_over = True\n for player in self.players:\n player.dir = \"None\"", "def declare_winner(board):\n results = count_chips(board, 0), count_chips(board, 1)\n winner = '¡El color {} ha ganado la partida!'\n for i in range(2):\n if results[i] > results[i - 1]:\n print(winner.format(PLAYER_COLORS[i]) + '\\n')\n if results[0] == results[1]:\n print('¡Empate!\\n')\n print('Puntajes:')\n for i in range(2):\n print('{}: {} punto(s)'.format(PLAYER_COLORS[i].title(), results[i]))", "def get_color(self):\n\n return self.color" ]
[ "0.7333892", "0.70925194", "0.6907869", "0.68132323", "0.6679578", "0.6647104", "0.6575999", "0.65741646", "0.6515778", "0.64863414", "0.6473067", "0.63014096", "0.6287772", "0.62448686", "0.6174598", "0.6149186", "0.61185074", "0.598921", "0.592801", "0.589623", "0.58790624", "0.5865871", "0.58650553", "0.58319265", "0.583045", "0.5825556", "0.58138573", "0.5812738", "0.58040273", "0.5789422", "0.5782856", "0.57542014", "0.5747689", "0.57298523", "0.5729775", "0.5727155", "0.5715833", "0.56920207", "0.56915355", "0.5685202", "0.5679548", "0.5645373", "0.56344926", "0.56294584", "0.56291467", "0.5598268", "0.5582803", "0.5579099", "0.55603933", "0.5543831", "0.55394304", "0.55145824", "0.5504981", "0.54955316", "0.5480497", "0.5467557", "0.54500675", "0.54496837", "0.54485744", "0.54456806", "0.5430107", "0.5424946", "0.5411937", "0.5384906", "0.5374669", "0.537067", "0.536988", "0.5364708", "0.5358063", "0.5353092", "0.5348206", "0.5341451", "0.5341451", "0.5341451", "0.5341451", "0.53371656", "0.53303474", "0.53301036", "0.5329029", "0.53267545", "0.5324234", "0.5321171", "0.53180397", "0.5304525", "0.5304525", "0.53030044", "0.53027916", "0.52999794", "0.5297854", "0.52941823", "0.5293575", "0.529072", "0.5288927", "0.5272074", "0.5268481", "0.52670974", "0.52613693", "0.52611995", "0.52466005", "0.5242829" ]
0.69213134
2
Begin the game of Fish. The referee will begin the game and start taking moves from players. The game will continue until it has ended. Players who attempt invalid placements or movements, take too long to respond, or otherwise crash will be ejected from the game and their penguins will be removed.
def run(self):
    while True:
        if self.is_game_over():
            break
        self.run_turn()
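A minimal sketch of how the referee's turn step could enforce the rules described in the query (ejecting players for invalid placements or movements, timeouts, or crashes). All helper names here (current_player, request_move_with_timeout, is_valid_move, apply_move, eject, advance_to_next_player) and the timeout mechanism are assumptions for illustration; they are not part of the dataset's document field.

def run_turn(self):
    # One referee turn: ask the current player for a move and apply it,
    # ejecting the player on an invalid move, a timeout, or a crash.
    player = self.current_player()
    try:
        # Assumed helper: raises TimeoutError if the player answers too
        # slowly and lets any exception raised by the player's code propagate.
        move = self.request_move_with_timeout(player, self.move_timeout)
    except Exception:
        # Too slow or crashed: remove the player and their penguins.
        self.eject(player)
        return
    if not self.state.is_valid_move(player, move):
        # Invalid placement or movement also results in ejection.
        self.eject(player)
        return
    self.state = self.state.apply_move(player, move)
    self.advance_to_next_player()

In this sketch the ejection path simply returns, so the outer run loop shown above moves on to the next turn with the offending player removed from the game state.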
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()", "def start_game(self):\n while self.can_deal:\n self.take_turn()", "def start(self):\n self.save_checkpoint(\"setup\")\n\n logging.info(\"Starting game...\")\n body = render_message(\n \"welcome.html\",\n game_name=self.name,\n night_end=self.night_end.strftime(\"%I:%M %p\"),\n day_end=self.day_end.strftime(\"%I:%M %p\"),\n players=self.game.players,\n )\n self.send_message(mafia.events.PUBLIC, \"%s: Start\" % self.name, body)\n self.game.begin()\n self.started = True\n\n self.save_checkpoint(\"start\")", "def draw_fish(f: Fish):\n x0, y0 = pos()\n h0 = heading()\n fillcolor(f.color)\n begin_fill()\n left(30)\n for _ in range(3):\n left(120)\n forward(0.4 * f.size)\n right(120)\n circle(0.5 * f.size)\n end_fill()\n jump_rel(0, 0.70 * f.size)\n fillcolor('white')\n begin_fill()\n circle(0.15 * f.size)\n end_fill()\n jump_to(x0, y0)\n setheading(h0)", "def start_game(self):\n p1_move = True\n is_all_moves_over = False\n while not is_all_moves_over:\n\n while p1_move and not is_all_moves_over:\n p1 = int(input(\"Player 1 pos:\"))\n is_all_moves_over, p1_move = self.play('p1', p1, p1_move)\n\n while not p1_move and not is_all_moves_over:\n p2 = int(input(\"Player 2 pos:\"))\n is_all_moves_over, p1_move = self.play('p2', p2, p1_move)\n\n print(\"Game Ended in Draw\")", "def start(self):\n running = True\n while running:\n k=self.Game.playgame()\n if k=='Exit':\n running = False\n continue\n elif k=='resume':\n continue\n elif k=='GameOver':\n o=self.gameover()\n if o=='newgame':\n self.Game=Game(self.Display)\n else:\n running = False\n while k=='Won':\n o=self.won()\n if o=='newgame':\n self.Game=Game(self.Display)\n break\n elif o==\"Exit\":\n output = self.Game.popup()\n if output == 'resume':\n self.Game.GameBoard.display()\n continue\n else:\n running = True\n break", "def start_game(self):\n self.code = code.get_random_num()\n self.Player1 = self.get_player(1)\n self.Player2 = self.get_player(2)\n attempt = self.Player1.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n check.check(num_guessed_list, right_answer_list)\n attempt = self.Player2.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n output = check.check(num_guessed_list, right_answer_list)\n play = end_game.end_game(output)\n if play == True:\n self.keep_playing()", "def run_game(self):\n self.food.randomize_position(self.grid)\n while True:\n self.clock.tick(8)\n self.handle_keys()\n if not self.snake.move(self.grid):\n if self.end_game_dialog() == 0:\n end_game()\n else:\n self.score = 0\n self.grid = deepcopy(self.base_grid)\n self.snake.initialize_snake_on_grid(self.grid)\n self.food.randomize_position(self.grid)\n\n self.draw_grid()\n\n if self.snake.get_head_position() == self.food.position:\n self.snake.length += 1\n self.score += 2\n self.food.randomize_position(self.grid)\n if self.score > self.maxScore:\n self.maxScore = self.score\n\n self.screen.blit(self.surface, (0, 0))\n score = self.font.render(\"Score {0}\".format(self.score), True, (0, 0, 0))\n self.screen.blit(score, (5, 10))\n pygame.display.update()", "def start_game(self):\n\n\t\tpass", "def startGame():\n #roundnumber\n eel.updateRoundNumber()\n # start page\n eel.updateStartPage([startPage.getTitle(), startPage.getUrl()])\n 
eel.updateStartPageDescription(startPage.getFirstSentence())\n # goal page\n eel.updateGoalPage([goalPage.getTitle(), goalPage.getUrl()])\n eel.updateGoalPageDescription(goalPage.getFirstSentence())\n # ui updates\n eel.updateCurrentPage(\n [wikiPageStackTrace[-1].getTitle(), wikiPageStackTrace[-1].getUrl()])\n eel.updateCurrentPageDescription(wikiPageStackTrace[-1].getFirstSentence())\n eel.printInPageList(wikiPageStackTrace[-1].getOnlyLinksListJS())\n # loader\n time.sleep(0.5)\n eel.hideLoader()", "def start(self):\n while self.turns <= 7:\n # print()\n # print(\"This is turn {}.\".format(self.turns))\n turn = Turn(self.current_player, self.die)\n turn.run()\n self.current_player.score += turn.score\n # print(\"{}'s score is now {}\".format(self.current_player, self.current_player.score))\n self.turns += 1\n # print()\n # print(\"You have reached 7 turns. Game over.\")\n # print(\"Your total score is {}.\".format(self.current_player.score))", "def frog_update(self, player):\n self.default_update_idle(player)\n if not self.can_update(): return\n #if self.onGround: \n # TODO: make the frog try to land on the player.\n # figure out the frog's distance from the player, and calculate the necessary xvel.\n # jump with min(self.max_speed/2, target_speed)\n if self.onGround and self.ai_counter <= 0: #TODO: change the way this works\n self.faceTowards(player.current_tile())\n self.changeAnimation('idle', self.direction_id)\n self.jump(self.direction_val*self.max_speed/2, self.max_speed)\n self.ai_counter = 110\n self.wait()", "def plant_food(self):\n self.phase.set(0)\n #self.broadcast_phase()\n self.players[self.first_player].take_turn()", "def play_Feeder(hand, battlefield, graveyard, library):\n\thand['Carrion Feeder'] -= 1\n\tbattlefield['Carrion Feeder'] += 1\n\tlog(\"We play a Carrion Feeder.\")\n\tdescribe_game_state(hand, battlefield, graveyard, library)", "def start(self):\n # store a sign controlling addition or subtraction so pieces move in the right direction\n self.board = fen_to_position(self.game.fen)\n self.transposition_table = dict()", "def ProcessGame(self):\n\n #self.CreateWeapon(line_number=0, weapon_type=self.MakeSunFlowerWeapon)\n while self.InputEvents(pygame.event.get()):\n self.clock.tick(self.speed)\n \n if self.IsGameOver():\n painter.DisplayGameOver()\n else:\n self.UpdateAll()\n self.DrawAll()", "def run_game(self):\n\n while True:\n # Watch for keyboard and mouse events.\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n self.ship.moving_up = True\n if event.key == pygame.K_DOWN:\n self.ship.moving_down = True\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = True\n if event.key == pygame.K_LEFT:\n self.ship.moving_left = True\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False\n if event.key == pygame.K_LEFT:\n self.ship.moving_left = False\n if event.key == pygame.K_UP:\n self.ship.moving_up = False\n if event.key == pygame.K_DOWN:\n self.ship.moving_down = False\n\n # Update positions of any characters that might be moving.\n self.ship.update()\n self.aliens.update()\n\n if pygame.sprite.spritecollideany(self.ship, self.aliens):\n # The player's ship has hit an alien ship.\n # Show an explosion, pause the game, and reset player's ship and alien ships.\n explosion = Explosion(self, self.ship)\n explosion.blitme()\n pygame.display.flip()\n pygame.mixer.music.play(0)\n sleep(2)\n # sys.exit()\n\n 
self.screen.fill((255, 255, 255))\n self.ship.blitme()\n self.aliens.draw(self.screen)\n\n # Make the most recently drawn screen visible.\n pygame.display.flip()", "def run_game(self):\n #create ufos\n self.creat_fleet_ufos()\n \n while True:\n \n self._check_events()\n self.ship.update()\n #self.ufos.update()\n self.missiles.update()\n self._update_screen()\n self.delete_missiles()", "async def fish(ctx):\n global fish_now\n r = random.random()\n if len(str(fish_now)) > 1500:\n fish_now = round(pow(fish_now, 0.5))\n if fish_now == 69: fish_now = 70\n return await ctx.send(\"Woah! Bear's fish is a little too high, so it unfortunately has to be square rooted.\")\n if r > 0.9:\n fish_now += 10\n if fish_now == 69: fish_now = 70\n return await ctx.send(f\"Wow, you gave bear a super fish! Added 10 fish! Bear now has {fish_now} fish!\")\n if r > 0.1:\n fish_now += 1\n if fish_now == 69: \n fish_now = 70\n return await ctx.send(f\"You feed bear two fish. Bear now has {fish_now} fish!\")\n else:\n return await ctx.send(f\"You feed bear one fish. Bear now has {fish_now} fish!\")\n if r > 0.02:\n fish_now += 0\n return await ctx.send(f\"You can't find any fish... and thus can't feed bear. Bear still has {fish_now} fish.\")\n else:\n fish_now = round(pow(fish_now, 0.5))\n if fish_now == 69: fish_now = 70\n return await ctx.send(f\":sob:\\n:sob:\\n:sob:\\nAww, bear's fish was accidentally square root'ed. Bear now has {fish_now} fish. \\n:sob:\\n:sob:\\n:sob:\")", "def start_at_beginning(self):\n b_pieces = [ChessPiece.B_ROOK,\n ChessPiece.B_KNIGHT,\n ChessPiece.B_BISHOP,\n ChessPiece.B_QUEEN,\n ChessPiece.B_KING,\n ChessPiece.B_BISHOP,\n ChessPiece.B_KNIGHT,\n ChessPiece.B_ROOK]\n w_pieces = [ChessPiece.W_ROOK,\n ChessPiece.W_KNIGHT,\n ChessPiece.W_BISHOP,\n ChessPiece.W_QUEEN,\n ChessPiece.W_KING,\n ChessPiece.W_BISHOP,\n ChessPiece.W_KNIGHT,\n ChessPiece.W_ROOK]\n\n for i in range(8):\n self.board.put_piece(b_pieces[i], 0, i)\n self.board.put_piece(ChessPiece.B_PAWN, 1, i)\n self.board.put_piece(w_pieces[i], 7, i)\n self.board.put_piece(ChessPiece.W_PAWN, 6, i)", "async def fish(self, ctx):\n board = self._sort_leaderboard(json.loads(await self._query(ctx, \"fish\")))\n player = \"\"\n fish = \"\"\n place = \"\"\n i = 1\n for x in board:\n player += x['player'] + \"\\n\"\n fish += str(x['fish']) + \"\\n\"\n place += str(i) +\"\\n\"\n i += 1\n embed: discord.Embed = discord.Embed(\n color = discord.Color.blue()\n )\n embed.add_field(name = \"Place\", value =place, inline=True)\n embed.add_field(name = \"Player\", value=player, inline=True)\n embed.add_field(name = \"Fish\", value=fish, inline=True)\n await ctx.send(embed = embed)", "def start_game(self):\n self.board = Board(num_tableaus=self.tableau_qty, num_decks=self.decks, deal_3=self.deal_3)\n self.board.init_move_dict()\n self.board.deal(self.deck)\n\n if self.api_use:\n self.init_game_api()\n elif self.commandline:\n self.init_cl_game()\n else:\n self.init_pygame()", "def start(self):\n\n # Call the protected _turn method to start the game\n self._turn()", "def play_self_play_game(self):\n\n # start a fresh game \n self.reset_game()\n \n # play the epsilon greedy move and save the state transition in the experience lists \n while not self.game_terminal():\n self.epsilon_greedy_move()", "def startGame(self):\n\n\t\tfor name in self.players.keys():\n\t\t\tself.startPlayerGame((name, 0))\n\t\tself.setupGuiSignals()", "def step(self):\n if not self._is_game_over:\n self._move_snake()\n self._is_game_over = self.is_snake_collides()", "def 
start_fare(self):\n self.current_fare_distance = 0", "def start(self):\n # Call the protected _turn method to start the game\n self._end_time = time.time() + 60\n self._turn()", "def run_game(self) -> None:\n decision = 0\n if self._initial:\n self._initial = False\n while decision != 1:\n try:\n display_no_combat_init(self.hero)\n decision = get_user_input([1, 2, -1])\n if decision == -1:\n self._quit()\n elif decision == 2:\n self._show_bag()\n else:\n break\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")\n\n while not self.hero.is_dead:\n try:\n self._load_map()\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")", "def start(self):\n self._state = STATE_INACTIVE\n self._game = None\n self._last_key_press = False\n self._last_n_press = False\n self._last_lose_life = False\n self._mssg = (GLabel(text=START_MSSG, x=GAME_WIDTH/2, y=GAME_HEIGHT/2, font_size=24))\n self.time = None\n self._points_mssg = None\n self._falling_points = []\n self._FP_mssg = None", "def start_game(self, **kwargs):\n\n success, info = self.gms.start_game(\n player=kwargs.get('player', 'x'),\n first_turn=raw_input('Would you like to go first? y/n\\n') == 'y'\n )\n if success:\n if info['status_code'] == core_constants.GAME_STATUS_HUMAN_MOVE_REQUIRED:\n print(self.gms.game.get_board_state_pretty())\n self.play_human_move()\n else:\n print(info['messages'][0])", "def start_gameloop(self):\n print(\"Game Loop starting...\")\n while True:\n current_turn = self.who_goes_first()\n print('The ' + current_turn + ' will go first.')\n while self.is_active:\n if current_turn == \"player\":\n self.board.draw()\n move = get_player_move(\n self.board.positions, self.board.is_position_availible)\n self.board.make_move(move, self.player_letter)\n current_turn = \"computer\"\n else:\n move = self.npc.get_move(self.board)\n self.board.make_move(move, self.npc.letter)\n current_turn = \"player\"\n if self.board.is_winner(self.player_letter):\n self.board.draw()\n print(\"You won!\")\n self.is_active = False\n if self.board.is_winner(self.npc.letter):\n self.board.draw()\n print(\"You lost!\")\n self.is_active = False\n if self.board.is_board_full():\n self.board.draw()\n print(\"Tie\")\n self.is_active = False\n if request_play_again() is False:\n break\n self.is_active = True\n self.board = Board(request_board_size())", "def play_game():\n\tstate = Coinche(verbose=True)\n\tbeliefs = [Belief(i, state) for i in range(4)]\n\n\twhile state.get_moves():\n\t\tprint(state)\n\t\tm = ismcts(rootstate=state, itermax=2000, verbose=False, belief=beliefs[state.player_to_move])\n\t\tprint(\"Best Move: \" + str(m) + \"\\n\")\n\t\tstate.do_move(m)\n\n\tfor p in range(state.number_of_players):\n\t\tprint(\"Player \" + str(p), state.get_result(p))", "def start_game() -> None:\n rows = get_int()\n cols = get_int()\n state = game.GameState(rows, cols)\n\n line = next_line()\n if line == 'CONTENTS':\n rowList = []\n for i in range(rows):\n row = []\n line = raw_next_line()\n for index in range(cols):\n row.append(line[index])\n rowList.append(row)\n state.set_board_contents(rowList)\n\n while True:\n _display_board(state)\n line = next_line()\n if line == 'Q':\n return\n if line == '':\n if state.tick():\n _display_board(state)\n break\n else:\n _process_command(line, state)\n print('GAME OVER')", "def start(self): \n while self.lives > 0 and self.active_phrase.check_complete(self.guesses) == False:\n print(f\"Lives remaining: {self.lives}\")\n 
self.active_phrase.display(self.guesses)\n user_guess = self.get_guess()\n self.guesses.append(user_guess)\n self.active_phrase.check_guess(user_guess)\n if not self.active_phrase.check_guess(user_guess):\n self.lives -= 1\n self.lives_remaining()", "def beginRound(self):\n\t\tself.gameState = Table.PRE_FLOP\n\t\tfor p in self.getPlayers():\n\t\t\tif p.money <= 0:\n\t\t\t\tprint p.name\n\t\t\t\tself.playerRemoveList.append(p)\n\t\tself.removeFromPlayerList()\n\t\tif len(self.getPlayers()) == 1:\n\t\t\tself.isGameEnd = True\n\t\telse:\n\t\t\tself.roundNo += 1\n\t\t\tself.determineBlinds()\n\t\t\tself.curRaise = self.bigBlind\n\t\t\tself.collectSmallBlind()\n\t\t\tself.collectBigBlind()\n\t\t\tself.deal()\n\t\t\tself.setState()\n\t\t\tif self.noOfPlayers() == 2:\n\t\t\t\tself.turn = self.curDealerSeatNo\n\t\t\t\t_, self.roundEndSeat = self.findNthPlayerFromSeat(self.turn, 1)\n\t\t\telse:\n\t\t\t\t_, self.turn = self.findNthPlayerFromSeat(self.curDealerSeatNo, 3)\n\t\t\t\t_, self.roundEndSeat = self.findNthPlayerFromSeat(self.curDealerSeatNo, 2)", "def startface(self):\n self.fan = (self.position.x,self.position.y,self.position.z)", "def play(self):\n\n player1_turn = True\n\n while True:\n if player1_turn:\n self.player_turn(self.player1, self.player2)\n if self.lost(self.player2):\n print(\"Game Over!! You sank {}'s ships!\".format(\n self.player2.name))\n break\n player1_turn = False\n else:\n self.player_turn(self.player2, self.player1)\n if self.lost(self.player1):\n print(\"Game Over!! You sank {}'s ships!\".format(\n self.player1.name))\n break\n player1_turn = True", "def play(self):\n self.__draw_board()\n self.game_started = True\n turn = self.player_start\n while not self.game_ended:\n self.__make_move(turn)\n self.__draw_board()\n self.__check_winner()\n turn = abs(turn-1)", "def _start(self):\n\n print \"DEBUG: Starting Game\"\n nextFrameTime = 0\n deltaFrameTime = 1000 / Game.FPSLimit\n\n # Main Loop\n try:\n while True:\n self._handleEvents()\n\n currentTime = pygame.time.get_ticks()\n if ((nextFrameTime - currentTime) <= 0):\n pygame.display.flip()\n self.frameCount += 1\n self._nextFrame()\n self._drawFrame()\n nextFrameTime = currentTime + deltaFrameTime\n\n pygame.time.delay(1)\n finally:\n pygame.quit()", "def start_21game(self):\n self.is_game_start = True\n self.already_has_a_winner = False\n self.player_point = {}\n self.generate_21game_number()\n self.boardcast(self.game_msg)", "def run_game(self):\n\t\twhile True:\n\t\t\tself._check_event()\n\t\t\tself.ship.update()\n\t\t\tself._update_bullets()\n\t\t\tself._update_aliens()\n\t\t\tself._update_screen()", "def start_game(self):\n self._puzzle.get_puzzle()\n self._do_outputs()\n\n while self._keep_playing:\n print(\"\")\n print(\"+-----+-----+-----\")\n print(\"\")\n self._get_inputs()\n self._do_updates()\n self._do_outputs()\n print(\"+-----+-----+-----\")", "def create_fish():\n if config.F_LIST == []:\n fitem = scene.Fish(randint(2, common.COLS-2),\n randint(common.MIDS_R + 3, common.ROWS-2))\n config.F_LIST.append(fitem)\n elif randint(0, 10) == 1:\n fitem = scene.Fish(randint(2, common.COLS-2),\n randint(common.MIDS_R + 3, common.ROWS-2))\n config.F_LIST.append(fitem)\n\n for i in config.F_LIST:\n i.move(i.x_pos+1)", "def run_game():\n\n # Initialize game and create a screen object\n pygame.init()\n ai_settings = Settings()\n\n # Set screen width and height\n screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height), pygame.RESIZABLE) \n pygame.display.set_caption(\"CharLee MacDennis 
2: Electric Bugaloo\")\n\n # Make the Play button\n play_button = Button(ai_settings, screen, \"Play\")\n\n # Make the Puase button\n pause_button = Button(ai_settings, screen, \"Paused\")\n\n # Create an instance to store game statistics\n stats = GameStats(ai_settings);\n\n # Set stats.high_score to be equal to universal high score\n stats.high_score = gf.read_high_score()\n\n # Create a scoreboard\n sb = Scoreboard(ai_settings, screen, stats)\n\n\n # Make a ship, a group of ship bullets\n ship = Ship(ai_settings, screen)\n ship_bullets = Group()\n \n # Create alien and group of alien bullets\n aliens = Group()\n alien_bullets = Group()\n\n # Create the fleet of aliens\n gf.create_fleet(ai_settings, screen, ship, aliens)\n\n # Create clock for FPS limit\n clock = pygame.time.Clock()\n\n # Start the main game loop\n while True:\n # 60 fps\n clock.tick(120)\n\n # Watch for keyboard and mouse events\n gf.check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, ship_bullets, alien_bullets)\n\n if stats.game_active:\n\n # Update ship status\n ship.update()\n\n # Update all bullets on screen\n gf.update_bullets(ai_settings, screen, stats, sb, ship, aliens, ship_bullets, alien_bullets)\n\n # Update aliens status\n gf.update_aliens(ai_settings, stats, screen, sb, ship, aliens, ship_bullets, alien_bullets)\n\n # Draw and refresh the screen\n gf.update_screen(ai_settings, screen, stats, sb, ship, aliens, ship_bullets, alien_bullets, play_button)", "def basic_begin_game(game_context) :\n game_context.world.set_game_defined()\n execute_context(game_context)", "def start():\r\n introduction()\r\n score = duck_shooting1()\r\n dogs()\r\n play_again(score)", "def initial_move(self):\n\n # Make the first move based on the game we\n # are currently playing, otherwise return\n if isinstance(self.get_game_space(), Gomoku):\n\n # play one stone in the bottom left-hand corner\n self.get_game_space().set_tile(0,6,self.get_affinity())\n\n # the agents are now in play \n self.set_play_status(True)\n self.get_opponent().set_play_status(True)\n\n else:\n print('Unknown game. 
Returning')\n return None", "def start_game(self):\n self.word_view.next_word()\n self.greeterboard.reset(msg=i18n.OUT_MSG_LUCK.format(self.player_name))\n self.keyboard.reset()\n self.init_game_metrics()", "def play_step(self, action):\n self.players[0].moving_left = False\n self.players[0].moving_right = False\n if action == MOVE_LEFT:\n self.players[0].moving_left = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_left = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == MOVE_RIGHT:\n self.players[0].moving_right = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_right = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == SHOOT:\n if self.dead_player or not self.players[0].is_alive:\n self.update(is_a_star=True)\n return\n if not self.players[0].weapon.is_active:\n self.players[0].shoot()\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n if self.dead_player or not self.players[0].is_alive:\n return", "def start_game(self) -> None:\n self.init_game()\n self.play()", "def start(self):\n self.welcome()\n while self.missed < 5 and not self.active_phrase.check_complete(self.guesses):\n print(f\"Number missed: {self.missed}\")\n self.active_phrase.display(self.guesses)\n user_guess = self.get_guess()\n if user_guess == \"INVALID\":\n continue\n self.guesses.append(user_guess)\n if not self.check_guess(user_guess):\n print(f\"Sorry, there are no {user_guess}'s in the phrase\")\n self.missed += 1\n else:\n count_correct = self.active_phrase.phrase.count(user_guess)\n if count_correct == 1:\n print(f\"Great job! There is {count_correct} '{user_guess}' in the phrase\")\n else:\n print(f\"Great job! 
There are {count_correct} {user_guess}'s in the phrase\")\n self.game_over()", "def launchGame(): \n # On rejoint la partie\n game.join()\n\n #On affecte le nom\n game.player.setName(options.name)\n\n #On créer une nouvelle fenetre\n win = createNewWin(curses)\n\n #On creer notre premiere pomme...\n win.addch(game.apple.coordx, game.apple.coordy, 'O', curses.color_pair(3))\n\n #On indique la direction par defaut du serpent, il ira par defaut a droite\n key = curses.KEY_RIGHT\n\n #On effectue une boucle infinie tant que la touche Echap (27) n'est pas\n #pressée.\n while key != 27:\n #On ajoute le score a la ligne 0, colonne 2\n #Le score est calcule en recuperant la longueur du serpent actuel\n #et en retirant 2 (sa valeur initiale)\t\n win.addstr(0,2,' Joueur : %s Score : %s ' %(game.player.name, str(game.player.score)), curses.color_pair(1))\n\n #On calcul un mouvement de ralentissement dependant de la longueur du\n #serpent\n win.timeout(180+ ( (len(game.snake.oSnake)-2) % 10- (len(game.snake.oSnake)-2) ) * 3 )\n\n #On 'hook' les touches\n getkey = win.getch()\n\n #On recupere la valeur de la touche par defaut\n key = key if getkey==-1 else getkey\n\n #Suivant la touche pressée, on modifie les positions de notre serpent\n game.snake.move(key)\n\n #On supprime les derniers elements sur lequel le Snake passe\n win.addch(game.snake.oSnake[len(game.snake.oSnake)-1][1],\n game.snake.oSnake[len(game.snake.oSnake)-1][0],' ')\n\n #On supprime un element du snake pour eviter la collision\n if win.inch(game.snake.oSnake[0][1], game.snake.oSnake[0][0]) & 255 == 32:\n game.snake.oSnake.pop()\n\n #Si on passe sur un element O\t\n elif win.inch(game.snake.oSnake[0][1],game.snake.oSnake[0][0]) & 255 == ord('O'):\n #On ajoute 1 point a notre Joueur\n game.player.addPoint()\n\n #On recalcule des nouvelles coordonnees pour la pomme\n game.apple.newApple()\n #On verifie les nouvelles coordonnees\n while game.apple.checkApple(game.snake.oSnake) != True:\n game.apple.newApple()\n\n #On l'affiche a l'ecran\n win.addch(game.apple.coordx, game.apple.coordy, 'O', curses.color_pair(3))\n\t\t\n else:\n break\n\n #On affiche une partie de notre Snake\n win.addch(game.snake.oSnake[0][1],game.snake.oSnake[0][0],'X', curses.color_pair(2))\n\n\n #Si on sort de la boucle (GameOver), alors on\n #détruit les fenetres\n destroyWin()\n\n #A la fin de la partie (game over), on affiche l'écran \n showGameOver()", "def start_game(self):\n print(\"hi there, game started!\")\n self.draw()", "def defender(self):\n step = None\n other_card = self.enemy.enemy_step(self.other_hand.get_hand())\n s = 0\n while True:\n self.other_hand.give(other_card, self.table)\n #print()\n print(\"TABLE\")\n print(self.table)\n print(\"#\" * 100)\n my_card = self.player.player_repel(self.table.get_hand()[s], self.my_hand.get_hand())\n if my_card != None:\n self.my_hand.give(my_card, self.table)\n print(self.table)\n print(\"#\" * 100)\n\n step = 0\n else:\n\n step = 1\n for i in range(len(self.table.get_hand())):\n # self.table.give(self.table.get_hand()[i], self.my_hand)\n self.my_hand.add(self.table.get_hand()[i])\n break\n print(\"Your hand\")\n print(self.my_hand)\n other_card = self.enemy.toss(self.table.get_hand(), self.other_hand.get_hand())\n if other_card == None:\n break\n s = s + 2\n if step == 0:\n #print()\n print(\"Successful defense\")\n else:\n print(\"To abandon the defense\")\n\n self.table.get_hand().clear()\n\n return step", "async def _play_heist(self, ctx):\r\n author = ctx.message.author\r\n guild = ctx.guild\r\n config = 
await self.thief.get_guild_settings(guild)\r\n theme = await self.thief.get_guild_theme(guild)\r\n crew = await self.thief.config.guild(guild).Crew()\r\n\r\n await self.thief.check_server_settings(guild)\r\n await self.thief.check_member_settings(author)\r\n\r\n cost = config[\"Cost\"]\r\n wait_time = config[\"Wait\"]\r\n prefix = ctx.prefix\r\n\r\n # Theme Variables\r\n t_crew = theme[\"Crew\"]\r\n t_heist = theme[\"Heist\"]\r\n t_vault = theme[\"Vault\"]\r\n\r\n outcome, msg = await self.thief.requirement_check(prefix, author, cost)\r\n\r\n if outcome == \"Failed\":\r\n return await ctx.send(msg)\r\n\r\n if not config[\"Planned\"]:\r\n await bank.withdraw_credits(author, cost)\r\n config[\"Planned\"] = True\r\n await self.thief.config.guild(guild).Config.set(config)\r\n crew = await self.thief.add_crew_member(author)\r\n await ctx.send(\"A {4} is being planned by {0}\\nThe {4} \"\r\n \"will begin in {1} seconds. Type {2}heist play to join their \"\r\n \"{3}.\".format(author.name, wait_time, ctx.prefix, t_crew, t_heist))\r\n await asyncio.sleep(wait_time)\r\n \r\n crew = await self.thief.config.guild(guild).Crew()\r\n\r\n if len(crew) <= 1:\r\n await ctx.send(\"You tried to rally a {}, but no one wanted to follow you. The \"\r\n \"{} has been cancelled.\".format(t_crew, t_heist))\r\n await self.thief.reset_heist(guild)\r\n else:\r\n await self.heist_game(ctx, guild, t_heist, t_crew, t_vault)\r\n\r\n else:\r\n await bank.withdraw_credits(author, cost)\r\n crew = await self.thief.add_crew_member(author)\r\n crew_size = len(crew)\r\n await ctx.send(\"{0} has joined the {2}.\\nThe {2} now has {1} \"\r\n \"members.\".format(author.display_name, crew_size, t_crew))", "async def fight(self, ctx):\r\n attacker = ctx.message.author.name\r\n defenders = ctx.message.mentions\r\n # only continue if valid attacker and defender\r\n attacker_ship = Ship.find_ship(attacker)\r\n if not attacker_ship:\r\n await ctx.send('{0}, you do not have a ship! `$ship` to get one'.format(ctx.message.author.mention))\r\n return\r\n if not defenders:\r\n await ctx.send('Who are you fighting? `$fight @user` to fight someone')\r\n # reset cooldowns when not successful fights\r\n # self.fight.reset_cooldown()\r\n return\r\n elif len(defenders) > 1:\r\n await ctx.send('Who are you fighting? One at a time (for now)')\r\n return\r\n else:\r\n defender = defenders[0].name\r\n\r\n if attacker == defender:\r\n attacker_ship.gold -= 50\r\n if attacker_ship.gold < 0:\r\n attacker_ship.gold = 0\r\n attacker_ship.update()\r\n await ctx.send('A mutiny has started on {0}\\'s ship! The treasure hold has been ransacked! '\r\n '{1} gold was taken.'.format(defender, 50))\r\n return\r\n\r\n defender_ship = Ship.find_ship(defender)\r\n if not defender_ship:\r\n await ctx.send('{0} does not have a ship! 
There are no fights'\r\n ' on the high sea if there are no ships to fight'.format(defender))\r\n return\r\n\r\n # actually start fight\r\n em = discord.Embed(title='{0} has attacked {1} :rage: '.format(attacker, defender), colour=0xDDDD00)\r\n\r\n # calculate who wins based on their attack and defense plus random number\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n attacker_msg = ''\r\n defender_msg = ''\r\n\r\n while attacker_ship.hull > 0 and defender_ship.hull > 0:\r\n attack = random.randint(1, 100)\r\n attack += attacker_ship.cannons + attacker_ship.crew\r\n\r\n defense = random.randint(1, 100)\r\n defense += defender_ship.cannons + defender_ship.crew\r\n\r\n defender_ship.damage_hull(attack)\r\n attacker_ship.damage_hull(defense)\r\n\r\n attacker_msg += 'Fired a volley of **{}** cannonballs <a:cannon:554558216889958400> \\n'.format(attack)\r\n defender_msg += '<a:cannon_reversed:554722119905181735> Return fired a volley of **{}** cannonballs \\n'.format(defense)\r\n\r\n\r\n\r\n if attacker_ship.hull > defender_ship.hull: # attacker wins\r\n # base gold at 100, more gold earned for harder fights, less or easier ones\r\n gold = 100 + (defender_ship.level() - attacker_ship.level()) * 2\r\n gold = gold if gold > 0 else 0\r\n attacker_ship.gold += gold\r\n attacker_ship.win += 1\r\n defender_ship.loss += 1\r\n # reset hulls just in case\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n em.add_field(name=\"__{}__ HP: {}\".format(attacker, attacker_ship.hull), value=attacker_msg, inline=True)\r\n em.add_field(name=\"__{}__ HP: {}\".format(defender, defender_ship.hull), value=defender_msg, inline=True)\r\n attacker_ship.update()\r\n defender_ship.update()\r\n\r\n em.add_field(name='{} is the winner! :crossed_swords:'.format(attacker),\r\n value='<a:treasure_chest:554730061463289857> They earned **{}** gold for their coffers.'.format(gold), inline=False)\r\n\r\n else: # defender wins\r\n defender_ship.win += 1\r\n attacker_ship.loss += 1\r\n # reset hulls just in case\r\n attacker_ship.repair_hull()\r\n defender_ship.repair_hull()\r\n em.add_field(name=\"__{}__ HP: {}\".format(attacker, attacker_ship.hull), value=attacker_msg, inline=True)\r\n em.add_field(name=\"__{}__ HP: {}\".format(defender, defender_ship.hull), value=defender_msg, inline=True)\r\n attacker_ship.update()\r\n defender_ship.update()\r\n em.add_field(name='{} is the winner! 
:shield:'.format(defender),\r\n value=' <a:armor:554559559545520128> Their ship survives to fight another day.', inline=False)\r\n\r\n await ctx.send(embed=em)", "def bcp_game_start(self, **kargs):\n self.bcp_player_add(number=1)\n self.bcp_player_turn_start(player=1)\n self.events.post('game_started', **kargs)", "def runGame(self):\n for player in self.game_state.player_list:\n self.player_hand_dict[player.name] = []\n\n \"\"\" Deal the hand out starting with the player after the Dealer \"\"\"\n print \"Dealing cards...\"\n self.dealCards(self.dealer)\n self.printPlayersHands()\n self.playHand()\n dealer = (self.dealer+1)%self.num_players\n\n \"\"\" Play until the termination conditions are met \"\"\"\n if self.isGameFinished() == False:\n \"\"\" Increment dealer \"\"\"\n self.dealer = (self.dealer+1)%self.num_players\n \"\"\" Reset the game state \"\"\"\n self.game_state.newGameState()\n self.decision_list = []\n self.runGame()\n else:\n print \"\"\n print \"Game over!\"", "def __advance(self):\n # If the game is being prepared.\n if self.__current_phase == self.PHASE_PREPARE:\n # If both players are ready.\n if self.__get_current_player().pre_game_prepare() and self.__get_other_player().pre_game_prepare():\n # Start the turn.\n self.__current_phase = self.PHASE_START_TURN\n\n # Begin the game for each player.\n self.__get_current_player().start_game()\n self.__get_other_player().start_game()\n\n # If the game is being set up.\n elif self.__current_phase == self.PHASE_START_TURN:\n # Advance onto the request fire phase.\n self.__current_phase = self.PHASE_REQUEST_SHOT\n\n # Call the start turn method for both players.\n self.__get_current_player().start_turn()\n self.__get_other_player().start_turn()\n\n # If the game requires the user to shoot.\n elif self.__current_phase == self.PHASE_REQUEST_SHOT:\n # Advance onto the fire phase.\n self.__current_phase = self.PHASE_FIRE\n\n # Call the shoot method of the user.\n self.__get_current_player().request_shot()\n\n # If the game requires the other user to be hit.\n elif self.__current_phase == self.PHASE_REQUEST_HIT:\n # Advance onto the hit phase.\n self.__current_phase = self.PHASE_HIT\n\n # Call the other player's request hit method.\n self.__get_other_player().request_hit(self.__current_fire_location)\n\n # If the game shows the hit result.\n elif self.__current_phase == self.PHASE_SHOW_HIT:\n # Advance onto the await phase.\n self.__current_phase = self.PHASE_AWAIT_OPPONENT_SHOT\n\n # Call the player's show hit method.\n self.__get_current_player().show_hit(self.__current_fire_location, self.__current_fire_effect)\n\n # If the game awaits the next shot.\n elif self.__current_phase == self.PHASE_AWAIT_OPPONENT_SHOT:\n # If the opponent has lost.\n if self.__current_fire_effect == Player.SHOT_HIT_TYPE_GAME_OVER:\n # Store the winner's index.\n engine.Engine.game_manager.winner = self.current_player_index\n # Move to the game over phase.\n engine.Engine.load_level(\"GameOver\")\n else:\n # Call the player's await hit method.\n self.__get_current_player().await_opponent_shot()\n\n # If the turn is over.\n if self.current_player_index == 1:\n # Advance to the next turn.\n self.__current_phase = self.PHASE_END_TURN\n else:\n # Advance onto the next fire phase.\n self.__current_phase = self.PHASE_REQUEST_SHOT\n # Increment the user counter.\n self.current_player_index = 1\n\n elif self.__current_phase == self.PHASE_END_TURN:\n # Start a new turn.\n self.__current_phase = self.PHASE_START_TURN\n # Decrement the user counter.\n 
self.current_player_index = 0\n\n # Call the end turn methods.\n self.__get_current_player().end_turn()\n self.__get_other_player().end_turn()", "def start(self):\n self.running = True\n\n while self.running:\n font = pygame.font.Font(None, 30)\n text = font.render(f\"Inventory ({self.hero.inventory})\", 1, (1, 0, 0))\n control = font.render(f\"Move with:\", 1, (1, 0, 0))\n self.screen.blit(self.background, (0, 0))\n self.screen.blit(text, (10, 490))\n self.screen.blit(start, (0, 0))\n self.screen.blit(control, (320, 490))\n self.screen.blit(control_keyboard, (428, 476))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n self.hero.move(up)\n\n elif event.key == pygame.K_DOWN:\n self.hero.move(down)\n\n elif event.key == pygame.K_RIGHT:\n self.hero.move(right)\n\n elif event.key == pygame.K_LEFT:\n self.hero.move(left)\n \n elif event.key == pygame.K_RETURN or pygame.K_KP_ENTER or pygame.K_q:\n self.running = False\n\n if self.hero.inventory == 3 and self.hero.position == (14, 14):\n self.screen.blit(won, (50, 200))\n self.hero.position = (14, 14)\n\n elif self.hero.inventory != 3 and self.hero.position == (14, 14):\n self.screen.blit(lose, (50, 200))\n self.hero.position = (14, 14)\n\n self.allsprites.update()\n self.allsprites.draw(self.screen)\n pygame.display.update()", "def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()", "def startBattle(self):\n defender = self.map.getUnitAt(self.pos)\n attacker = self.selectedUnit\n defender.takeDamage(int(attacker.firepower * attacker.hp))\n attacker.takeDamage(int(defender.firepower * defender.hp))\n self.endBattle()", "def run_game():\n\n # Initialize pygame, settings, and screen object\n pygame.init()\n ai_settings = Settings()\n screen = pygame.display.set_mode((ai_settings.screen_width,\n ai_settings.screen_height))\n pygame.display.set_caption(\"Galaga\")\n\n # Make a Play button\n play_button = Button(screen, \"Play\")\n\n # Create an instance to store game statistics\n stats = GameStats(ai_settings)\n\n # Make a scoreboard\n scoreboard = Scoreboard(ai_settings, screen, stats)\n\n # Make a Ship, bullet group, alien group\n ship = Ship(ai_settings, screen)\n bullets = Group()\n aliens = Group()\n\n # Creating an enemy fleet of aliens\n gf.create_fleet(ai_settings, screen, ship, aliens)\n\n # Start the main loop for the game\n while True:\n # Checking for keyboard events \n gf.check_events(ai_settings, screen, stats, scoreboard,\n play_button, ship, aliens, bullets)\n\n if stats.game_active:\n # Update group objects\n ship.update()\n\n gf.update_bullets(ai_settings, screen, stats,\n scoreboard, ship, aliens, bullets)\n gf.update_aliens(ai_settings, screen, stats, scoreboard, ship, aliens, bullets)\n \n gf.update_screen(ai_settings, screen, stats,\n scoreboard, ship, aliens, bullets, play_button)", "def play_game(self):\r\n\r\n print('Welcome to a game of Concentration!!')\r\n if self.who_goes_first():\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n\r\n while True:\r\n if self.match:\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n self.check_game_end()", "def play_game():\n # Display board.\n display_board()\n # While game is still going.\n while game_still_going:\n # Handle a single turn of an arbitrary player.\n handle_turn(current_player)\n # Flip to another player.\n flip_player()\n # Check weather game is over or not.\n 
check_if_game_over()", "def advance(self):\r\n #if see if the UFO is almost at the edge of the screen\r\n if (self.center.x >= SCREEN_WIDTH-20 or self.center.y >= SCREEN_HEIGHT-20):\r\n #if it is change the velocity to negative to reverse direction\r\n self.velocity.dx *= -2\r\n self.velocity.dy *= -2\r\n \r\n # set x equal to x plus dx\r\n self.center.x += self.velocity.dx\r\n # set y equal to y plus dy\r\n self.center.y += self.velocity.dy\r\n #draw the flying object at its new point.\r\n self.draw()", "def start_game(self):\n player_count = 0\n confirmed_count = 0\n while confirmed_count < 15:\n new = self._next_frame()\n if new:\n percents = self._find_percents()\n if percents:\n self.logger.debug(\"%d players found, confirming %d/15\", len(percents), confirmed_count)\n if player_count == len(percents):\n confirmed_count += 1\n else:\n confirmed_count = 0\n player_count = len(percents)\n self.games.append(Game([Player(self.frame, point) for point in percents]))\n self.game_in_progress = True\n self.logger.info(\"Starting game with %d players\", player_count)", "def start_game(self):\n self._add_mines()", "def start(self):\n while not self.gameover:\n\n # Pop a new block from queue\n self.block = self.queue.pop()\n self.screen.block()\n\n while self.block.mobile:\n # Try to get commands turing a tick\n while not self.tick():\n self.screen.command()\n # Limit CPU cycles, IMPORTANT\n # When debugging or testing, DON'T sleep but go asap\n if not self.debug:\n time.sleep(0.01)\n else:\n self.block.random_move()\n\n # While paused, the block is not mobile so move commands are ignored\n while self.paused:\n self.screen.command()\n time.sleep(0.01)\n continue # So that the block isn't immediately moved down\n\n # After a tick passes, move the block down forcefully\n self.block.down()\n\n # After every collision:\n # Check if there is a full row in the grid\n self.grid.row_is_full()\n\n # Game is now over\n self.screen.print(\"Game over!\")\n self.write_highscore()\n time.sleep(3)\n\n # There is not yet an endgame screen\n self.screen.endgame()", "def play(self):\r\n\r\n gh.report_legend()\r\n\r\n for ship in self.__ships:\r\n ship.set_orientation()\r\n\r\n ship_coords = [ship.coordinates() for ship in self.__ships]\r\n ship_coords = [i for lst in ship_coords for i in lst]\r\n\r\n print(gh.board_to_string(self.__board_size, [], {}, [], ship_coords))\r\n\r\n while self.__ships:\r\n self.__play_one_round()\r\n\r\n gh.report_gameover()", "def start(self):\n with self.players['w'], self.players['b']:\n\n game = 0\n\n while game < self.num_games:\n\n # Print info.\n print \"Game %d - %s [%s] (White) VS: %s [%s] (Black)\" % (game + 1,\n self.players['w'].name,\n type(self.players['w']).__name__,\n self.players['b'].name,\n type(self.players['b']).__name__)\n # Reset board\n self.board.reset()\n\n # Signal to players that a new game is being played.\n [p.new_game() for p in self.players.itervalues()]\n\n curr_player_idx = 'w'\n\n game_pgn = chess.pgn.Game()\n game_pgn.headers[\"White\"] = self.players['w'].name\n game_pgn.headers[\"Black\"] = self.players['b'].name\n game_pgn.headers[\"Date\"] = time.strftime(\"%Y.%m.%d\")\n game_pgn.headers[\"Event\"] = \"Test\"\n game_pgn.headers[\"Round\"] = game\n game_pgn.headers[\"Site\"] = \"My PC\"\n\n _, time_taken = self.play(curr_player_idx, game_pgn=game_pgn)\n\n result = self.board.result(claim_draw=True)\n if result == '1-0':\n winner = self.players['w']\n elif result == '0-1':\n winner = self.players['b']\n else:\n winner = None\n self.data['draws'] += 
1\n print \"Draw.\" \n\n if winner is not None:\n self.data['wins'][winner.name] += 1\n print \"%s wins.\" % winner.name\n\n for color, p in self.players.iteritems():\n print \"Player %s took %f seconds in total\" % (p.name, time_taken[color])\n p.time_taken = 0\n\n game_pgn = game_pgn.root()\n game_pgn.headers[\"Result\"] = result\n with open(resource_filename('guerilla', 'data/played_games/') + self.players['w'].name + '_' +\n self.players['b'].name + '_' + str(game) + '.pgn', 'w') as pgn:\n try:\n pgn.write(str(game_pgn))\n except AttributeError as e:\n print \"Error writing pgn file: %s\" % (e)\n\n self.swap_colours()\n game += 1", "def play_one_move(self):\n self.print(\"top of move\")\n # 1) grab three cups\n c1 = self.take_cup_after(self.current_cup_idx())\n c2 = self.take_cup_after(self.current_cup_idx())\n c3 = self.take_cup_after(self.current_cup_idx())\n print(f\"pick up: {c1}, {c2}, {c3}\")\n self.print(\"after pickup\")\n\n # 2) find a destination cup\n destination_idx = self.find_next_destination(self.current_cup)\n print(\n f\"destination index is {destination_idx}, cup is {self.cups[destination_idx]}\"\n )\n\n # 3) insert cups back into the circle\n self.add_cups(destination_idx, [c1, c2, c3])\n self.print(\"after adding cups..\")\n\n # 4) select the next cup\n self.select_next_cup()\n\n self.print(\"post move\")", "def start_of_game(self):\n pass", "def main():\n #Initialize game\n game = QuoridorGame()\n run = True\n clock = pygame.time.Clock()\n move_type = None\n\n #Run loop\n while run:\n clock.tick(FPS)\n\n for event in pygame.event.get():\n #Quit if user exits window\n if event.type == pygame.QUIT:\n run = False\n\n #Get board list representation and player turn\n board_list = game.get_board()\n player_turn = game.get_player_turn()\n\n #Draw board and player pawns\n draw_board(WIN, board_list)\n draw_players(WIN, game.get_p1_location(), game.get_p2_location())\n if move_type == 'p':\n highlight_moves(WIN, game)\n if move_type == 'h':\n highlight_available_h_fences(WIN, game)\n if move_type == 'v':\n highlight_available_v_fences(WIN, game)\n \n #Get user move type \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_p:\n move_type = 'p'\n if event.key == pygame.K_h:\n move_type = 'h'\n if event.key == pygame.K_v:\n move_type = 'v'\n \n #Make move based on mouse input and move type\n if event.type == pygame.MOUSEBUTTONDOWN and move_type == 'p':\n pos = pygame.mouse.get_pos()\n move_pawn(pos, game)\n if event.type == pygame.MOUSEBUTTONDOWN and move_type == 'h':\n pos = pygame.mouse.get_pos()\n place_horizontal_fence(pos, game)\n if event.type == pygame.MOUSEBUTTONDOWN and move_type == 'v':\n pos = pygame.mouse.get_pos()\n place_vertical_fence(pos, game)\n \n #Reset move type after player makes a valid move\n if player_turn != game.get_player_turn():\n move_type = None\n \n #Display message if either user has won the game. \n if game.is_winner(1):\n player_one_won(WIN)\n if game.is_winner(2):\n player_two_won(WIN)\n\n #Reset game if backspace is pressed. 
\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_BACKSPACE:\n game = QuoridorGame()\n\n #Update display window\n pygame.display.update()\n\n pygame.quit()", "def restart(self):\n self.set_random_pos('starting')\n self.set_random_pos('finishing')\n self.game_loop()", "def game_start():\n herolist = Hero_List(hots_db)\n heroclasses = []\n for item in herolist:\n heroclasses.append(Item(item, 'hero'))\n curgame = Game(Team('home'), Team('enemy'), Team('hero_pool', heroclasses), '')\n return curgame", "def play(self):\n\n input(\"\"\"\nWelcome to Angry Dice! Roll the two dice until you get thru the 3 Stages!\nStage 1 you need to roll 1 & 2\nStage 2 you need to roll ANGRY & 4\nStage 3 you need to roll 5 & 6\nYou can lock a die needed for your current stage\nand just roll the other one, but beware!\nIf you ever get 2 ANGRY's at once, you have to restart to Stage 1!\nAlso, you can never lock a 6! That's cheating!\n\nTo rol the dice, simply input the name of the die you want to roll.\nTheir names are a and b.\n\nPress ENTER to start!\n \"\"\")\n self.cheating = self.roll_parse(\"ab\")\n done = False\n while not done:\n self.print_hand()\n decision = input(\"Roll dice: \")\n self.cheating = self.roll_parse(decision)\n done = self.advance_check()\n self.print_hand()\n print(\"You've won! Calm down!\")", "def startGame(self, yourHand : list, yourPosition : int):\n #self.acitve = True \n self.inGame = True\n self.hand = yourHand\n self.position = yourPosition", "def run_game():\n pygame.init()\n init_settings = Settings()\n\n screen = pygame.display.set_mode(\n (init_settings.screen_width, init_settings.screen_height)\n )\n pygame.display.set_caption(\"Alien Invasion\")\n\n ship = Ship(init_settings, screen)\n\n bullets = Group()\n\n \"\"\" Start Game \"\"\"\n while True:\n \"\"\" listening mouse and key events \"\"\"\n h.check_events(init_settings, screen, ship, bullets)\n ship.update()\n h.update_bullets(bullets)\n h.update_screen(init_settings, screen, ship, bullets)", "def play(self):\n self.player = Knight()\n self._occupy_huts()\n acquired_hut_counter = 0\n\n self.show_game_mission()\n self.player.show_health(bold=True)\n\n while acquired_hut_counter < 5:\n idx = self._process_user_choice()\n self.player.acquire_hut(self.huts[idx-1])\n\n if self.player.health_meter <= 0:\n print_bold(\"YOU LOSE :( Better luck next time\")\n break\n\n if self.huts[idx-1].is_acquired:\n acquired_hut_counter += 1\n\n if acquired_hut_counter == 5:\n print_bold(\"Congratulations! 
YOU WIN!!!\")", "def miner_begin_jumping(self, player):\n\n if self.onGround:\n self.faceTowards(player)\n self.ai_state = AI_JUMPING\n self.ai_counter = 50\n self.jump(self.direction_val*self.max_speed/2, self.max_speed)\n self.changeAnimation('idle', self.direction_id) #TODO: use a jumping animation rather than an idle animation\n self.weapon.update()\n self.weapon.animation.synch_animation_frame(self.animation)\n return\n \n #TODO: other case?", "def _ship_hit(self):\n if self.stats.ships_left > 0:\n # Decrement ships_left\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n\n # Remove remianing aliens & bullets\n self.aliens.empty()\n self.bullets.empty()\n\n # Create new fleet and ship at start location\n self._create_fleet()\n self.ship.center_ship()\n\n # pause\n sleep(0.5)\n else:\n self.stats.game_active = False \n pygame.mouse.set_visible(True)", "def play_game(self, player_1, player_2, show_flag=True):\n players = [player_1, player_2]\n player = players[self.ready_player - 1]\n\n if show_flag:\n print('initial state:')\n self.show_board()\n\n while True:\n if self.check_end_game():\n return self.end_game(show_flag)\n\n if show_flag:\n print(f\"Player {player.index}:\")\n move = player.get_move(self)\n moves = [move]\n while self.sowing(player, move):\n\n if self.check_end_game():\n return self.end_game(show_flag)\n move = player.get_move(self)\n if show_flag: print(f'\\t {player.algo} choice: {move}')\n moves.append(move)\n if show_flag: self.show_board()\n player = players[player.opp_index - 1]", "def step(self):\n prey_neighbors = [x for x in self.model.space.get_neighbors(self.pos, self.vision+ 20, False) if isinstance(x,boid.Boid)]\n nearby_obstacles = [x for x in self.model.space.get_neighbors(self.pos, self.vision + 15, False) if isinstance(x, Obstacle)]\n self.velocity += (self.avoid_collision(nearby_obstacles) * self.collision_separation +\n self.attack(prey_neighbors)) / 2\n self.velocity /= np.linalg.norm(self.velocity)\n new_pos = self.pos + self.velocity * self.speed\n self.model.space.move_agent(self, new_pos)\n self.eat(prey_neighbors)\n\n\n # update for drawing\n self.update()", "def main():\n game = TinkerGame()\n game.setup()\n while game.calculate_points() > 0 and not game.game_over:\n game.play()\n game.end()", "def fainted(self):\n self.pkmn.faint()\n messages = self.effect.attemptAfterTurn(self.pkmn)\n assert messages == [], \"Should receive no messages since nothing was performed\"", "def start_animation(self):\n\t\ttime.sleep(1)\n\t\tself.fishbowl.animate_balls()", "def update(self):\n self.player.eaten_cheese = False\n # Checa se o jogador ou agente chegaram no objetivo\n if self.grid[self.player.x][self.player.y] == 2:\n self.player.score += self.player.reward_amount\n self.done = True\n\n # Checa se o jogador ou agente comeram o queijo\n elif self.grid[self.player.x][self.player.y] == 4:\n self.player.score += 0.2\n self.player.eaten_cheese = True\n self.clear_position(self.player.x, self.player.y)\n\n # Popule a atual posicao do jogador com 1 e a do agente com 10\n if self.player.name == \"Player\":\n self.grid[self.player.x][self.player.y] = 1\n elif self.player.name == \"Agent\":\n self.grid[self.player.x][self.player.y] = 10", "def start_game(self) -> None:\n if self.started and not self.finished:\n self.finish_game()\n \n self.started = True\n self.finished = False\n\n self.game_count += 1\n self.games_list[self.game_index] = {\n \"total_kills\": 0,\n \"players\": [],\n \"kills\": {}\n }\n\n return", "def main():\n number_of_players = 
get_number_of_players()\n number_of_decks = get_number_of_decks()\n game_data = setup_game(number_of_players)\n\n player_list = game_data[0]\n play_shoe = game_data[2]\n play_dealer = game_data[1]\n play_again = True\n\n while play_again:\n replay = play_game(play_shoe, player_list, play_dealer, number_of_decks)\n if replay:\n play_shoe = replay[1]\n else:\n play_again = False\n \n print(\"Thanks for playing\")", "async def run_game(self):\n await self.run_betting()\n self.force_bet()\n await self.print_players_with_bet()\n time.sleep(self.MESSAGE_GAP)\n cards_msg = await self.send_message(self.channel, \"Retrieving a new deck, shuffling, and dealing cards! Please hold!\")\n self.deal_cards()\n time.sleep(self.MESSAGE_GAP)\n await self.edit_message(cards_msg, cards_msg.content + \"\\n\\n\" + self.str_players_with_hand())\n time.sleep(self.MESSAGE_GAP)\n while self.still_playing_game():\n await self.run_round()\n self.ready_new_round_players()\n await self.send_message(self.channel, \"There are no more players eligible to play, so the game is over!\"\n \" Here evaluation to see who won!\\n\" + self.evaluate_game())\n time.sleep(self.MESSAGE_GAP)\n await self.send_message(self.channel, \"Resetting players for next game...\")\n time.sleep(self.MESSAGE_GAP)\n self.reset_players()", "def start_game(self):\n controller = self.controller\n controller.on_init()\n\n self.game_running = True\n\n while self.game_running and controller.is_character_alive():\n controller.keyboard_game_control(self)\n\n self.game_running = False\n\n self.end_game()", "def play_game():\n\n _initial_deal()\n\n main_window.mainloop()", "def callback_game_loop(self) -> None:\n self._goal_generate()\n self._update()\n self.reset()\n\n while self._player != self._goal:\n self._update()\n action = self._action_callback(\n self._player.np,\n self._goal.np,\n *self._action_callback_args,\n )\n if action == \"QUIT\":\n break\n self._player_erase()\n self.FUNCMAP[action]()\n self._update()\n\n if self._display:\n time.sleep(0.1)\n try:\n if chr(cv2.waitKey(5)) in self.KEYMAP[\"QUIT\"]:\n break\n except ValueError:\n pass\n\n if self._display:\n print(f\"Steps taken: {self._routes[self._current_route_key]}\")\n\n if self._display:\n cv2.waitKey(0)", "def start(self, board, player1, player2):\n # Check to see who goes first\n if random.randint(0, 1) == 0:\n self.printGoFirst(player1)\n\n while True:\n # Make a move and check if that move wins/draws the game\n board.makeMove(player1.getNextMove(board, self.letter1), self.letter1)\n board.printBoard()\n if board.isWinner(self.letter1):\n self.printWinner(player1)\n break\n if board.isFull():\n print \"The game is drawn\"\n break\n\n # Make a move and check if that move wins/draws the game\n board.makeMove(player2.getNextMove(board, self.letter2), self.letter2)\n board.printBoard()\n if board.isWinner(self.letter2):\n self.printWinner(player2)\n break\n if board.isFull():\n print \"The game is drawn\"\n break\n else:\n self.printGoFirst(player2)\n\n while True:\n # Make a move and check if that move wins/draws the game\n board.makeMove(player2.getNextMove(board, self.letter1), self.letter1)\n board.printBoard()\n if board.isWinner(self.letter1):\n self.printWinner(player2)\n break\n if board.isFull():\n print \"The game is drawn\"\n break\n\n # Make a move and check if that move wins/draws the game\n board.makeMove(player1.getNextMove(board, self.letter2), self.letter2)\n board.printBoard()\n if board.isWinner(self.letter2):\n self.printWinner(player1)\n break\n if board.isFull():\n 
print \"The game is drawn\"\n break", "def start(self):\n for game_object in self.game_objects:\n game_object.start()\n # end for\n self.time = time.time()\n self.paused = False\n self.running = True\n print 'GAME STARTED'", "def play(self):\n if self.rounds == 0:\n # When the game has not begun yet, the game must\n # give the players their pieces and a corner to start.\n max_x = ((self.board).size[1] - 1)\n max_y = ((self.board).size[0] - 1)\n starts = [(0, 0), (max_y, max_x), (0, max_x), (max_y, 0)]\n\n for i in xrange(len(self.players)):\n (self.players[i]).add_pieces(self.all_pieces)\n (self.players[i]).start_corner(starts[i])\n\n # if there is no winner, print out the current player's name and\n # let current player perform a move\n if self.winner() == \"None\":\n current = self.players[0]\n print\n \"Current player: \" + current.name\n proposal = current.do_move(self)\n if proposal == None:\n # move on to next player, increment rounds\n first = (self.players).pop(0)\n self.players = self.players + [first]\n self.rounds += 1\n\n\n # ensure that the proposed move is valid\n elif self.valid_move(current, proposal.points):\n # update the board with the move\n (self.board).update(current, proposal.points)\n # let the player update itself accordingly\n current.update_player(proposal, self.board)\n # remove the piece that was played from the player\n current.remove_piece(proposal)\n # place the player at the back of the queue\n first = (self.players).pop(0)\n self.players = self.players + [first]\n # increment the number of rounds just played\n self.rounds += 1\n\n # interrupts the game if an invalid move is proposed\n else:\n raise Exception(\"Invalid move by \" + current.name + \".\")\n\n else:\n print\n \"Game over! And the winner is: \" + self.winner()", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def _ship_hit(self):\n # takes one life away if there's lives left\n # also removes that life from the scoreboard\n if self.stats.ships_left > 0:\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n\n # removes alien fleet + leftover bullets\n self.aliens.empty()\n self.bullets.empty()\n\n # creates a new fleet and centers players ship\n self._create_fleet()\n self.ship.center_ship()\n\n # stops game for a short while\n sleep(1.5)\n\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)" ]
[ "0.63546705", "0.59987706", "0.5944259", "0.5863334", "0.58432084", "0.5839499", "0.5821344", "0.5808416", "0.57779545", "0.5738902", "0.57381016", "0.5735922", "0.5734481", "0.57334346", "0.5728595", "0.5717012", "0.5702629", "0.5669737", "0.5666819", "0.5664399", "0.5663105", "0.56608796", "0.56550866", "0.564675", "0.561698", "0.5604627", "0.5602721", "0.5594777", "0.55608284", "0.5558924", "0.55585545", "0.5556427", "0.5554684", "0.55447346", "0.5541468", "0.55195624", "0.55101794", "0.5505408", "0.54960585", "0.5478292", "0.5477316", "0.547324", "0.5469767", "0.5464181", "0.54507285", "0.5443976", "0.5435237", "0.54325026", "0.5427994", "0.54227436", "0.54216886", "0.54158586", "0.5414071", "0.5413617", "0.54094416", "0.53964573", "0.5392658", "0.5388881", "0.5386394", "0.5385583", "0.5379346", "0.5369895", "0.5367097", "0.53610027", "0.535373", "0.5352846", "0.5346621", "0.5344328", "0.5340298", "0.5337186", "0.53356624", "0.533341", "0.53326833", "0.53322613", "0.53262335", "0.5323858", "0.5322382", "0.5315764", "0.5310541", "0.5307968", "0.5300479", "0.5292957", "0.5290773", "0.52874285", "0.5285448", "0.52823806", "0.5273017", "0.52606636", "0.52598727", "0.5256868", "0.52564716", "0.52563184", "0.5255748", "0.52542025", "0.5251117", "0.52510965", "0.52473193", "0.5247216", "0.5242336", "0.52408355" ]
0.5274436
86
Get the player scores.
def get_scores(self): return [(self.players[p.get_color()], p.get_score()) for p in self.state.get_players()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_scores(self):\n return self.score", "def get_score(self, player):\n if player in self.player_scores:\n return self.player_scores[player]\n else:\n raise Exception(\"Player not in score list\")", "def score(self):\n return self.client.call('GET', self.name + 'score')", "def scores_(self):\n return self.predictor.scores_", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def get_scores(self) -> tuple:\n return (self.get_score(), self.p2_score)", "def get_scores_in_order_of_players(self):\n \n players = self.referee.get_current_state().get_players()\n\n player_scores = []\n for player_color in self.player_color_order:\n for player in players:\n if player_color == player.get_color():\n player_scores.append(player.get_score())\n break\n\n return player_scores", "def getScore(self):\n\t\tself.scoreList = [submissionsss.score for submissionsss in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.scoreList", "def find_all_by_player(self, player):\n cursor = self._connection.cursor()\n command = 'SELECT * FROM scores WHERE player=? ORDER BY level'\n cursor.execute(command, [player])\n return cursor.fetchall()", "def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores", "def get_scores(self, tournament: Tournament):\n self.model.eval()\n # collate_fn = lambda x: collate_teams(x, tournament.max_members)\n dl_rank = DataLoader(tournament.ranking, num_workers=self.jobs, batch_size=self.bs, shuffle=False)\n iterator = tqdm(dl_rank, position=0, desc=f'{tournament.tournament_id} ranking', disable=True)\n scores = []\n for i, team in enumerate(iterator):\n score = self.model.get_team_score(team.to(self.device))\n scores.append(score.cpu().numpy())\n\n scores = np.concatenate(scores)\n return scores.flatten()", "def get_score(self):\n\n sql = \"SELECT score FROM Users WHERE username = '\" + self.username + \"'\"\n self.cursor.execute(sql)\n return self.cursor.fetchall()[0][0]", "def scores(self) -> List[float]:\n if not self.prediction:\n return []\n return [sentence.score for sentence in self.prediction.sentences]", "def get_score(self, player):\n\n df = pd.read_csv('RPSscores.csv')\n if not str(player) in df['Name'].to_dict().values():\n df.loc[len(df.index)] = [str(player),\n 0, 0, 0]\n player_index = int(df.loc[df['Name'] == str(player)].index[0])\n result = 'wins: ' + str(df.iloc[player_index, 1]) + '\\n' + \\\n 'draws: ' + str(df.iloc[player_index, 2]) + '\\n' + \\\n 'losses: ' + str(df.iloc[player_index, 3])\n return result", "def calculate_scores():\n all_people = models.Leaderboard.query.order_by(\n models.Leaderboard.score.desc()).all()\n print(all_people)\n users = []\n scores = []\n for person in all_people:\n users.append(person.username)\n scores.append(person.score)\n return users, scores", "def returnPlayerStats(self):\n\t\tplayerStats = [self.name, \n\t\t\t\t\t self.agility, \n\t\t\t\t\t self.personality, \n\t\t\t\t\t self.sanity, \n\t\t\t\t\t self.strength, \n\t\t\t\t\t self.progress]\n\t\treturn playerStats", "def getScore(self):\r\n return self._score", "def all_scores(self):\r\n if not self.child_history:\r\n return None\r\n return [self.child_history[i].get('score') for i in xrange(0, len(self.child_history))]", "def get_score(self):\n return 
tuple(self.score)", "def getScore(data):\n return score", "def scores_for(self, board):\r\n scores = [1]*board.width\r\n for i in range(board.width):\r\n if not board.can_add_to(i):\r\n scores[i] = -1\r\n elif board.is_win_for(self.checker):\r\n scores[i] = 100\r\n elif board.is_win_for(self.opponent_checker()):\r\n scores[i] = 0\r\n elif self.lookahead == 0:\r\n scores[i] = 50\r\n else:\r\n board.add_checker(self.checker, i)\r\n other = AIPlayer(self.opponent_checker(), self.tiebreak, self.lookahead-1)\r\n other_scores = other.scores_for(board)\r\n if max(other_scores) == 100:\r\n scores[i] = 0\r\n elif max(other_scores) == 50:\r\n scores[i] = 50\r\n elif max(other_scores) == 0:\r\n scores[i] = 100\r\n board.remove_checker(i)\r\n return scores", "def find_all(self):\n cursor = self._connection.cursor()\n cursor.execute('SELECT * FROM scores ORDER BY level')\n all_scores = cursor.fetchall()\n return all_scores", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def score_game(self):\r\n players = self.player_control.get_players()\r\n ###game_control = self.game_control\r\n ###if game_control is not None:\r\n ### game_control.set_vals() # Update any changed game control settings\r\n if len(players) == 0:\r\n return # No players\r\n n_top_score = 0\r\n top_score = players[0].get_score()\r\n for player in players:\r\n if player.get_score() > top_score:\r\n top_score = player.get_score()\r\n for player in players:\r\n player_score = player.get_score()\r\n if player_score == top_score:\r\n n_top_score += 1\r\n \r\n for player in players:\r\n player_score = player.get_score()\r\n player_played = player.get_played()\r\n player_ties = player.get_ties()\r\n player_wins = player.get_wins()\r\n new_played = player_played+1\r\n player.set_played(new_played)\r\n player.set_prop(\"played\")\r\n if player_score == top_score:\r\n if n_top_score > 1:\r\n new_ties = player_ties + 1\r\n player.set_ties(new_ties)\r\n player.set_prop(\"ties\")\r\n else:\r\n new_wins = player_wins + 1\r\n player.set_wins(new_wins)\r\n player.set_prop(\"wins\")\r\n self.update_score_window()", "def getScore(self):\n return self._score", "def get_score(self):\r\n score = self.latest_score()\r\n return {'score': score if score is not None else 0,\r\n 'total': self._max_score}", "def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score", "def scores_for(self, board):\r\n scores = [50] * board.width\r\n\r\n for col in range(board.width):\r\n if not board.can_add_to(col):\r\n scores[col] = -1\r\n elif board.is_win_for(self.checker):\r\n scores[col] = 100\r\n elif board.is_win_for(self.opponent_checker()):\r\n scores[col] = 0\r\n elif self.lookahead == 0:\r\n scores[col] = 50\r\n else: \r\n board.add_checker(self.checker, col)\r\n opponent = AIPlayer(self.opponent_checker(), self.tiebreak, self.lookahead - 1)\r\n opp_scores = opponent.scores_for(board)\r\n if max(opp_scores) == 100:\r\n scores[col] = 0\r\n elif max(opp_scores) == 0:\r\n scores[col] = 100\r\n else:\r\n scores[col] = 50\r\n board.remove_checker(col)\r\n\r\n return scores", "def get_current_score(self):\n\n # Return the player's current turn score\n return self._current_score", "def get_score(self):\n for response in self.response_list:\n self.score += 
response.get_score", "def all_scores(self):\r\n if not self.child_history:\r\n return None\r\n return [self.score_for_attempt(index) for index in xrange(0, len(self.child_history))]", "def _get_current_teams_score(self):\n for game in self._get_live_games():\n teams_playing = [x['abbreviation'] for index, x in game['teams'].items()]\n if self.team in teams_playing:\n # Our team is playing in this game, get the score \n return int(game['scores'][self.team])", "def print_scores(self):\n print(\"scores: \", self.get_scores())", "def get_scores(self):\n return pd.DataFrame(self._scores)", "def childScores(self):\n return [x.score for x in self.children]", "def find_scores(self):\n p1_score = self.p1_store()\n p2_score = self.p2_store()\n return p1_score, p2_score", "def GetResult(self, playerjm):\n return self.score / len(self.scores)", "def get_total_score(self):\n\n # Return the player's total score\n return self._total_score", "def get_current_score(self, game_id: int, player_id: int) -> int:\n with self.eng.session_mgr() as session:\n return session.query(\n func.sum(TablePlayerRound.score)\n ).filter(and_(\n TablePlayerRound.player_key == player_id,\n TablePlayerRound.game_key == game_id\n )).scalar()", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\r\n return self.lcp.get_score()", "def getScores():\r\n results = \"\"\r\n with sqlite3.connect(database_file) as conn:\r\n cursor = conn.cursor()\r\n team_scores = cursor.execute(\"\"\" SELECT * FROM scores;\"\"\")\r\n\r\n for row in team_scores.fetchall():\r\n teamname, auto, rc, spirit, video = row\r\n results += result_string.format(teamname, auto, rc, spirit, video) + \"\\n\"\r\n return results", "def get_score(self):\n return self.score", "def get_score(self):\n return self.__score", "def play_game(self):\n # print(\"Playing a random game!\")\n for round_num in range(1, self.rounds_to_play + 1):\n # print(\"Play Round No. 
{}\".format(round_num))\n round = Round(round_num, self.players)\n score = round.play_round()\n # print(len(round.played_cards))\n for i in range(self.num_players):\n self.scores[i] += score[i]\n # print(\"Scores: {}\".format(self.scores))\n # print(\"Final scores: {}\".format(self.scores))\n for player in self.players:\n player.reset_score()\n return self.scores", "def get_players(self):\r\n return self.players.values()", "def getScore(self,board):\n return board.getScore()[self.tile]", "def get_list_team_scores(self):\n scores = defaultdict(lambda: {\n \"scored_xg\": [],\n \"conceded_xg\": [],\n \"home_adv\": 0,\n \"expected_points\": 0\n })\n\n for g in self.games:\n scores[g.HomeTeam][\"scored_xg\"].append(g.FTHG)\n scores[g.HomeTeam][\"conceded_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"scored_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"conceded_xg\"].append(g.FTHG)\n\n for team in scores.keys():\n scores[team][\"expected_points\"] = (self.get_table(metric='points')[team] /\n len(scores[team][\"scored_xg\"]))\n\n return scores", "def get_score(self):\n\n return self._score", "def scoreGame(self):\n # create valueLs[card1,card2,...], pass it to sumHandReturnPoints(valueLs) or twoCardReturnPoints(valueLs)\n scoreLs = []\n ### Score of row\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n points = self.sumHandReturnPoints(valueLs)\n scoreLs.append(points)\n\n ### Score of 4-card column\n for offset in range(0,3): # 0,1,2\n tmpLs = []\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n if len(valueLs) == 5:\n iterStart = 1\n else:\n iterStart = 0\n card = valueLs[iterStart+offset]\n tmpLs.append(card)\n points = self.sumHandReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Score of 2-card column\n #(1) 1st column\n valueLs1 = self.table['row1']\n valueLs2 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs1[0].get_rank())\n tmpLs.append(valueLs2[0].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points)\n #(2) 5th column\n valueLs3 = self.table['row1']\n valueLs4 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs3[-1].get_rank())\n tmpLs.append(valueLs4[-1].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Add up scoreLs\n sumPoints = 0\n for points in scoreLs:\n sumPoints += points\n return sumPoints", "def score(self):\n score_message = {\n 'Onewins': \"\\nThe Winner is Player 1!\",\n 'Twowins': \"\\nThe Winner is Player 2!\",\n 'Tie': \"\\nTie! 
Looks like everyone's a winner!\",\n 'Nowinner': \"\\nYikes, neither of you win!\"\n }\n if self.pone_score > self.ptwo_score:\n print(score_message['Onewins'])\n elif self.pone_score < self.ptwo_score:\n print(score_message['Twowins'])\n elif self.pone_score == 0 and self.ptwo_score == 0:\n print(score_message['Nowinner'])\n else:\n print(score_message['Tie'])", "def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores", "def extract_score(self, json):\n\t\ttry:\n\t\t\treturn int(json['player_score'])\n\t\texcept KeyError:\n\t\t\treturn 0", "def get_highscores(self):\n return self.filter(active=False, finished=True).order_by(\"-score\")", "def get_highscores(self):\n return self.database.get_high_scores(self.difficulty)", "def playerStandings():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"select player_id, player_name, wins, (wins + losses) as total_played from normalized_wins_and_losses order by wins desc, total_played desc;\")\n player_standings = db_cursor.fetchall()\n db_conn.commit()\n db_conn.close()\n return player_standings", "def _update_score(self) -> None:\n\n # setting new score by iterating over players\n self.score_play[self.n_play_turns, ] = [\n self._score_table[(\n self.contract.level,\n self.contract.suit,\n self.tricks[i],\n self.contract.player_vulnerability[i],\n int(self.contract.double + self.contract.redouble)\n )]\n for i in range(NUM_PLAYERS)\n ]", "def drawsheet_get_score(player, scores):\n def distance(score, player):\n dx = float(score[0] - player[0]) / 5\n dy = float(score[1] - player[1])\n if dy < 0:\n dy *= 3\n\n return math.sqrt(dx * dx + dy * dy)\n\n if len(scores) == 0:\n return None\n\n scores.sort(key=lambda s: distance(s[1], player[1]))\n #print([(k, distance(k[1], player[1])) for k in scores[:3]])\n score = scores[0]\n del scores[0]\n\n return score[0]", "def readScore(self):\n return self.zmwMetric(\"ReadScore\")", "def set_scores(apps, schema_editor):\n\n Game = apps.get_model(\"stats\", \"Game\")\n for game in Game.objects.all():\n score_allies = 0\n score_opponents = 0\n player_stats = game.playerstat_set.all()\n for stat in player_stats:\n if stat.is_opponent:\n score_opponents += stat.scored\n else:\n score_allies += stat.scored\n\n game.score_allies = score_allies\n game.score_opponents = score_opponents\n game.save()", "def getScores(self,query):\n pass", "def getpoints(self, player):\n return self.Points[player]", "def get_local_score(self):\n for candidate in self.candidate_list:\n self.score += candidate.get_score()", "def fetch_players_stats():\n players_scraper = PlayerStatsScraper(API_URL, API_HEADERS)\n result = players_scraper.save_objects()\n return result", "def perform_get_scores(responder, options):\n match = options['<match-id>']\n all_scores = scores.get_match_scores(match)\n\n if options.get(yaml_opt, False):\n responder(yaml.dump({'scores': all_scores}))\n else:\n if all_scores is None:\n responder('No scores available for match {0}'.format(match))\n else:\n for tla, score in all_scores.iteritems():\n responder('Team {0} scored {1} in match {2}'.format(tla, score, match))", "def getScorecard(self, **kwargs):\n self.dctScorecard[\"players\"] = [team.get_scorecard() for team in self.team_list]\n return self.dctScorecard", "def 
get_scores(self, params):\n ep = ENDPOINTS.GET_SCORES\n self._check_parameters(ep, params)\n url = self.base_url.format(ep.EXTENSION)\n url = self._extend_url(url, params)\n return self._process_url(url)", "def players(self):\n return self._get(\"players\")", "def get_current_scores_buffer(self):\n\n player_scores = self.get_scores_in_order_of_players()\n score_string = \"Scores:\\n\"\n\n for color, score in zip(self.player_color_order, player_scores):\n player_score = \"{}: {}\".format(color, score)\n score_string += player_score\n score_string += \"\\t\"\n\n return score_string", "def fetch_points(self):\n soup = self.get_soup(\"highscore\")\n\n # find correct line in rankings table\n line = soup.find(\"tr\", {\"class\": \"myrank\"})\n\n rank = int(line.find(\"td\", {\"class\": \"position\"}).contents[0].strip())\n points = int(line.find(\"td\", {\"class\": \"score\"}).contents[0].strip().replace(\".\", \"\"))\n\n return OrderedDict([(\"ranking\", rank), (\"points\", points)])", "def input_player_scores_checked(self):\r\n score_white = self.input_player_score_white()\r\n score_black = self.input_player_score_black()\r\n try:\r\n if score_white + score_black == 1:\r\n return score_white, score_black\r\n else:\r\n raise ValueError\r\n except ValueError:\r\n print(\"La somme des scores des deux joueurs doit être égale à 1!\")\r\n return self.input_player_scores_checked()", "def get_initial_scores(self) -> Dict[str, float]:\n temp_game = Game(self.game.configuration, self.game.initialization)\n scores_dict = {\n self.game.configuration.agent_pbk_to_name[agent_pbk]: score\n for agent_pbk, score in temp_game.get_scores().items()\n }\n return scores_dict", "def get_stats(self):\n\n win_points = 0\n lose_points = 0\n\n for username in self.bets:\n bet_for_win, points = self.bets[username]\n if bet_for_win:\n win_points += points\n else:\n lose_points += points\n\n return win_points, lose_points", "def getScore(self, i):\n return self.scores[i - 1]", "def get_score(self):\n return sum([Letters.get_value(tile.letter) for tile in self.tiles])", "def test_scores(self):\n pig = game.pig.Pig('PlayerA', 'PlayerB', 'PlayerC')\n self.assertEqual(\n pig.get_score(),\n {\n 'PlayerA': 0,\n 'PlayerB': 0,\n 'PlayerC': 0\n }\n )", "def get_score(self):\n return float(self._score)", "def get_players_by_rank(self):\n return sorted(self.participants, key=lambda p: p.tournament_score, reverse=True)", "def vars(cls):\n return {\"score\": float}", "def find_winner_scores(self):\n p1_score, p2_score = self.find_scores()\n if p1_score > p2_score:\n winner = 1\n elif p1_score < p2_score:\n winner = 2\n else:\n winner = 0\n return winner, p1_score, p2_score", "def update_scores(self):\r\n totals = [0, 0, 0, 0]\r\n for player in range(0, 4):\r\n for round_num in range(0, 17):\r\n try:\r\n bid = int(self.spin_bids[player][round_num].get())\r\n tricks = int(self.spin_tricks[player][round_num].get())\r\n except ValueError:\r\n bid = -1\r\n tricks = -1\r\n score = calc_score(min(round_num+1, 13), bid, tricks)\r\n self.lbl_scores[player][round_num].configure(text=str(score))\r\n totals[player] += score\r\n for player in range(0, 4):\r\n self.lbl_totals[player].configure(text=str(totals[player]))\r\n return totals[0] + totals[1] + totals[2] + totals[3]", "def players(self):\n return self.currents.player", "def get_score(self):\n rewards, resets = self.runner.get_rewards_resets()\n self.runner.clear_rewards_resets()\n assert rewards.ndim == 1 and resets.ndim == 1, (rewards.ndim, resets.ndim)\n assert rewards.shape[0] == 
resets.shape[0], (rewards.shape, resets.shape)\n scores = [0]\n for t in reversed(range(rewards.shape[0])):\n if resets[t]:\n scores.append(0)\n scores[-1] += rewards[t]\n return np.mean(scores)", "def get_score(self):\r\n if self.is_complete():\r\n score = 1\r\n elif self.is_half_complete():\r\n score = 0.5\r\n else:\r\n score = 0\r\n return {'score': score,\r\n 'total': self.max_score()}", "def playerStandings(matchid):\n DB = dbc()\n c = DB.cursor()\n c.execute('SELECT matches.playerid, name, win, total_matches, \\\n score, played, bye \\\n FROM matches JOIN players \\\n ON matches.playerid = players.playerid \\\n WHERE matches.matchid = %s \\\n ORDER BY matches.score DESC', (matchid,))\n player_stats = c.fetchall()\n DB.close()\n return player_stats", "def calculate_score(player_cards):\n score = sum(player_cards)\n return score", "def get_player_best_score(self, player):\n return self.get_highscores().filter(player=player).first()", "def retrieve_all_scores(database_connection: mysql.connector.connect\n ) -> List[int]:\n cursor = database_connection.cursor()\n query = (\"SELECT pm.panelistscore FROM ww_showpnlmap pm \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"WHERE s.bestof = 0 AND s.repeatshowid IS NULL \"\n \"AND pm.panelistscore IS NOT NULL \"\n \"ORDER BY pm.panelistscore ASC;\")\n cursor.execute(query)\n result = cursor.fetchall()\n\n if not result:\n return None\n\n scores = []\n for row in result:\n scores.append(row[0])\n\n return scores", "def get_score_history(self):\n return self._score_history", "def say_scores(score0, score1):\n print(\"Player 0 now has\", score0, \"and Player 1 now has\", score1)\n return say_scores", "def get_r_score(self):\n return self.r_score", "def get_scores(self):\n return SklearnModel.evaluate_no_ground_truth_classifier_metrics(self.X_test, self.predictions)", "def check_score(self) -> None:\n self.player_1, self.player_2 = 0, 0\n for cell in self.cells:\n if cell.player == 1:\n self.player_1 += 1\n elif cell.player == 2:\n self.player_2 += 1", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def playerStandings():\n\n getPlayers = \"SELECT id, name, wins, matches FROM playerstats ORDER BY wins DESC\"\n players = executeQuery({'dbname': 'tournament', 'query' : getPlayers, 'type' : 'find'})\n return players", "def find_all_by_level(self, level):\n cursor = self._connection.cursor()\n command = 'SELECT * FROM scores WHERE level=? ORDER BY score'\n cursor.execute(command, [level])\n all_scores_by_level = cursor.fetchall()\n return all_scores_by_level" ]
[ "0.81142956", "0.7553951", "0.738606", "0.7267321", "0.719919", "0.71760154", "0.7120312", "0.704196", "0.6985496", "0.6970983", "0.6936181", "0.6799197", "0.6795344", "0.6784494", "0.6777752", "0.67771673", "0.6773122", "0.6722797", "0.6709587", "0.66867256", "0.66849506", "0.6681714", "0.6673023", "0.6673023", "0.6673023", "0.6670977", "0.66663724", "0.6645387", "0.6644018", "0.6637785", "0.6605198", "0.659989", "0.65964013", "0.6595021", "0.6574269", "0.65563184", "0.65552896", "0.6541076", "0.65407217", "0.65217364", "0.6506385", "0.6504734", "0.6504734", "0.6504734", "0.64738643", "0.64676356", "0.64439815", "0.6420356", "0.6412884", "0.6411397", "0.64001137", "0.6397124", "0.639453", "0.6392098", "0.63855124", "0.6376717", "0.63668597", "0.6338088", "0.6320355", "0.6310468", "0.63053435", "0.63037586", "0.63013804", "0.625223", "0.6250168", "0.62474567", "0.62393916", "0.623457", "0.6210558", "0.62059575", "0.6190886", "0.6190066", "0.6168178", "0.6156113", "0.6154375", "0.61522585", "0.61460584", "0.6120968", "0.6120522", "0.61121124", "0.60931575", "0.60872465", "0.6073818", "0.6060402", "0.6058325", "0.60535806", "0.5984663", "0.5978629", "0.59684795", "0.59594476", "0.595653", "0.59550697", "0.5952092", "0.59510297", "0.59382284", "0.5936364", "0.5932597", "0.5931696", "0.59224", "0.5917628" ]
0.8255623
0
Get the victor(s) if the game is over. Players in the violators list are not eligible for victory.
def get_victors(self): if self.is_game_over(): scores = [p.get_score() for p in self.state.get_players()] if len(scores) == 0: return [] max_score = max(scores) victors = [] for p in self.state.get_players(): if p.get_color() not in self.violators and p.get_score() == max_score: victors.append(self.players[p.get_color()]) return victors else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CheckVictoryCondition(self):\n opponentVictory = True\n for char in self.screen.characters:\n if char.team == 1 and char.leader and not char.dead:\n opponentVictory = False\n if opponentVictory:\n self.screen.refresh()\n self.music.stop()\n sys.exit()\n\n for victory in self.victories:\n playerVictory = True\n nextLevel = victory['next_level']\n if victory['condition'] == 'destroy':\n for char in self.screen.characters:\n if not char.dead and char.team == 2:\n playerVictory = False\n elif victory['condition'] == 'kill leaders':\n for char in self.screen.characters:\n if not char.dead and char.team == 2 and char.leader:\n playerVictory = False\n if playerVictory:\n print('You win')\n if self.music:\n self.music.stop()\n self.screen.objects = []\n self.screen.tileEffects = []\n self = Level(self.screen, nextLevel)", "def losses(self):\n return [g for g in self.games if g.winner is not self.team]", "def attack(self, opponents):\n \n living_heroes = list()\n dead_heroes = list()\n living_opponents = list()\n dead_opponents = list()\n \n for hero in self.heroes:\n if hero.is_alive():\n living_heroes.append(hero)\n else:\n dead_heroes.append(hero)\n \n for hero in opponents.heroes:\n if hero.is_alive():\n living_opponents.append(hero)\n else:\n dead_opponents.append(hero)\n \n while len(living_heroes) > 0 and len(living_opponents) > 0:\n # As long as there are living heroes on both sides...\n random_hero = random.choice(self.heroes)\n random_opponent = random.choice(opponents.heroes)\n # Pick one from each side...\n if random_hero.is_alive() and random_opponent.is_alive():\n # And have them fight\n random_hero.fight(random_opponent)\n # When one of them dies, remove them from the list of living heroes, and add them to the list of dead heroes\n if random_hero.is_alive() == False and random_opponent.is_alive() == True:\n dead_heroes.append(random_hero)\n living_heroes.remove(random_hero)\n elif random_hero.is_alive() == True and random_opponent.is_alive() == False:\n dead_opponents.append(random_opponent)\n living_opponents.remove(random_opponent)\n # break", "def others(state, alive=True):\n me = state['current_player']\n all_players = state['gladiators']\n others = {i: g for i, g in enumerate(all_players) if i != me}\n\n if alive:\n others = {i: g for i, g in others.items() if g['cur_hp'] > 0}\n\n return others", "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive", "def has_victim(self):\n # first-party\n from tcex.api.tc.v3.victims.victim_filter import VictimFilter\n\n victims = VictimFilter(Tql())\n self._tql.add_filter('hasVictim', TqlOperator.EQ, victims, TqlType.SUB_QUERY)\n return victims", "def player_death(game_event):\n print(\"Player Death\")\n # Get the userid from the event\n victim = game_event.get_int('userid')\n attacker = game_event.get_int('attacker')\n print(\"victim: %s\" % victim)\n print(\"attacker: %s\" % attacker)\n \n #victim_edict = edict_from_userid(victim)\n #attacker_edict = edict_from_userid(attacker)\n #print(\"victim_edict: %s\" % victim_edict)\n #print(\"attacker_edict: %s\" % attacker_edict)\n \n # Get the CPlayerInfo instance from the userid\n victimplayerinfo = playerinfo_from_userid(victim)\n attackerplayerinfo = playerinfo_from_userid(attacker)\n print(\"victimplayerinfo: %s\" % victimplayerinfo)\n print(\"attackerplayerinfo: %s\" % attackerplayerinfo)\n # And finally get the player's name \n #victimname = 
victimplayerinfo.get_name()\n #attackername = attackerplayerinfo.get_name()\n #print(\"victimname: %s\" % victimname)\n #print(\"attackername: %s\" % attackername)\n \n # Get the index of the player\n victimindex = index_from_userid(victim)\n attackerindex = index_from_userid(attacker)\n print(\"victimindex: %s\" % victimindex)\n print(\"attackerindex: %s\" % attackerindex)\n \n print(\"victim_is_fake_client: %s\" % victimplayerinfo.is_fake_client())\n print(\"attacker_is_fake_client: %s\" % attackerplayerinfo.is_fake_client())\n \n victim_steamid = victimplayerinfo.get_networkid_string()\n attacker_steamid = attackerplayerinfo.get_networkid_string()\n \n if not victimplayerinfo.is_fake_client() and not attackerplayerinfo.is_fake_client():\n \n print(\"victim_steamid: %s\" % victim_steamid)\n print(\"attacker_steamid: %s\" % attacker_steamid)\n \n victim_64 = convertSteamIDToCommunityID(victim_steamid)\n attacker_64 = convertSteamIDToCommunityID(attacker_steamid)\n \n kick_player, v_balance, a_balance = leetcoin_client.recordKill(victim_64, attacker_64)\n if v_balance == \"noreg\":\n SayText2(message=\"Unregistered kill/death. Win free bitcoin by registering at leet.gg! (if you haven't already)\").send(victimindex)\n SayText2(message=\"Unregistered kill/death. Win free bitcoin by registering at leet.gg! (if you haven't already)\").send(attackerindex)\n vbalance = leetcoin_client.getPlayerBalance(convertSteamIDToCommunityID(victimplayerinfo.get_networkid_string()))\n SayText2(message=\"Updated \" + vbalance + \"\").send(victimindex)\n if victim_steamid != attacker_steamid:\n abalance = leetcoin_client.getPlayerBalance(convertSteamIDToCommunityID(attackerplayerinfo.get_networkid_string()))\n SayText2(message=\"Updated \" + abalance + \"\").send(attackerindex) \t\n\n return", "def choose_winner(): \r\n max_health = Titan.max_health()\r\n winners = tuple((titan.name for titan in Titan.titans if titan.health == max_health))\r\n return winners", "def player_deaths(self):\n return self.deaths.filter(and_(Death.mindkey != 'null', Death.mindkey != None, Death.mindname != 'Manifested Ghost'))", "def getWolves(self):\n w = []\n for player in self.playersAndRoles:\n if player.lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rêveur\"]:\n w.append(str(player.user.name) + \" est un \" + str(player.lastRole))\n if len(w) == 0: # No werewolf among players : maybe minions are among them\n for player in self.playersAndRoles:\n if player.lastRole in [\"Sbire\"]:\n w.append(str(player.user.name) + \" est un \" + str(player.lastRole) +\n \" mais comme il n'y a pas de Loups-Garous, ce sbire devient un Loup-Garou.\")\n return w", "def game_over(self):\n return self.lives() < 0", "def get_voters():", "def get_voters():", "def playerdefeated(self):\n globalvalues.gameover_combat()", "async def get_non_voters(self, guild: discord.Guild, uservotes: dict):\n\n player_role = guild.get_role(\n await self.config.guild(guild).player_id()\n )\n\n for member in guild.members:\n if player_role in member.roles:\n userkey = f\"{member.name}#{member.discriminator}\"\n if userkey not in uservotes:\n uservotes[userkey] = \"No vote\"\n\n return uservotes", "def current_players(self):\n return self.previous_event.current_players", "def vwho():\n cleanup()\n return {'available': userlist(), 'eta': data['etas'], 'etd': data['etds'], 'vavailable': vavailable(), 'veta': data['vetas'], 'lastlocation': data['lastlocation'], 'ceitloch': ceitloch(), 'reminder': data['reminder']}", "def game_over(self):\n raise 
NotImplementedError(\"Abstract method\") # no mercy for stooges", "def get_players(self, hero, data, verbose):\n\n if len(self.players) > 1:\n out = f\"\\n\\nplayers:\"\n for name, player in data[\"players\"].items():\n if name != hero:\n out += \"\\n \" + name\n if verbose:\n out += Game._verbose_print(player)\n else:\n out = f\"\\n\\nThere's nobody else here.\"\n\n return out", "def attack_opponent(self):\n coordHit = self.attack_table.item(\n self.attack_table.currentRow(), self.attack_table.currentColumn())\n if coordHit in self.clicked:\n self.attack_table.clearSelection()\n error_sound = vlc.MediaPlayer(\"resources/error.mp3\")\n error_sound.play()\n else:\n self.attack_table.item(self.attack_table.currentRow(\n ), self.attack_table.currentColumn()).setBackground(Qt.gray)\n self.clicked.append(coordHit)\n shoot_sound = vlc.MediaPlayer(\"resources/shoot.mp3\")\n shoot_sound.play()\n for ship in self.enemyShips:\n if ship.check_position(coordHit) == True:\n ship.hit(coordHit)\n self.attack_table.item(self.attack_table.currentRow(\n ), self.attack_table.currentColumn()).setBackground(Qt.darkRed)\n if self.check_enemy_fleet() == False:\n self.menu = Menu(self.lang, self.username)\n self.menu.show()\n self.win_window = Win(self.lang)\n self.win_window.show()\n self.close()\n self.hit_coordinate()\n self.attack_table.clearSelection()", "def get_violators(self):\n return self.violators", "def choose_opponent(self):\n possible_opponents = [\n self.data['personalities'].pop(),\n self.data['events'].pop()\n ]\n title = 'Recruit a member or gain experience:'\n options = []\n for possible_opponent in possible_opponents:\n option = card_format(possible_opponent)\n options.append(option)\n choice = self.present_menu(options, title)\n self.stats['opponent'] = possible_opponents[choice]", "def get_players(self):\r\n return self.players.values()", "def gameOver(self):\n\t\treturn self.lives == 0", "def vitamins(self) -> List[RecipeObjectNutrientsCalories]:\n return self._vitamins", "def _checkRoundOver(self):\n\n if not any(player.isAlive() for player in self.teams[0].players):\n self.endGame()", "def check_players_collision(self):\n # Check if bullet hit the player 1\n bullet_hit_p = len(pygame.sprite.spritecollide(self.player1, self.player2_bullet, True))\n bullet_hit_m = len(pygame.sprite.spritecollide(self.player1, self.mob_bullet, True))\n if bullet_hit_p and not self.player1.isInvincible():\n self.p2_score += 500\n if bullet_hit_p + bullet_hit_m > 0 and not self.player1.is_destroyed():\n self.player1.destroy()\n self.explosions.add(Explosion(self.fps // 4, self.player1.get_x(), self.player1.get_y(), self.screen_width,\n self.screen_height, 0, self.debug))\n\n # Check if Player 2 bullet hit the player 1\n bullet_hit_p = len(pygame.sprite.spritecollide(self.player2, self.player1_bullet, True))\n bullet_hit_m = len(pygame.sprite.spritecollide(self.player2, self.mob_bullet, True))\n if bullet_hit_p and not self.player2.isInvincible():\n self.p1_score += 500\n if bullet_hit_p + bullet_hit_m > 0 and not self.player2.is_destroyed():\n self.player2.destroy()\n self.explosions.add(Explosion(self.fps // 4, self.player2.get_x(), self.player2.get_y(), self.screen_width,\n self.screen_height, 0, self.debug))", "def get_allowed_vos():\n return get_vos(filter_by_existing_users(filter_out_bans(read_mapfiles(), read_banfile())))", "def wins(self):\n return [g for g in self.games if g.winner is self.team]", "def passive(self, friendly_team, opposing_team, target):\n stat_buffs = 0\n for position in 
self.UNCOLLECTED_SOULS:\n if position > 0 and not opposing_team['team'][position] == None and opposing_team['team'][position].current_hp <= 0:\n stat_buffs += 2\n position -= 4\n stat_bonus = {\n 'armor': stat_buffs,\n 'resistance': stat_buffs,\n }\n self.set_stats(self.get_effective_stats(stat_bonus))", "def review(self):\n for player in self.team.players:\n player.career.potentially_retire()", "def get_winners_of_game(self):\n return self.game_winners", "async def applyVote(self, votes):\n voteCount = {vote: 0 for vote in self.getMembersName()}\n voteCount[None] = 0\n for vote in votes.values():\n voteCount[vote] += 1\n\n if voteCount[None] != 0:\n await self.textChannel.send(\n \"Attention, des joueurs n'ont pas voté / ont mal écrit, les votes peuvent être faussés.\")\n del voteCount[None]\n\n playerOrder = sorted(voteCount.items(), key=lambda x: x[1], reverse=True)\n print(\"playerOrder\", playerOrder)\n if playerOrder[0][1] == 0: # Nobody vote\n await self.textChannel.send(\"`Partie non valide`, personne n'a voté.\")\n\n elif playerOrder[0][1] == 1: # People think nobody is a werewolf\n await self.textChannel.send(\"Le village pense qu'il n'y a pas de loups-garou ? Vérification ...\")\n werewolves = self.getWolves()\n if len(werewolves) == 0:\n await self.textChannel.send(\"Le village a raison, il n'y a pas de loups-garous parmis eux.\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else:\n await self.textChannel.send(\"Malheuresement, il y avait```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")\n\n else: # Classic vote\n werewolves = self.getWolves()\n deaths = []\n for i in range(len(playerOrder)):\n player = self.getMemberFromName(name=playerOrder[i][0])\n isDead = await player.isDead(channel=self.textChannel)\n if isDead:\n deaths += await player.death(channel=self.textChannel, members=self.players)\n print(\"voteCount :\", voteCount)\n\n # Get player name with same number of vote against them\n playerEqualVote = []\n for p in playerOrder:\n if p[1] == playerOrder[i][1] and p[0] != playerOrder[i][0]:\n playerEqualVote.append(self.getMemberFromName(name=p[0]))\n print(\"Other players with equals number of vote :\", playerEqualVote)\n for otherPlayer in playerEqualVote:\n isDead = await otherPlayer.isDead(channel=self.textChannel)\n if isDead:\n deaths += await otherPlayer.death(channel=self.textChannel, members=self.players)\n break\n\n for i in range(len(deaths)):\n if deaths[i] is None:\n del deaths[i]\n\n if len(deaths) == 0: # No one die\n if len(werewolves) == 0: # No Werewolves\n await self.textChannel.send(\"Il n'ya pas eu de mort et il n'y a aucun Loup-Garou !\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else: # Werewolves among players\n await self.textChannel.send(\n \"Il n'y a pas eu de mort mais```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")\n\n elif len(deaths) == 1:\n if deaths[0].lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rêveur\"]: # Werewolf die\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n elif deaths[0].lastRole in [\"Tanneur\"]: # Tanner died\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNÉ#```\")\n if len(werewolves) > 0: # Wolves in game\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT ÉGALEMENT GAGNÉ```\")\n else: # Villager died\n await self.textChannel.send(\"```diff\\n-LES 
LOUPS-GAROUS ONT GAGNÉ-```\")\n\n else: # more than 2 deaths\n rolesDead = []\n for dead in deaths:\n if dead.lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rêveur\"]:\n rolesDead.append(\"Loup-Garou\")\n elif dead.lastRole in [\"Tanneur\"]:\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNÉ#```\")\n else:\n rolesDead.append(\"Villageois\")\n print(\"rolesDead :\", rolesDead)\n rolesDead = list(dict.fromkeys(rolesDead))\n print(\"rolesDead unique :\", rolesDead)\n if \"Loup-Garou\" in rolesDead:\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else:\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")", "def victory_check(self):\n\n # get essential values\n board = self.get_game_space()\n affinity = self.get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get the possible ways to win\n possible_wins = board.get_wins(affinity)\n \n # if we can win, pick a good win \n if len(possible_wins) == 1: return possible_wins[0]\n elif len(possible_wins) > 1:\n best_win = None\n wins_by_x = {}\n wins_by_y = {}\n for win in possible_wins:\n if win[0] not in wins_by_x.keys():\n wins_by_x[win[0]] = []\n if win[1] not in wins_by_y.keys():\n wins_by_y[win[1]] = []\n wins_by_x[win[0]].append(win)\n wins_by_y[win[1]].append(win)\n for y in wins_by_y:\n if len(wins_by_y[y]) > 1: \n for win in wins_by_y[y]:\n if best_win is None or win[0] < best_win[0]:\n best_win = win \n return best_win\n\n else: return None", "def team_battle(self):\n # deaths1 = 0\n # deaths2 = 0\n #\n # while deaths1 < len(self.team_one.heroes) and deaths2 < len(self.team_two.heroes):\n # self.team_one.attack(self.team_two)\n # self.team_two.attack(self.team_one)\n # if\n # print(\"hi\")\n while self.team_one.find_hero == True and self.team_two.find_hero == True:\n print(\"team_battle\")", "def trigger_violence(self):\n # First time offender get registered in the system and changes category into an Aggressor and a Victim\n if self.assaulted == 0:\n if self.stress > self.random.random():\n self.category = 'aggressor'\n self.assaulted += 1\n self.spouse.category = 'victim'\n self.spouse.got_attacked += 1\n\n # Second-time offender, checks to see if it is a recidivist.\n elif self.stress > self.random.random():\n self.assaulted += 1\n self.spouse.got_attacked += 1", "def tournament_selection(self, population: List[IndividualType]) -> List[IndividualType]:\n survivors: List[IndividualType] = []\n for _ in range(self.configuration.n_survivors):\n # Choose participants\n rooster: List[IndividualType] = random.sample(population, self.configuration.rooster_size)\n # Select fittest of participants as survivor\n fittest_individual_of_rooster = self.get_best_individual(rooster)\n population.remove(fittest_individual_of_rooster)\n survivors.append(fittest_individual_of_rooster)\n return survivors", "def players(self):\n return self.currents.player", "def sngl_obj_evo(self, lacking):\n prob, algo = self.probinit('jde', 0)\n l = list()\n u = 6+(self.N-3)*4\n for i in range(lacking):\n archi = archipelago(algo,prob,8,16, topology=fully_connected())\n for j in range(u):\n archi.evolve(5)\n stdout.write(\"\\r{0} / {1}\".format(i*u+j+1, lacking*u))\n stdout.flush()\n tmp = [isl for isl in archi]\n tmp.sort(key = lambda x: x.population.champion.f[0]);\n l.append(tmp[0].population.champion)\n stdout.write(\" Done. 
\")\n return l, prob", "def sixteen_is_dead(players):\n \n number = setup_number_of_dices()\n faces = setup_number_of_faces()\n result_list = []\n for player in range(1, players+1):\n total_points = 0\n while total_points < 16:\n user_input = user_interface(player)\n if user_input == \"\":\n while True:\n user_input_2 = user_interface_2()\n if user_input_2 == \"\":\n dice_number = roll_dice(number,faces)\n total_points += dice_number\n print()\n print (\"Deine aktuelle Punktzahl beträgt:\",total_points)\n print()\n if total_points == 10:\n time.sleep(3)\n continue\n else:\n break\n else:\n dice_number = roll_cheating_dice(number,faces)\n total_points += dice_number\n print()\n print (\"Deine aktuelle Punktzahl beträgt:\",total_points)\n print()\n if total_points == 10:\n time.sleep(3)\n continue\n else:\n break\n if (total_points >= 16) or (total_points == 9) or (user_input == \"n\"):\n print()\n break\n if total_points < 16:\n result_list.append(total_points)\n else:\n print()\n break\n if total_points >= 16:\n print(\"Spieler\",player,\"hat das Spiel verloren!\")\n print()\n restart()\n else:\n player = 1\n for i in result_list:\n if i == min(result_list):\n print(\"Spieler\",player,\"hat das Spiel mit\",i,\"Punkten verloren!\")\n player += 1\n print()\n restart()", "def game_over(players):\n active_players = players_with_decks(players)\n if not active_players or len(active_players) == 1:\n return True\n return False", "def all_votesp(self, game_key):\n participants = models.Participant.query(\n models.Participant.playing == True,\n models.Participant.vote == None,\n ancestor=game_key).fetch()\n logging.info(\n \"participants who have not voted: %s\", \n [p.plus_id for p in participants])\n if participants:\n return False\n else:\n return True", "def player_win(self):\n global chips\n global placed_bet\n\n chips = (self.final_bet*2 + chips)\n self.victory = True\n placed_bet = False", "def ready_players(self):\n return self.players.filter_by(sitting_out=False).join(players_active).all()", "def player_lose(self):\n global chips\n chips = chips - self.final_bet\n self.defeat = True\n placed_bet = False", "def game_over(self):\n # TODO: Define the game over condition for Adventure.\n # use self.over to determine if the game if over\n return self.over", "def getOpponents(self, gameState):\n\n if self.red:\n return gameState.getBlueTeamIndices()\n else:\n return gameState.getRedTeamIndices()", "def check_cheating(self, dice=[]):\n\n #Assume they're not cheating until proven guilty\n self.cheating = False\n\n if self.current_stage == 3:\n if self.die_a not in dice and (self.die_a.value == 6):\n print(\"You're cheating! You cannot lock a 6! You cannot win \"\n \"until you reroll it!\")\n self.cheating = True\n elif self.die_b not in dice and (self.die_b.value == 6):\n print(\"You're cheating! You cannot lock a 6! 
You cannot win \"\n \"until you reroll it!\")\n self.cheating = True", "def check_victory(board):\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n player = board[row][col]\n\n # not a player move\n if player == 0 or player == 9:\n continue\n\n # look right\n if col + 3 < WIDTH and player == board[row][col + 1] and player == board[row][col + 2]\\\n and player == board[row][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n if row + 3 < HEIGHT:\n\n # down\n if player == board[row + 1][col] and player == board[row + 2][col] and player == board[row + 3][col]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and right\n if col + 3 < WIDTH and player == board[row + 1][col + 1] and player == board[row + 2][col + 2]\\\n and player == board[row + 3][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and left\n if col - 3 >= 0 and player == board[row + 1][col - 1] and player == board[row + 2][col - 2] \\\n and player == board[row + 3][col - 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n\n # # if no one has won yet\n for row in range(HEIGHT):\n for col in range(WIDTH):\n if board[row][col] == 0 or board[row][col] == 9:\n return None\n\n return 0", "async def players(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n\n await amor_manager.say(\"Current Players: {}\".format(\", \".join(tod_games[room]['participants'].keys())))", "def decide_winner(self, user, computer):\n user_index = choice.index(user)\n computer_index = choice.index(computer)\n diff = user_index - computer_index\n if diff == -2 or diff == 1:\n return [1, 0]\n elif diff == 0:\n return [0, 0]\n else:\n return [0, 1]", "def handleAllSpritesCollisions(self):\n # verifico le collisioni coi nemici\n # purchè il giocatore sia per loro visibile\n if self.player.sprite.getState() == PlayerState.VISIBILE:\n if pygame.sprite.groupcollide(self.player, self.enemies, False, False):\n print(\"COLLISIONE CON NEMICO\")\n self.sound_explosion.play()\n self.removeLife()\n\n\n # verifico le collisioni coi proiettili sparati dai nemici che sparano\n # purchè il giocatore sia per loro visibile\n if self.player.sprite.getState() == PlayerState.VISIBILE:\n if pygame.sprite.groupcollide(self.player, self.shooterBullets, False, False):\n print(\"COLLISIONE CON NEMICO\")\n self.sound_explosion.play()\n self.removeLife()\n\n # verifico le collisioni con le bombe che rimuovon le monete nel loro intorno\n bombCollisions = pygame.sprite.groupcollide(self.player, self.bombs, False, True)\n if bombCollisions:\n self.sound_bomb_explosion.play()\n bombs = bombCollisions[self.player.sprite]\n for b in bombs:\n self.score += Score.COIN * b.getRemovedObjects()\n\n # verifico le collisioni con le wall bombs\n if pygame.sprite.groupcollide(self.player, self.wallBombs, False, True):\n self.sound_explosion.play()\n\n # verifico le collisioni con gli enemyKillers\n enemyKillersCollisions = pygame.sprite.groupcollide(self.player, self.enemyKillers, False, True)\n if enemyKillersCollisions:\n self.sound_enemy_killer.play()\n enemies = enemyKillersCollisions[self.player.sprite]\n for e in enemies:\n self.score += Score.ENEMY * e.getRemovedObjects()\n\n # verifico le collisioni coi timerReloaders\n if pygame.sprite.groupcollide(self.player, self.timeReloaders, False, True):\n self.sound_time_reloader.play()\n self.start = 
time.time()\n\n # verifico le collisioni con le monete\n if pygame.sprite.groupcollide(self.player, self.coins, False, True):\n self.sound_coin.play()\n self.score += Score.COIN\n\n # verifico le collisioni con i greedy enemies\n if pygame.sprite.groupcollide(self.player, self.greedyEnemies, False, True):\n self.sound_greedy_enemies.play()\n self.enemies_eater = not self.enemies_eater\n\n # verifico le collisioni tra proiettili e nemici\n if pygame.sprite.groupcollide(self.playerBullets, self.enemies, True, True):\n self.sound_explosion.play()\n self.score += Score.ENEMY\n\n # le collisioni tra proiettili e muri\n # sono gestite internamente alla classe Bullet\n\n # verifico le collisioni tra bonus dei proiettili e giocatore\n weaponPlayersCollisions = pygame.sprite.groupcollide(self.player, self.bonusPlayerBullets, False, True)\n if weaponPlayersCollisions:\n\n weapon_players = weaponPlayersCollisions[self.player.sprite]\n print(\"Collisione con bonusPlayersBullet: %s\" % len(weapon_players))\n for p in weapon_players:\n # riassegno le armi del mio giocatore\n # da notare che se il potere in questione esiste gia'\n # esso viene sovrascritto dal nuovo valore di potere\n self.sound_weapon.play()\n print(\"Aggiungo il potere dell'arma per %s secondi\" % p.get_duration())\n self.player.sprite.addPower(PlayerPowers.WEAPON,\n (self.get_remaining_time(), p.get_duration()))\n\n # nel caso il valore di enemies_eater sia attivo\n # verifico la collosione con le monete anche per i nemici\n if self.enemies_eater:\n if pygame.sprite.groupcollide(self.enemies, self.coins, False, True):\n self.sound_coin.play()\n self.score += Score.COIN\n\n # nel caso il giocatore collida con un portale, viene\n # catapultato su una nuova posizione a caso!\n if pygame.sprite.groupcollide(self.player, self.portals, False, True):\n self.sound_portal.play()\n # nel caso al momento il player non abbia il potere della invisibilita\n # gliene do' un po'...\n if not self.player.sprite.hasPower(PlayerPowers.INVISIBILITY):\n # do' il tempo al giocatore di schivare eventuali avversari\n # attorno alla sua nuova posizione...\n self.player.sprite.addPower(PlayerPowers.INVISIBILITY, (self.get_remaining_time(),5))\n # scelgo una nuova posizione a caso tra quelle libere da muri\n self.player.sprite.setPosition(random.choice(self.free_locations))\n\n # verifico le collisioni con gli invisibilityPlayers\n invisiblePlayersCollisions = pygame.sprite.groupcollide(self.player, self.invisibilityPlayers, False, True)\n if invisiblePlayersCollisions:\n inv_players = invisiblePlayersCollisions[self.player.sprite]\n for p in inv_players:\n # riassegno il potere di invisibilità del mio giocatore\n # da notare che se il potere in questione esiste gia'\n # esso viene sovrascritto dal nuovo valore di potere\n self.sound_invisibility_player.play()\n self.player.sprite.addPower(PlayerPowers.INVISIBILITY,(self.get_remaining_time(), p.get_duration()))", "def nonplayer_deaths(self):\n return self.deaths.filter(or_(Death.mindkey == 'null', Death.mindkey == None))", "def checkForOnes(self, playersView: Player):\n # TODO checkForOnes not implemented\n raise NotImplementedError()", "def get_winner(self):\n winner: Player = Player('none')\n points_winner = 0\n for player in self.players:\n for key, value in player.get_stats().items():\n print('{}: {}'.format(key, value))\n if key == 'points':\n if value >= points_winner:\n winner = player\n print()\n\n print('The winner is: ' + winner.get_name())\n return winner", "def _player_list(self):\n game = 
self.ctrl.game\n return game.players[self.i_to_player_id(0)], game.players[self.i_to_player_id(1)]", "def collect(self, player: Player):\n player.set_invincible(True)", "def test_orangered_victory(self):\n self.assertEqual(None, self.sapphire.owner)\n sess = self.sess\n self.battle.create_skirmish(self.alice, 5)\n\n self.battle.ends = self.battle.begins\n sess.commit()\n updates = Battle.update_all(sess)\n sess.commit()\n\n self.assertNotEqual(len(updates['ended']), 0)\n self.assertEqual(updates[\"ended\"][0], self.battle)\n self.assertEqual(0, self.sapphire.owner)", "async def infected(self, ctx):\n user_list = await self.config.all_users()\n infected_list = []\n for user, data in user_list.items():\n user = ctx.bot.get_user(user)\n if user:\n userState = data[\"gameState\"]\n if userState == \"infected\":\n infected_list.append(f\"{user.mention} - {user}\")\n if infected_list:\n infected_list = \"\\n\".join(infected_list)\n color = await ctx.embed_color()\n if len(infected_list) > 2000:\n embeds = []\n infected_pages = list(pagify(infected_list))\n for index, page in enumerate(infected_pages, start=1):\n embed = discord.Embed(color=color, title=\"Infected Users\", description=page)\n embed.set_footer(text=f\"{index}/{len(infected_pages)}\")\n embeds.append(embed)\n await menu(ctx, embeds, DEFAULT_CONTROLS)\n else:\n await ctx.send(\n embed=discord.Embed(\n color=color,\n title=\"Infected Users\",\n description=infected_list,\n )\n )\n else:\n await ctx.send(\"No one has been infected yet..\")", "def get_accepted_players(self):\n return self.accepted_players", "def determine_winners(self, players=None):\n players_and_cards = [(holding.player.id, holding.codes) for holding in self.live_holdings]\n if players:\n player_ids = [p.id for p in players]\n players_and_cards = [d for d in players_and_cards if d[0] in player_ids]\n winners = determine_winners(players_and_cards, self.board.codes)\n return [Player.query.get(winner) for winner in winners]", "def test_fav_6(self):\n\t\tplayer_list = [Player(\"Blake Base\", 1, 300000, 10), Player(\"Corey Catcher\", 2, 500000, 20), Player(\"Dexter Dugout\", 3, 200000, 50)]\n\t\tself.assertEqual( free_agent_vorp(player_list, 100000, 4), (0, 0, []) )", "def get_opposing_player(self, player_name):\n pass", "def player_collision(self, player):\n for item in self.inventory:\n player.receive_item(item)\n self.die()\n return True", "def attack_countries(self, other_country):\n print(\"\\n>>>\\tEncounter enemy!\")\n my_point = self.get_total_cp(self.troop_list, other_country)\n enemy_point = other_country.get_total_cp(other_country.troop_list, self)\n if my_point <= 0:\n return False\n elif enemy_point <= 0:\n return True\n if my_point > enemy_point:\n for key in self.troop_list:\n if self.troop_list[key][0] > 0:\n if self.troop_list[key][0] >= 2:\n self.troop_list[key][0] -= 2\n if self.troop_list[key][1].health > 10:\n self.troop_list[key][1].health -= 10\n if self.troop_list[key][1].attack >= 5:\n self.troop_list[key][1].attack -= 5\n for key in other_country.troop_list:\n if other_country.troop_list[key][0] > 0:\n if other_country.troop_list[key][0] >= 3:\n other_country.troop_list[key][0] -= 3\n if other_country.troop_list[key][1].health > 15:\n other_country.troop_list[key][1].health -= 15\n if other_country.troop_list[key][1].attack >=10:\n other_country.troop_list[key][1].attack -= 10\n if enemy_point >= my_point:\n for key in self.troop_list:\n if self.troop_list[key][0] > 0:\n if self.troop_list[key][0] >= 3:\n self.troop_list[key][0] -= 3\n if 
self.troop_list[key][1].health > 15:\n self.troop_list[key][1].health -= 15\n if self.troop_list[key][1].attack >= 10:\n self.troop_list[key][1].attack -= 10\n for key in other_country.troop_list:\n if other_country.troop_list[key][0] > 0:\n if other_country.troop_list[key][0] >= 2:\n other_country.troop_list[key][0] -= 2\n if other_country.troop_list[key][1].health > 10:\n other_country.troop_list[key][1].health -= 10\n if other_country.troop_list[key][1].attack >= 5:\n other_country.troop_list[key][1].attack -= 5", "def is_game_over(self):\n return self.state.all_avatars_placed() and self.state.is_game_over()", "def get_able_players(self):\n highest_bid = self.get_highest_bid()\n\n def is_able(player):\n return player.is_active() and (player.pot_money < highest_bid or player.can_decide)\n\n first_n = (self.dealer_index + 1) % len(self.in_game_players)\n in_game_sorted = self.in_game_players[first_n:] + self.in_game_players[:first_n]\n return [p for p in in_game_sorted if is_able(p)]", "def obs(self, player):\n vec = []\n hand = self._hands[player]\n for card in hand:\n vec += card.vector()\n vec += [0.0] * (20 * (108 - len(hand)))\n vec += self._discard[-1].vector()\n vec += _player_vec(player)\n vec += _player_vec(self.turn())\n for hand in self._hands:\n vec += [float(len(hand))]\n vec += [0.0] * (MAX_PLAYERS - self._num_players)\n return vec", "def check_win(players: List[Player]) -> Tuple[bool, Optional[Player]]:\n total_players = len(players)\n for player in players:\n if player.influence == 0:\n total_players -= 1\n if total_players == 1:\n for player in players:\n if player.influence >0:\n return True, player\n return False, None", "def game_is_over(self) -> models.Conclusion:\n raise NotImplementedError", "def active_game(self, player):\n actives = self.filter(active=True, finished=False, player=player)\n if actives.count() > 1:\n log.warning(f\"User {player} has more than one active round.\")\n return actives.latest(\"created\")", "def check_winner(self):\r\n if all(heap == 0 for heap in self.heaps):\r\n if self.misere:\r\n self.winner = self.other_player\r\n self.loser = self.current_player\r\n else:\r\n self.winner = self.current_player\r\n self.loser = self.other_player", "def restricted_teams(self, user):\n return []", "def check_if_game_over():\n check_for_winner()\n check_for_tie()", "def check_collisions(self):", "def check_evolve(self):\n if self.team == 'white':\n if self.position[0] == 0:\n self.evolve()\n \n else:\n if self.position[0] == 7:\n self.evolve()", "def champion_check(self):\n better_champion = False\n\n for species in self.species:\n if species.leader.original_fitness > self.champion_fitness:\n self.age_since_improvement = 0\n self.champion_fitness = species.leader.original_fitness\n better_champion = True\n\n if not better_champion:\n self.age_since_improvement += 1", "def calculate_survivors(self, planet=None):\n mage_life = 0\n if self.magos:\n\n # Calculate survivors\n mage_life = sum(self.race.rango_vid_mago) // 2\n survivors = min(self.vida / mage_life, len(self.magos))\n if planet:\n planet.magos = survivors\n\n # Kill off the dead and improve the survivors\n shuffle(self.magos)\n [self.magos.pop() for i in range(len(self.magos) - survivors)]\n for m in self.magos:\n m = mage(m.ataque + 5, m.vida + 10)\n\n if self.soldados:\n\n # Calculate survivors\n soldier_life = sum(self.race.rango_vid_soldado) // 2\n survivors = self.vida - len(self.magos)*mage_life\n survivors //= soldier_life\n if planet:\n planet.soldados = survivors\n\n # Kill off the dead 
and improve the survivors\n shuffle(self.soldados)\n [self.soldados.pop()\n for i in range(len(self.soldados) - survivors)]\n for s in self.soldados:\n s = soldier(s.ataque + 5, s.vida + 10)", "def is_over(self):\n alive_players = [1 if p.status == \"alive\" else 0 for p in self.players]\n # If only one player is alive, the game is over.\n if sum(alive_players) == 1:\n return True\n\n # If all rounds are finshed\n if self.round_counter >= 2:\n return True\n return False", "def players(self) -> List[Player]:\n return [self.white_player, self.black_player]", "def check_death(player_list):\n\n count = 0\n remaining_players = []\n\n for cycle in player_list:\n if not cycle.is_dead():\n count += 1\n remaining_players.append(cycle.get_name())\n\n if count == 0:\n pygame.time.wait(1000)\n return True, \"None\"\n elif count == 1:\n pygame.time.wait(1000)\n return True, remaining_players[0]\n\n return False, remaining_players", "def is_game_over(cls):\n cls.record_winner()\n cls.record_tie()", "def game_on(cls):\n sommet = cls()\n\n moves = []\n\n while not sommet.map.game_over():\n print(sommet.map)\n print(\"{} joue\".format(sommet.is_vamp))\n\n moves += [sommet.next_move()]\n\n sommet = cls(is_vamp=not sommet.is_vamp)\n sommet.map.add_moves(moves)\n\n print(sommet.map)\n print(\"Vainqueur : {}\".format(sommet.map.winner()))\n\n print(\"{} sommets ont été créés pour les besoins de cette simulation.\".format(\n cls.nb_vertices_created()))", "def is_game_over(self):\n if self.just_cheated_a or self.just_cheated_b:\n return False\n if self.game_stage == 3:\n return (self.die_a.current_value == \"5\" and self.die_b.current_value == \"6\" or\n self.die_a.current_value == \"6\" and self.die_b.current_value == \"5\")\n else:\n return False", "def player_has_active_games(self, player):\n return self.filter(active=True, finished=False, player=player)", "def determineWinner(self):\n if self.game_state.numActive() == 1:\n for player in self.game_state.player_list:\n if self.game_state.active_dict[player.name]:\n print \"\"\n print player.name + \" wins with\"\n for card in self.player_hand_dict[player.name]:\n print card\n print \"and takes \" + str(self.game_state.pot) + \" chips!\"\n self.game_state.player_chips[player.name] += self.game_state.pot\n return\n\n for player in self.game_state.player_list:\n for card in self.game_state.board:\n self.player_hand_dict[player.name].append(Card(card.suit, card.rank))\n hand_ranking = HandRanking(self.game_state.player_list, self.player_hand_dict)\n hand_ranking.rankHands()\n winning_rank = -1\n winner = None\n tie_list = []\n \"\"\" Get winning rank, only consider active players for the pot \"\"\"\n for player in self.game_state.player_list:\n if self.game_state.active_dict[player.name] == True:\n if DEBUG:\n print \"Considering \" + str(player.name) + \"'s hand for the pot.\"\n if hand_ranking.player_ranks_dict[player.name] > winning_rank:\n winning_rank = hand_ranking.player_ranks_dict[player.name]\n winner = player \n tie_list = []\n tie_list.append(player)\n elif hand_ranking.player_ranks_dict[player.name] == winning_rank:\n tie_list.append(player)\n \"\"\" winner should never be equal to None \"\"\"\n\n \"\"\" Check for tie and resolve if needed \"\"\"\n if len(tie_list) > 1:\n if DEBUG:\n print \"found potential tie...\"\n for player in tie_list:\n print player.name + \"'s hand:\"\n for card in hand_ranking.player_best_hand_dict[player.name]:\n print card\n print \"resolving tie...\"\n result_tie_list = self.resolveTie(hand_ranking, tie_list)\n print \"\"\n 
self.printPlayersHands()\n for player in result_tie_list:\n print player.name + \",\",\n print \" wins with\",\n hand_ranking.printRanking(winning_rank)\n print \"and takes \" + str(self.game_state.pot / len(tie_list)) + \" chips!\"\n for player in result_tie_list:\n self.game_state.player_chips[player.name] += self.game_state.pot / len(tie_list)\n else:\n print \"\"\n self.printPlayersHands()\n print winner.name + \" wins with\",\n hand_ranking.printRanking(winning_rank)\n print \"and takes \" + str(self.game_state.pot) + \" chips!\"\n self.game_state.player_chips[winner.name] += self.game_state.pot", "def show_contest_winner(self, db_session):\n users_contest_list = db_session.query(db.User).filter(db.User.entered_in_contest.isnot(False)).all()\n if len(users_contest_list) > 0:\n winner = random.choice(users_contest_list)\n self._add_to_chat_queue('The winner is {}!'.format(winner.name))\n else:\n self._add_to_chat_queue('There are currently no entrants for the contest.')", "async def guild_infected(self, ctx, *, guild: discord.Guild = None):\n if not guild:\n guild = ctx.guild\n user_list = await self.config.all_users()\n infected_list = []\n for user, data in user_list.items():\n user = guild.get_member(user)\n if user:\n userState = data[\"gameState\"]\n if userState == \"infected\":\n infected_list.append(f\"{user.mention} - {user}\")\n if infected_list:\n infected_list = \"\\n\".join(infected_list)\n color = await ctx.embed_color()\n if len(infected_list) > 2000:\n embeds = []\n infected_pages = list(pagify(infected_list))\n for index, page in enumerate(infected_pages, start=1):\n embed = discord.Embed(color=color, title=\"Infected Members\", description=page)\n embed.set_footer(text=f\"{index}/{len(infected_pages)}\")\n embeds.append(embed)\n await menu(ctx, embeds, DEFAULT_CONTROLS)\n else:\n await ctx.send(\n embed=discord.Embed(\n color=color,\n title=\"Infected Members\",\n description=infected_list,\n )\n )\n else:\n await ctx.send(\"No one has been infected yet..\")", "def get_player_squares(self, player: PlayerColor) -> List[Square]:\r\n return [square for square in self.squares.values() if\r\n square.state == SquareState.OCCUPIED\r\n and square.occupant.owner == player]", "def human_players(self):\n return self._get(\"human_players\")", "def get_best_player_to_poison(self, players_not_to_poison):\n best_player = self.dict_of_players[0]\n\n for each_key in self.dict_of_players.keys():\n if self.dict_of_players[each_key] in players_not_to_poison:\n pass\n else:\n if self.dict_of_players[each_key].deck.get_expected_poison_value() > best_player.deck.get_expected_poison_value():\n best_player = self.dict_of_players[each_key]\n\n return best_player", "def check_collisions(self, g):\n self.rects = {}\n for gc in self.sc.game_objects:\n self.load_game_object(gc)\n if g.name in self.backw_rects.keys():\n r = self.backw_rects[g.name]\n return r.collidedictall(self.rects)\n return []", "def get_lovers(var: GameState, player: User, *, include_player: bool = False) -> set[User]:\n if player not in var.matchmaker_lovers:\n return set()\n\n visited = {player}\n queue = set(var.matchmaker_lovers[player])\n while queue:\n cur = queue.pop()\n visited.add(cur)\n queue |= var.matchmaker_lovers[cur] - visited\n\n return visited if include_player else visited - {player}", "def player_hurt(event_var):\r\n debug.write(\"[SourceRPG] handling player_hurt\", 1)\r\n userid = event_var['userid']\r\n attacker = event_var['attacker']\r\n \"\"\" Only pass if the user did not kill themselves and are not on 
the same team \"\"\"\r\n if userid <> attacker:\r\n debug.write(\"Playerids are not the same\", 2)\r\n if attacker.isdigit() and int(attacker) > 0:\r\n debug.write(\"Attacker is not work spawn\", 2)\r\n if event_var['es_userteam'] <> event_var['es_attacker']:\r\n \"\"\" If one of the players is a bot and is not legible for experience, return \"\"\"\r\n debug.write(\"Players are not on the same team\", 2)\r\n if not canReceiveExperience(userid, attacker):\r\n return\r\n debug.write(\"Handling the experience rewards\", 1)\r\n player = players[attacker]\r\n weapon = event_var['weapon']\r\n if weapon in weaponXp:\r\n if weaponXp[weapon][0]:\r\n player.addXp( weaponXp[weapon][0], tellUserOverride = False)\r\n else:\r\n player.addXp( int(damageXp), tellUserOverride = False )\r\n debug.write(\"[SourceRPG] player_hurt handled\", 1)", "def UpdateVisibility(self):\r\n # Clear the map\r\n self.ClearVisibilityMap()\r\n \r\n # Only update it if we have a player\r\n if not self.game.player:\r\n return\r\n \r\n max_vis_day = self.data.get('max_visibility', self.game.data['map']['max_visibility'])\r\n max_vis_night = self.data.get('max_visibility_night', self.game.data['map']['max_visibility_night'])\r\n \r\n #TODO(g): Add day/night cycle\r\n max_vis = max_vis_day\r\n \r\n # Cast rays from the player. Step out from the player and find the\r\n # angle to the player to determine if visible.\r\n center = self.game.player.pos.ToList()\r\n \r\n # Check every tile\r\n for y in range(center[1] - max_vis, center[1] + max_vis):\r\n for x in range(center[0] - max_vis, center[0] + max_vis):\r\n dist = rpg_base.GetDistance(center, [x, y])\r\n # Only really test tiles that are within viewing range\r\n if dist <= max_vis:\r\n #Log('%s -> %s = %s' % (center, [x, y], dist))\r\n if self.game.map.HasLineOfSightToPlayer(x, y):\r\n self.SetVisibility(x, y)", "def _get_live_games(self):\n response = requests.get(self._get_score_url())\n if response.status_code == 200:\n return [g for g in response.json()['games'] if g['status']['state'] == self.desired_game_state]", "def getPlayers(self):\n return iter(self.players)", "def players(self):\n return self._get(\"players\")", "def print_collisions(self):", "async def isTilted(self, ctx, summonerName):\n summoner = await RiotApi.getSummoner(summonerName)\n summonerId = summoner['id']\n summonerAccountId = summoner['accountId']\n match_history = await RiotApi.getSummonerHistory(summonerAccountId)\n loss_streak = 0\n\n for match in match_history['matches'][:5]:\n match_info = await RiotApi.getMatch(match['gameId'])\n participant_id = 0\n\n # find participant Identity number\n for identity in match_info['participantIdentities']:\n if summonerId == identity['player']['summonerId']:\n participant_id = identity['participantId']\n \n # assumes team is 100 if identity is 1-5 else assume team 200 \n team_100 = True if participant_id <= 5 else False\n team_100_win = match_info['teams'][0]['win'] == \"Win\"\n\n if team_100 == team_100_win:\n break\n\n loss_streak = loss_streak + 1\n \n if loss_streak > 0:\n await ctx.send(f'Ouch! {summonerName} is tilted! :slight_frown:\\n:fire: {summonerName} is on a {loss_streak} game loss streak! :fire:')\n else:\n await ctx.send(f'Nice! :thumbsup:\\n{summonerName} is not tilted :white_check_mark:')" ]
[ "0.59914505", "0.5907969", "0.58250535", "0.5708909", "0.5472831", "0.54458094", "0.5445035", "0.5442779", "0.5384571", "0.5359108", "0.5300459", "0.5299243", "0.5299243", "0.5279039", "0.52614534", "0.52344775", "0.5208366", "0.5185099", "0.5184724", "0.5178754", "0.516631", "0.51556057", "0.5123591", "0.5120934", "0.5112273", "0.5101496", "0.5098089", "0.5089273", "0.5086071", "0.50821304", "0.5081631", "0.5081225", "0.50790805", "0.50699157", "0.50694674", "0.5066167", "0.5059869", "0.50499463", "0.50493914", "0.5039686", "0.50341547", "0.501279", "0.50083554", "0.5007499", "0.4988679", "0.49575183", "0.4956525", "0.49495187", "0.49400538", "0.49385357", "0.49321762", "0.49251637", "0.49212748", "0.49192423", "0.4917787", "0.4905806", "0.4904036", "0.49039117", "0.49024007", "0.490231", "0.4897697", "0.48953608", "0.48930243", "0.48916233", "0.48742938", "0.48732388", "0.4872146", "0.48705217", "0.48632893", "0.48626414", "0.48579815", "0.48546034", "0.48485076", "0.4844438", "0.48439816", "0.48377606", "0.48349604", "0.48314", "0.48241192", "0.48168084", "0.48154712", "0.4814394", "0.48114803", "0.48086122", "0.4806396", "0.48043013", "0.48034543", "0.4799517", "0.47886536", "0.47881678", "0.4785165", "0.47794107", "0.4779195", "0.4777454", "0.47718573", "0.47710708", "0.47698355", "0.47694457", "0.4769364", "0.47647172" ]
0.75721735
0
Is the game over?
def is_game_over(self): return self.state.all_avatars_placed() and self.state.is_game_over()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isGameOver(self):\n pass", "def gameOver(self):\n\t\treturn self.lives == 0", "def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False", "def game_over(self) -> bool:\n return self.rstate.game_over()", "def is_game_over(self) -> bool:\n return self._is_game_over", "def is_game_over(cls):\n cls.record_winner()\n cls.record_tie()", "def is_over(self):\n return self.game.is_over()", "def _check_game_over(self):\n return self.game_board.check_game_over()", "def game_over(self):\n return bool(self.last_round and self.last_player == self.current_player)", "def __game_is_over(self):\n return not (self.__playing and self.__bricks_total > 0 and self.__num_lives > 0)", "def is_game_over(self):\n if (self.check_win(HexBoard.RED) or self.check_win(HexBoard.BLUE) or \n len(self.get_move_list())==0):\n self.game_over = True\n return self.game_over", "def game_over(self):\n return self.lives() < 0", "def check_game_over(self):\n red, blue = self.board.count_piece()\n if blue == 0:\n self.ui.show_result(\"RED WIN!\")\n self.turn = RED\n elif red == 0:\n self.ui.show_result(\"BLUE WIN!\")\n self.turn = BLUE\n elif red == blue == 1:\n self.ui.show_result(\"DRAW!\")", "def game_over(self):\n self.over = True", "def game_over(self):\n # TODO: Define the game over condition for Adventure.\n # use self.over to determine if the game if over\n return self.over", "def gameover(self):\n if self._gameover:\n return True\n \n if self.terminal():\n self._gameover = True\n return True\n \n return False", "def check_if_game_over():\n check_for_winner()\n check_for_tie()", "def is_game_over(self):\n\n if len(self.next_pieces) == 0:\n return True", "def event_game_over(self):\n print('Game over!')\n self._cmd_exit()", "def is_game_over(self):\n if self.just_cheated_a or self.just_cheated_b:\n return False\n if self.game_stage == 3:\n return (self.die_a.current_value == \"5\" and self.die_b.current_value == \"6\" or\n self.die_a.current_value == \"6\" and self.die_b.current_value == \"5\")\n else:\n return False", "def notify_game_over(self):\n self.is_game_over = True", "def is_over(self):\n winner = TictactoeMatch.get_winner(self.inputs_)\n if winner:\n self.result_ = winner\n if Config.USER['debug']['enabled']:\n print \"It is over! Player \"+str(self.result_)+\" (\"+str(self.player_label_[self.result_])+\") wins!\"\n return True\n for value in self.inputs_:\n if value == TictactoeMatch.EMPTY:\n if Config.USER['debug']['enabled']:\n print \"Go!\"\n return False\n self.result_ = TictactoeMatch.DRAW\n if Config.USER['debug']['enabled']:\n print \"It is over! 
Draw!\"\n return True", "def check_if_over(self):\n if self.remainingBalls == 0:\n self.check_if_won()\n self.game_over = True", "def isGameOver(self):\n for row in range(0, self.rows):\n for col in range(0, self.cols):\n if self.isMine(row, col) and self.isClicked(row, col):\n return True\n return False", "def is_over(self):\n alive_players = [1 if p.status == \"alive\" else 0 for p in self.players]\n # If only one player is alive, the game is over.\n if sum(alive_players) == 1:\n return True\n\n # If all rounds are finshed\n if self.round_counter >= 2:\n return True\n return False", "def IsGameOver(self):\n return any(c.cX + c.width >= self.end_location for c in self.enemies)", "def is_game_won(self):\n return True", "def check_if_game_over():\n # Calling check for winners.\n check_for_winner()\n # Calling check it's tie or not.\n check_if_tie()", "def is_game_over(board):\n winner = check_winner(board)\n draw = check_draw(winner, board)\n return True if winner or draw else False", "def gameOver():\n if len(p1)==0 and len(p1winnings)==0:\n return True\n elif len(p2)==0 and len(p2winnings)==0:\n return True\n return False", "def isGameOver(self):\n for i in range(self.rows):\n for j in range(self.columns):\n if self.grid[i][j].face == 'down':\n return False\n #if here then all cards must be face up\n return True", "def is_game_over(self):\n bk = False\n wk = False\n\n # Find the kings\n for row in range(8):\n for col in range(8):\n if self.board.squares[row][col] == ChessPiece.B_KING: # Black king symbol\n bk = True\n break\n if self.board.squares[row][col] == ChessPiece.W_KING: # Black king symbol\n wk = True\n break\n\n # If a king is missing, end the game. This fixes a bug we were having\n if bk == False:\n return 1\n if wk == False:\n return 2\n\n if self.white_wins():\n return 1\n elif self.black_wins():\n return 2\n elif self.tie():\n return 3\n else:\n return 0", "def isOpen(self):\n\t\treturn not self.endgame", "def game_over(self):\r\n win.blit(self.image_of_game_over, (250, 170))", "def check_game_over(self):\n for piece in self.pieces:\n if not piece.destroyed:\n return False\n print(\"Signal.END\")\n return True", "def is_game_win(self):\n return not self.deck and not self.hand", "def check_game(self):\n gameOver = None\n if self.turn > 4:\n gameOver = self.check_x_won()\n if gameOver is True:\n self.game_x_won()\n return\n\n gameOver = None\n if self.turn > 5:\n gameOver = self.check_o_won()\n if gameOver is True:\n self.game_o_won()\n return\n\n if self.turn >= 9:\n self.game_tie()\n return", "def game_over(self):\n if self.alive:\n return\n\n self.screen.fill(Color.BLACK)\n self.draw_text(\n \"GAME OVER\", WIN_CENTER, font=FONT_M, size=48, color=Color.WHITE\n )\n again = \"Press any key to play again\"\n again_pos = CENTER_W, WIN_H - BLOCK_H\n self.draw_text(again, again_pos, color=Color.WHITE)\n\n pygame.display.flip()\n self.wait_keydown()\n\n if self.running:\n self.reset()", "def __game_is_over(self, x, y):\n\t\tif np.count_nonzero(self.board) >= 42:\n\t\t\treturn True\n\n\t\tlines = self.__extract_lines(x, y)\n\n\t\tfor line in lines:\n\t\t\tif self.__winner_in_line(line) != 0:\n\t\t\t\treturn True\n\n\t\treturn False", "def GAMEOVER_LOOP():\n pass", "def check_loss(self):\n return POKEMON in self.get_game()", "def game_over(self):\n red_minion = 0\n blue_minion = 0\n red_master = 0\n blue_master = 0\n only_masters = True\n for row in self.board:\n for piece in row:\n if piece != 0:\n if not piece.master:\n if piece.player:\n blue_minion += 1\n else:\n red_minion += 
1\n only_masters = False\n else:\n if piece.player:\n blue_master += 1\n else:\n red_master += 1\n if blue_minion + blue_master == 0:\n self.winner = \"Red\"\n self.red_victories += 1\n self.number_of_games +=1\n self.game_over_screen()\n return True\n elif red_minion + red_master == 0:\n self.winner = \"Blue\"\n self.blue_victories += 1\n self.number_of_games +=1\n self.game_over_screen()\n return True\n elif only_masters:\n if red_master > blue_master:\n self.winner = \"Red\"\n self.red_victories += 1\n elif blue_master > red_master:\n self.winner = \"Blue\"\n self.blue_victories += 1\n else:\n self.winner = \"Nobody\"\n self.number_of_games +=1\n self.game_over_screen()\n return True\n \n return False", "def is_game_over(self):\n if max([max(row) for row in self.grid]) == 2 ** (self.grid_size ** 2):\n raise GameException('Congrats, You won !')\n\n # If there is a zero then the game is not over\n for row in self.grid:\n if 0 in row:\n return False\n\n # Check if two consecutive number (vertically or horizontally) are\n # equal. In this case the game is not over.\n for i in range(self.grid_size):\n for j in range(self.grid_size):\n # horizontal check\n if (i < self.grid_size - 1 and\n self.grid[i][j] == self.grid[i + 1][j]):\n return False\n # vertical check\n if (j < self.grid_size - 1 and\n self.grid[i][j] == self.grid[i][j + 1]):\n return False\n\n return True", "def game_over(state):\r\n return wins(state, HUMAN) or wins(state, COMP)", "def gameover(self):\n font = pygame.font.Font(None, CASE_SIZE)\n text = font.render('Game over!', True,(255, 255, 255), (0, 0, 0))\n self.screen.blit(text,(CASE_SIZE * 6, CASE_SIZE * 7))\n self.try_again()\n pygame.display.flip()", "def gameOver():\n PTS, COIN, LIVES = 0, 1, 2\n uniSprite = 0\n globalSound(\"stop\") # Stopping any music\n playSound(overSound, \"music\") # Playing game over music\n startTime = time.get_ticks()\n # Game over screen should only stay for 5 seconds\n while time.get_ticks() - startTime < 5000:\n for evnt in event.get():\n if evnt.type == QUIT:\n return \"exit\"\n # Drawing game over screen\n screen.fill(BLACK)\n uniSprite = spriteCounter(uniSprite)\n drawStats(None, None, marioScore[PTS], marioScore[COIN], time.get_ticks(), levelNum, True, True, statCoin,\n uniSprite, 0)\n screen.blit(overText,(300,300)) # Blitting game over text\n display.flip()\n fpsCounter.tick(60)\n return \"menu\"", "def win_game(self):\r\n self.board.clear_hovered_tiles_list()\r\n self.is_game_over = True\r\n self.reset_button.won_game()\r\n self.high_score.update(self.timer.seconds)", "def game_over(players):\n active_players = players_with_decks(players)\n if not active_players or len(active_players) == 1:\n return True\n return False", "def game_over(_user_id):\n _board = boards[_user_id]\n return _board.is_game_over()", "def _checkRoundOver(self):\n\n if not any(player.isAlive() for player in self.teams[0].players):\n self.endGame()", "def game_over(self, win=False):\n self.is_game_over = True\n\n if win:\n self.flags_pts.set_data(*np.where(self.mines)[::-1]) # shows mines marked with flags\n self.title_txt.set_text('You win! Press F2 to start a new game')\n else:\n self.wrong_img.set_data(self.wrong) # wrong guesses\n self.mines_pts = self.ax.plot(self.jj[self.mines & ~self.flags],\n self.ii[self.mines & ~self.flags],\n 'kX', ms=10) # shows mines\n self.title_txt.set_text('You lose! 
Press F2 to start a new game')\n\n self.refresh_canvas()", "def game_is_over(self) -> models.Conclusion:\n raise NotImplementedError", "async def check_game_over(self, game_id):\n game = await self.get_game(game_id)\n player1_stand = await self.check_player_standing(game[1])\n player2_stand = await self.check_player_standing(game[2])\n if player1_stand and player2_stand:\n return True\n else:\n return False", "def isGameOver(board, *args, **kwargs):\n black_win = consecutive_score(\"black\", board, board.size)\n white_win = consecutive_score(\"white\", board, board.size)\n if black_win >= 100 and white_win >= 100:\n return \"tie\"\n elif black_win >= 100 and white_win <= 100:\n return \"black\"\n elif black_win <= 100 and white_win >= 100:\n return \"white\"\n else:\n return -1", "def is_end_game(self):\n win = self.is_game_won()\n tie = self.game_is_tied()\n return win or tie", "def game_over(state):\n return wins(state, HUMAN) or wins(state, COMP)", "def game_over(self):\n\n if self._number_of_moves == 9:\n return True\n\n return self._number_of_moves == 9 or self.winner_found()", "def checkGameState(self, fpsclock, screen):\n if self.isWin() or self.isLost():\n if self.exitMenu(fpsclock, screen):\n return True\n return False", "def is_over(self):\n return (self.possible_moves() == []) or self.loss_condition()", "def check_player_reached():\n global round_start_timer, round_over\n\n if player1.alive and player1.rect.top < (platform_width // 2):\n add_time_points()\n reset_players()\n player1.wins += 1\n return True\n\n elif player2.alive and (player2.rect.top + player2.image.get_height()) > \\\n (SCREEN_HEIGHT - platform_width):\n player2.wins += 1\n round_over = True\n add_time_points()\n reset_players()\n return True", "def draw_game_over(self):\n arcade.draw_rectangle_filled(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2,\n SCREEN_WIDTH // 2,\n SCREEN_HEIGHT // 1.5, arcade.color.BRONZE)\n arcade.draw_rectangle_filled(SCREEN_WIDTH // 2, 410, 600, 140, arcade.color.COOL_GREY)\n arcade.draw_rectangle_filled(SCREEN_WIDTH // 2, 275, 550, 100, arcade.color.COOL_GREY)\n\n output = \"Oops, You Lost :(\"\n arcade.draw_text(output, 360, 381, arcade.color.BLACK, 66)\n\n output = \"Click Anywhere To Restart\"\n arcade.draw_text(output, 375, 258, arcade.color.BLACK, 40)", "def is_game_over(self):\n board = list(self.board)\n for wins in self.WINNING:\n # Create a tuple\n w = (board[wins[0]], board[wins[1]], board[wins[2]])\n if w == ('X', 'X', 'X'):\n return 'X'\n if w == ('O', 'O', 'O'):\n return 'O'\n # Check for stalemate\n if ' ' in board:\n return None\n return ' '", "def game_over(self):\n raise NotImplementedError(\"Abstract method\") # no mercy for stooges", "def game_over(self) -> bool:\n for row in range(9):\n for col in range(9):\n if self._grid_sol[row][col] != self.get_cell(row, col):\n return False\n return True", "def check_over(self):\n if self.board.has_winner() == 1:\n return 1\n elif self.board.has_winner() == 2:\n return 2\n elif self.board.check_cats_game():\n return 0\n else:\n return -1", "def is_over(self, state):\n return state.current_total == 0", "def is_over(self, board):\n if _winner(board) != 0:\n return True\n return False", "def isGameOver(self, boards):\n return self.deadTest(boards[0]) and self.deadTest(boards[1]) and self.deadTest(boards[2])", "def isGameOver(self, boards):\n return self.deadTest(boards[0]) and self.deadTest(boards[1]) and self.deadTest(boards[2])", "def is_winning_state(self):\n return self.game.is_winning_state()", "def check_for_end_of_game(self):\n 
return self.player_1.score + self.player_2.score >= self.number_of_cells", "def EndGame(self):\n check_endgame = not self.player.getPlayer().isGeneralExist()\n\n return check_endgame", "def is_over(self):\n for el1, el2, el3 in self.WINNING_POSITIONS:\n if self.board[el1] == self.board[el2] == self.board[el3]:\n if self.board[el1] == 0:\n continue\n\n self.winner = self.board[el1]\n return True\n\n if self.__class__.EMPTY_POSITION_COUNTER not in self.board:\n return True\n\n return False", "def verify_ending(self):\n self._fast_forward_to_penultimate_play()\n if self.game_status.game_over:\n # Game shouldn't be over quite yet!\n self.reset()\n return False\n\n self.apply_next_event()\n game_over = self.game_status.game_over\n excess_outs = self.game_status.excess_outs\n self.reset()\n return game_over and not excess_outs", "def game_over(winner):\n global in_play, outcome, score\n \n if winner == \"Dealer\":\n score -= 1\n if Dealer.busted:\n outcome = \"Player busted! New Deal?\"\n \n else:\n outcome = \"Dealer Wins! New Deal?\"\n \n else:\n score += 1\n if Player.busted:\n outcome = \"Dealer busted! New Deal?\"\n \n else:\n outcome = \"Player Wins! New Deal?\"\n \n in_play = False", "def is_over(self):\n return self.is_dead", "def is_over(self):\n return self.is_dead", "def is_over(self):\n return self.is_dead", "def uber_check_win(self):\n if self.player1.score == self.player2.score:\n print(\"It's a draw!\")\n elif self.player1.score > self.player2.score:\n print(\"Player 1 is a proper bad ass mother fucker\")\n else:\n print(\"Player numma 2 is a proper bad ass mother fucker\")", "def is_game_over(self):\n\n # This checks whether or not the board is full...\n if len(self.board.values()) == 100 and \\\n 0 not in self.board.values():\n p1 = self._longest_chain(1)\n p2 = self._longest_chain(2)\n if len(p1) > len(p2):\n return 1\n elif len(p2) > len(p1):\n return 2\n else:\n return 0\n\n # If it's not full. 
We check for boxes\n else:\n for x in range(self.width-1):\n for y in range(self.height-1):\n slice = self._slice((x,y), (2,2))\n if 0 not in slice[0] and 0 not in slice[1]:\n # is this slice a box?\n if slice[0][0] == slice[0][1] and \\\n slice[0][1] == slice[1][0] and \\\n slice[1][0] == slice[1][1]:\n return slice[0][0] # winner\n\n return -1 # game is not over", "def gameOver(self):\n i = 0 # accumulator for the number of None objects\n for x in range(ALIEN_ROWS):\n for y in range(ALIENS_IN_ROW):\n if self._aliens[x][y] ==None:\n i +=1\n if i == ALIEN_ROWS * ALIENS_IN_ROW:\n self._gameOver = True\n\n for x in range(ALIEN_ROWS):\n for y in range(ALIENS_IN_ROW):\n if self._aliens[x][y] !=None:\n positiony = self._aliens[x][y].getAY() - ALIEN_HEIGHT/2\n if posy<= self._dline:\n self._gameOver = False", "def _checkRoundOver(self):\n\n # if we already ended it doesn't matter\n if self.hasEnded():\n return\n\n if not any(player.isAlive() for player in self.teams[0].players):\n # allow continuing after wave 1\n if self._wave > 1:\n self.continueOrEndGame()\n else:\n self.endGame()", "def game_over(self, won=True):\n if won is True:\n self.game[\"game_status\"] = self.WON\n else:\n self.game[\"game_status\"] = self.DISCONNECTED\n db.save_game(self.game_id, self.game)", "def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables): #\n return True, -1\n\n return False, -1", "def draw_game_over(self):\n output = \"Game Over!\"\n arcade.draw_text(output, 250, 400, arcade.color.BLACK, 54)\n\n output = \"Click to restart\"\n arcade.draw_text(output, 330, 200, arcade.color.BLACK, 24)", "def verify_winner(self):\r\n return self.count_pegs() == 1", "def game_on(self):\n doc = self.documentation\n return (self.draw.accepted or doc[len(doc)-1].accepted) and (self.board.stones_set < self.board.max_nr_stones) and (self.board.score[opponent(self.draw.player)] > 0)", "def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)", "def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables):\n return True, -1\n return False, -1", "def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables):\n return True, -1\n return False, -1", "def check_win_lose(self):\n if self.b.get_player_i() == 7: # player got to the bank\n return 1 # win\n if self.b.get_chaser_i() == self.b.get_player_i(): # chaser catch the player\n return 2 # lose\n return 0 # nothing", "def lost():\n\tforeground_module.foreground_speed = 0\n\tbackground_module.background_speed = 0\n\tdisplay_fail_msg(win)\n\n\tif player_module.player.y > foreground_module.ground_y:\n\t\ttry:\n\t\t\tprocess_object.terminate()\n\t\texcept: pass\n\n\t\treturn True\n\treturn False", "def has_won(board, player):\r\n return False", "def is_game_lost(self):\n values = [self.hand[i]._lvalue + self.hand[i]._rvalue for i in range(len(self.hand))]\n return not sum_in_list_dyn(values, self.number_point)", "def game_tie(self):\n\n shape = self.board.shape\n if np.count_nonzero(self.board) == (shape[0] * shape[1]):\n # The board is full\n player = 0\n return True\n else:\n return False", "def handle_game_over(self, winner, end_state):\n #############################\n #\n #\n # YOUR CODE HERE\n #\n #\n ##############################\n print(\"Game over, these are the stats:\")\n print('Winner: ' + str(winner))\n print('End state: ' + 
str(end_state))", "def cell_is_game_over(self, y, x, map_data):\n # check for water\n if map_data[y][x] == self.WATER_SYMBOL:\n return True\n\n # check for anti-tank\n # up direction\n for i in range(y, -1, -1):\n if map_data[i][x] == self.ANTI_TANK_DOWN_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(i, x, map_data):\n break\n\n # down direction\n for i in range(y, self.y_size):\n if map_data[i][x] == self.ANTI_TANK_UP_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(i, x, map_data):\n break\n\n # left direction\n for i in range(x, -1, -1):\n if map_data[y][i] == self.ANTI_TANK_RIGHT_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(y, i, map_data):\n break\n\n # right direction\n for i in range(x, self.x_size):\n if map_data[y][i] == self.ANTI_TANK_LEFT_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(y, i, map_data):\n break\n\n # no water or anti-tank danger\n return False", "def check_if_tie():\n global game_still_going\n if '_' not in board:\n game_still_going = False", "def check_game_over(self, row, col):\n player_symbol = self.board[row][col]\n\n # Top Right: Row -1 Col 1\n # Bottom Left: Row 1 Col -1\n self.check_four_in_a_row(player_symbol, row, col, -1, 1, 1, -1)\n\n # Top Left: Row -1 Col -1\n # Bottom Right Row 1 Col 1\n self.check_four_in_a_row(player_symbol, row, col, -1, -1, 1, 1)\n\n # Horizontal: Row 0 Col 1, Row 0 Col -1\n self.check_four_in_a_row(player_symbol, row, col, 0, 1, 0, -1)\n\n # Vertical: Row 1 Col 0, Row -1 Col 0\n self.check_four_in_a_row(player_symbol, row, col, 1, 0, -1, 0)\n\n if self.turns >= self.num_playable_rows * self.num_playable_columns:\n self.game_over = True\n self.board_full = True", "def check_game_over(board: Board, whites_turn: bool) -> bool:\n if is_in_check(board, whites_turn) and can_move(board, whites_turn):\n turn = 'White' if whites_turn else 'Black'\n print()\n print(f'{turn} is in check')\n return False\n elif is_in_check(board, whites_turn) and can_move(board, whites_turn) == False:\n print()\n print('Checkmate')\n return True\n elif is_stalemate(board, whites_turn):\n print()\n print('Stalemate')\n return True\n else:\n return False" ]
[ "0.88837326", "0.83810914", "0.83710665", "0.8281846", "0.82191014", "0.8210489", "0.82002926", "0.80712324", "0.80512774", "0.8045562", "0.7989821", "0.79308355", "0.7928047", "0.7927743", "0.7903066", "0.7842491", "0.7819719", "0.77655524", "0.77628475", "0.7760699", "0.7757634", "0.77452826", "0.7695644", "0.76710814", "0.76402634", "0.76028264", "0.75854594", "0.7585281", "0.7529858", "0.7520863", "0.7517584", "0.7510557", "0.7420468", "0.7360333", "0.731862", "0.7316865", "0.72823906", "0.7279698", "0.72760504", "0.7258077", "0.7206231", "0.7194574", "0.7191711", "0.71769017", "0.71551245", "0.71500796", "0.7141239", "0.7134773", "0.7127699", "0.70947385", "0.7092184", "0.7085013", "0.7062464", "0.7061871", "0.7059201", "0.7052249", "0.7038398", "0.701214", "0.70078355", "0.6996354", "0.6991609", "0.698698", "0.69687885", "0.6949852", "0.694967", "0.69246614", "0.6920014", "0.6913598", "0.6913598", "0.6885692", "0.6884207", "0.6880081", "0.6876916", "0.6876832", "0.6874374", "0.6868966", "0.6868966", "0.6868966", "0.6867658", "0.68460107", "0.68405366", "0.68302107", "0.6819965", "0.67982227", "0.679413", "0.6781873", "0.67737234", "0.67661834", "0.67536026", "0.67536026", "0.674923", "0.67429066", "0.67256826", "0.6725654", "0.6723835", "0.67096317", "0.6699365", "0.6698258", "0.6690291", "0.66899735" ]
0.7849975
15