Dataset columns:
  path              string    (length 13–17)
  screenshot_names  sequence  (length 1–873)
  code              string    (length 0–40.4k)
  cell_type         string    (1 distinct value: "code")
105194628/cell_3
[ "text_plain_output_1.png" ]
place = input('1. Hill Station, 2. Beach. Enter the place you would like to visit: ')
if place == '1' or place == 'Hill Station':
    print('Let us plan our trip to the Hill Station now')
elif place == '2' or place == 'Beach':
    print('Let us pack our bags for the Sun View')
code
105194628/cell_5
[ "text_plain_output_1.png" ]
ask = input('We are planning to visit a Hill Station or a Temple. Would you like to join us? ')
if ask == 'Yes' or ask == 'yes':
    ask2 = input('Do you mind picking a place for our visit? We are thinking about Beach and Temple ')
    if ask2 == 'Beach' or ask2 == 'beach':
        print('Let us pack our bags for the best road trip to witness the beautiful Sun View')
    elif ask2 == 'Temple' or ask2 == 'temple':
        print('Let us witness the glorious architecture of the temple')
    else:
        print("We will ask for others' opinions before picking it up")
else:
    print('We will be looking forward to you joining us')
code
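Both cells above spell out every accepted capitalization by hand. A minimal, hypothetical sketch (names are illustrative, not from the notebooks) of the usual idiom: normalize the input once, then compare lowercase values.

choice = input('Pick a destination (beach/temple): ').strip().lower()
if choice == 'beach':
    print('Packing for the beach')
elif choice == 'temple':
    print('Off to the temple')
else:
    print('No destination picked yet')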
74050036/cell_13
[ "text_plain_output_1.png" ]
import json
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
code
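The cells from this notebook open the complaints file without ever closing the handle. A minimal alternative sketch, using the same path, that wraps the load in a context manager:

import json
import pandas as pd

with open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json') as f:
    data = json.load(f)
df = pd.json_normalize(data)  # flatten nested JSON records into columns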
74050036/cell_25
[ "text_html_output_1.png" ]
import json
import re
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
df.isna().sum(0)
# strip the leading '_' and the 'source.' prefix that json_normalize leaves behind
df.columns = [re.sub('^_', '', col) for col in df.columns]
list(df.columns)
df.columns = [re.sub('^\\bsource\\b\\.', '', col) for col in df.columns]
list(df.columns)
df.head()
code
74050036/cell_34
[ "text_plain_output_1.png" ]
import json
import re
import numpy as np
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
df.isna().sum(0)
df.columns = [re.sub('^_', '', col) for col in df.columns]
list(df.columns)
df.columns = [re.sub('^\\bsource\\b\\.', '', col) for col in df.columns]
list(df.columns)
df['complaint_what_happened'].replace('', np.nan, inplace=True)
df.dropna(subset=['complaint_what_happened'], inplace=True)
len(df[df['complaint_what_happened'] == ''])
df.shape
df['complaint_what_happened'].head()
code
74050036/cell_23
[ "text_plain_output_1.png" ]
import json
import re
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
df.isna().sum(0)
df.columns = [re.sub('^_', '', col) for col in df.columns]
list(df.columns)
code
74050036/cell_30
[ "text_plain_output_1.png" ]
import json
import re
import numpy as np
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
df.isna().sum(0)
df.columns = [re.sub('^_', '', col) for col in df.columns]
list(df.columns)
df.columns = [re.sub('^\\bsource\\b\\.', '', col) for col in df.columns]
list(df.columns)
df['complaint_what_happened'].replace('', np.nan, inplace=True)
df.dropna(subset=['complaint_what_happened'], inplace=True)
len(df[df['complaint_what_happened'] == ''])
df.shape
code
74050036/cell_29
[ "text_plain_output_1.png" ]
import json
import re
import numpy as np
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
df.isna().sum(0)
df.columns = [re.sub('^_', '', col) for col in df.columns]
list(df.columns)
df.columns = [re.sub('^\\bsource\\b\\.', '', col) for col in df.columns]
list(df.columns)
df['complaint_what_happened'].replace('', np.nan, inplace=True)
df.dropna(subset=['complaint_what_happened'], inplace=True)
len(df[df['complaint_what_happened'] == ''])
code
74050036/cell_11
[ "text_plain_output_1.png" ]
import json
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
code
74050036/cell_19
[ "text_plain_output_1.png" ]
import json
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
df.isna().sum(0)
code
74050036/cell_15
[ "text_plain_output_1.png" ]
import json
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
df.info()
code
74050036/cell_17
[ "text_html_output_1.png" ]
import json
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
df.describe()
code
74050036/cell_24
[ "text_plain_output_1.png" ]
import json
import re
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
df.isna().sum(0)
df.columns = [re.sub('^_', '', col) for col in df.columns]
list(df.columns)
df.columns = [re.sub('^\\bsource\\b\\.', '', col) for col in df.columns]
list(df.columns)
code
74050036/cell_10
[ "text_html_output_1.png" ]
import json
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.head()
code
74050036/cell_27
[ "text_plain_output_1.png" ]
import json
import re
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
df.isna().sum(0)
df.columns = [re.sub('^_', '', col) for col in df.columns]
list(df.columns)
df.columns = [re.sub('^\\bsource\\b\\.', '', col) for col in df.columns]
list(df.columns)
len(df[df['complaint_what_happened'] == ''])
code
74050036/cell_36
[ "text_html_output_1.png" ]
import json
import re
import string
import numpy as np
import pandas as pd

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

f = open('../input/automatic-ticket-classification-data/complaints-2021-05-14_08_16.json')
data = json.load(f)
df = pd.json_normalize(data)
df.shape
list(df.columns)
df.isna().sum(0)
df.columns = [re.sub('^_', '', col) for col in df.columns]
list(df.columns)
df.columns = [re.sub('^\\bsource\\b\\.', '', col) for col in df.columns]
list(df.columns)
df['complaint_what_happened'].replace('', np.nan, inplace=True)
df.dropna(subset=['complaint_what_happened'], inplace=True)
len(df[df['complaint_what_happened'] == ''])
df.shape

def clean_text(text):
    """This function
    - makes the given text lowercase
    - removes text in square brackets
    - removes punctuation and
    - removes words containing numbers.

    :param text: text to be cleaned
    :return: cleaned text
    """
    text = text.lower()
    text = re.sub('\\[.*?\\]', '', text)
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\\w*\\d\\w*', '', text)
    return text

df_clean = pd.DataFrame(df['complaint_what_happened'].apply(lambda x: clean_text(x)))
df_clean.head()
code
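A quick sanity check of clean_text on a made-up complaint string (illustrative input, not from the dataset):

sample = 'I was charged $100 twice on XX/XX/2021 [account redacted]!'
print(clean_text(sample))
# -> 'i was charged  twice on  '  (lowercased; the bracketed span, the
#    punctuation and every token containing a digit are removed, leaving
#    the original whitespace behind)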
34125586/cell_2
[ "image_output_1.png" ]
import os
import json
import time

import numpy as np
import pandas as pd
import requests
import plotly
import seaborn as sns
import matplotlib.pyplot as plt

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34125586/cell_18
[ "image_output_1.png" ]
import json

import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import seaborn as sns

name = 'sexualcuddler'
key = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'

def get_match_data(summoner_name, api_key):
    # Fetch the account id, then the match list, then every match;
    # dump one JSON object per line to Output.txt.
    url = 'https://na1.api.riotgames.com/lol/summoner/v4/summoners/by-name/'
    url += summoner_name + '?api_key=' + api_key
    data = requests.get(url).json()
    id = data['accountId']
    url = 'https://na1.api.riotgames.com/lol/match/v4/matchlists/by-account/'
    url += id + '?api_key=' + api_key
    data = requests.get(url).json()
    game_IDs = [x['gameId'] for x in data['matches']]
    list_of_games = []
    for game_id in game_IDs:
        url = 'https://na1.api.riotgames.com/lol/match/v4/matches/'
        url += str(game_id) + '?api_key=' + api_key
        list_of_games.append(requests.get(url).json())
    open('Output.txt', 'w').close()
    with open('Output.txt', 'w', encoding='utf-8') as f:
        for game in list_of_games:
            f.write('%s\n' % json.dumps(game))

def read_text_file():
    with open('/kaggle/input/leaguedata/Output.txt') as f:
        content = f.readlines()
    return [json.loads(x) for x in content]

def return_participant_ID(dict, name):
    for x in dict['participantIdentities']:
        if x['player']['summonerName'] == name:
            return x['participantId']

def get_stats(dict, ID):
    for x in dict['participants']:
        if x['participantId'] == ID:
            stat_dict = x['stats']
            stat_dict['championId'] = x['championId']
            stat_dict['spell1Id'] = x['spell1Id']
            stat_dict['spell2Id'] = x['spell2Id']
            stat_dict['game mode'] = dict['gameMode']
            stat_dict['gameDuration'] = dict['gameDuration']
            stat_dict['role'] = x['timeline']['role']
            stat_dict['lane'] = x['timeline']['lane']
            team_pos = 0
            if x['teamId'] == 200:
                team_pos = 1
            stat_dict['teamfirstTower'] = dict['teams'][team_pos]['firstTower']
            stat_dict['teamfirstDragon'] = dict['teams'][team_pos]['firstDragon']
            stat_dict['teamfirstBaron'] = dict['teams'][team_pos]['firstBaron']
            return stat_dict

def get_champ_dict():
    data = open('/kaggle/input/leaguedata/Champion Ids.txt', 'r', encoding='utf-8').read()
    data = json.loads(data)
    champions = [x for x in data['data']]
    dict = {int(data['data'][x]['key']): str(x) for x in champions}
    return dict

data = read_text_file()
list_of_df = []
for game in data:
    if 'status' not in game:
        id = return_participant_ID(game, name)
        stats = get_stats(game, id)
        list_of_df.append(pd.DataFrame(stats, index=[0]))
final_df = pd.concat(list_of_df).reset_index(drop=True)
final_df['championId'] = final_df['championId'].apply(lambda x: get_champ_dict()[x].strip())
final_df.rename(columns={'championId': 'Champion'}, inplace=True)

champ_list = final_df['Champion'].value_counts().iloc[0:10].index.to_list()
champ_df = final_df[final_df['Champion'].isin(champ_list)]
sns.set(style='darkgrid')
ax = sns.countplot(x='Champion', data=champ_df, order=champ_list)
ax.set_xticklabels(labels=champ_list, rotation=50)
ax.set_title("Tamoor's 10 Most Played Champions")
plt.show()

labels = ['Win', 'Loss']
counts = [len(final_df[final_df['win'] == True]), len(final_df[final_df['win'] == False])]
fig1, ax1 = plt.subplots()
ax1.pie(counts, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
ax1.set_title('Win/Loss Percentage')
plt.show()

jungle_df = final_df[final_df['lane'] == 'JUNGLE']
# jungle_df['teamfirstDragon'].value_counts()
ax = sns.countplot(x='teamfirstDragon', data=jungle_df)
ax.set_title("Tamoor's Team Obtained the First Dragon")
plt.show()

jungle_df.loc[(jungle_df['totalDamageDealtToChampions'] < 8000) & (jungle_df['gameDuration'] > 1920), 'Champion'].to_string(index=False)
code
34125586/cell_8
[ "text_plain_output_1.png" ]
import json

import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import requests

name = 'sexualcuddler'
key = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'

def get_match_data(summoner_name, api_key):
    url = 'https://na1.api.riotgames.com/lol/summoner/v4/summoners/by-name/'
    url += summoner_name + '?api_key=' + api_key
    data = requests.get(url).json()
    id = data['accountId']
    url = 'https://na1.api.riotgames.com/lol/match/v4/matchlists/by-account/'
    url += id + '?api_key=' + api_key
    data = requests.get(url).json()
    game_IDs = [x['gameId'] for x in data['matches']]
    list_of_games = []
    for game_id in game_IDs:
        url = 'https://na1.api.riotgames.com/lol/match/v4/matches/'
        url += str(game_id) + '?api_key=' + api_key
        list_of_games.append(requests.get(url).json())
    open('Output.txt', 'w').close()
    with open('Output.txt', 'w', encoding='utf-8') as f:
        for game in list_of_games:
            f.write('%s\n' % json.dumps(game))

def read_text_file():
    with open('/kaggle/input/leaguedata/Output.txt') as f:
        content = f.readlines()
    return [json.loads(x) for x in content]

def return_participant_ID(dict, name):
    for x in dict['participantIdentities']:
        if x['player']['summonerName'] == name:
            return x['participantId']

def get_stats(dict, ID):
    for x in dict['participants']:
        if x['participantId'] == ID:
            stat_dict = x['stats']
            stat_dict['championId'] = x['championId']
            stat_dict['spell1Id'] = x['spell1Id']
            stat_dict['spell2Id'] = x['spell2Id']
            stat_dict['game mode'] = dict['gameMode']
            stat_dict['gameDuration'] = dict['gameDuration']
            stat_dict['role'] = x['timeline']['role']
            stat_dict['lane'] = x['timeline']['lane']
            team_pos = 0
            if x['teamId'] == 200:
                team_pos = 1
            stat_dict['teamfirstTower'] = dict['teams'][team_pos]['firstTower']
            stat_dict['teamfirstDragon'] = dict['teams'][team_pos]['firstDragon']
            stat_dict['teamfirstBaron'] = dict['teams'][team_pos]['firstBaron']
            return stat_dict

def get_champ_dict():
    data = open('/kaggle/input/leaguedata/Champion Ids.txt', 'r', encoding='utf-8').read()
    data = json.loads(data)
    champions = [x for x in data['data']]
    dict = {int(data['data'][x]['key']): str(x) for x in champions}
    return dict

data = read_text_file()
list_of_df = []
for game in data:
    if 'status' not in game:
        id = return_participant_ID(game, name)
        stats = get_stats(game, id)
        list_of_df.append(pd.DataFrame(stats, index=[0]))
final_df = pd.concat(list_of_df).reset_index(drop=True)
final_df['championId'] = final_df['championId'].apply(lambda x: get_champ_dict()[x].strip())
final_df.rename(columns={'championId': 'Champion'}, inplace=True)

print('Number of Double Kills: {}'.format(final_df['doubleKills'].sum()))
print('Number of Triple Kills: {}'.format(final_df['tripleKills'].sum()))
print('Number of Quadra Kills: {}'.format(final_df['quadraKills'].sum()))
print('Number of Penta Kills: {}'.format(final_df['pentaKills'].sum()))
code
34125586/cell_16
[ "image_output_1.png" ]
import json

import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import seaborn as sns

name = 'sexualcuddler'
key = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'

def get_match_data(summoner_name, api_key):
    url = 'https://na1.api.riotgames.com/lol/summoner/v4/summoners/by-name/'
    url += summoner_name + '?api_key=' + api_key
    data = requests.get(url).json()
    id = data['accountId']
    url = 'https://na1.api.riotgames.com/lol/match/v4/matchlists/by-account/'
    url += id + '?api_key=' + api_key
    data = requests.get(url).json()
    game_IDs = [x['gameId'] for x in data['matches']]
    list_of_games = []
    for game_id in game_IDs:
        url = 'https://na1.api.riotgames.com/lol/match/v4/matches/'
        url += str(game_id) + '?api_key=' + api_key
        list_of_games.append(requests.get(url).json())
    open('Output.txt', 'w').close()
    with open('Output.txt', 'w', encoding='utf-8') as f:
        for game in list_of_games:
            f.write('%s\n' % json.dumps(game))

def read_text_file():
    with open('/kaggle/input/leaguedata/Output.txt') as f:
        content = f.readlines()
    return [json.loads(x) for x in content]

def return_participant_ID(dict, name):
    for x in dict['participantIdentities']:
        if x['player']['summonerName'] == name:
            return x['participantId']

def get_stats(dict, ID):
    for x in dict['participants']:
        if x['participantId'] == ID:
            stat_dict = x['stats']
            stat_dict['championId'] = x['championId']
            stat_dict['spell1Id'] = x['spell1Id']
            stat_dict['spell2Id'] = x['spell2Id']
            stat_dict['game mode'] = dict['gameMode']
            stat_dict['gameDuration'] = dict['gameDuration']
            stat_dict['role'] = x['timeline']['role']
            stat_dict['lane'] = x['timeline']['lane']
            team_pos = 0
            if x['teamId'] == 200:
                team_pos = 1
            stat_dict['teamfirstTower'] = dict['teams'][team_pos]['firstTower']
            stat_dict['teamfirstDragon'] = dict['teams'][team_pos]['firstDragon']
            stat_dict['teamfirstBaron'] = dict['teams'][team_pos]['firstBaron']
            return stat_dict

def get_champ_dict():
    data = open('/kaggle/input/leaguedata/Champion Ids.txt', 'r', encoding='utf-8').read()
    data = json.loads(data)
    champions = [x for x in data['data']]
    dict = {int(data['data'][x]['key']): str(x) for x in champions}
    return dict

data = read_text_file()
list_of_df = []
for game in data:
    if 'status' not in game:
        id = return_participant_ID(game, name)
        stats = get_stats(game, id)
        list_of_df.append(pd.DataFrame(stats, index=[0]))
final_df = pd.concat(list_of_df).reset_index(drop=True)
final_df['championId'] = final_df['championId'].apply(lambda x: get_champ_dict()[x].strip())
final_df.rename(columns={'championId': 'Champion'}, inplace=True)

champ_list = final_df['Champion'].value_counts().iloc[0:10].index.to_list()
champ_df = final_df[final_df['Champion'].isin(champ_list)]
sns.set(style='darkgrid')
ax = sns.countplot(x='Champion', data=champ_df, order=champ_list)
ax.set_xticklabels(labels=champ_list, rotation=50)
ax.set_title("Tamoor's 10 Most Played Champions")
plt.show()

labels = ['Win', 'Loss']
counts = [len(final_df[final_df['win'] == True]), len(final_df[final_df['win'] == False])]
fig1, ax1 = plt.subplots()
ax1.pie(counts, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
ax1.set_title('Win/Loss Percentage')
plt.show()

jungle_df = final_df[final_df['lane'] == 'JUNGLE']
# jungle_df['teamfirstDragon'].value_counts()
ax = sns.countplot(x='teamfirstDragon', data=jungle_df)
ax.set_title("Tamoor's Team Obtained the First Dragon")
plt.show()

ax = sns.scatterplot(x='gameDuration', y='totalDamageDealtToChampions', data=jungle_df)
ax.set_title('Champion Damage vs. Game Duration')
ax.set(xlabel='Game Duration (in minutes)', ylabel='Total Damage to Champs')
ax.set_xticklabels([round(x / 60, 0) for x in ax.get_xticks()])
plt.show()
code
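Relabeling ticks via set_xticklabels, as the scatter-plot cell above does, silently goes stale if matplotlib later recomputes the tick positions. A sketch of the formatter-based alternative for the same seconds-to-minutes conversion:

from matplotlib.ticker import FuncFormatter

# Render each x tick (game duration in seconds) as whole minutes.
ax.xaxis.set_major_formatter(FuncFormatter(lambda x, _: f'{x / 60:.0f}'))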
34125586/cell_14
[ "image_output_1.png" ]
import json

import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import seaborn as sns

name = 'sexualcuddler'
key = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'

def get_match_data(summoner_name, api_key):
    url = 'https://na1.api.riotgames.com/lol/summoner/v4/summoners/by-name/'
    url += summoner_name + '?api_key=' + api_key
    data = requests.get(url).json()
    id = data['accountId']
    url = 'https://na1.api.riotgames.com/lol/match/v4/matchlists/by-account/'
    url += id + '?api_key=' + api_key
    data = requests.get(url).json()
    game_IDs = [x['gameId'] for x in data['matches']]
    list_of_games = []
    for game_id in game_IDs:
        url = 'https://na1.api.riotgames.com/lol/match/v4/matches/'
        url += str(game_id) + '?api_key=' + api_key
        list_of_games.append(requests.get(url).json())
    open('Output.txt', 'w').close()
    with open('Output.txt', 'w', encoding='utf-8') as f:
        for game in list_of_games:
            f.write('%s\n' % json.dumps(game))

def read_text_file():
    with open('/kaggle/input/leaguedata/Output.txt') as f:
        content = f.readlines()
    return [json.loads(x) for x in content]

def return_participant_ID(dict, name):
    for x in dict['participantIdentities']:
        if x['player']['summonerName'] == name:
            return x['participantId']

def get_stats(dict, ID):
    for x in dict['participants']:
        if x['participantId'] == ID:
            stat_dict = x['stats']
            stat_dict['championId'] = x['championId']
            stat_dict['spell1Id'] = x['spell1Id']
            stat_dict['spell2Id'] = x['spell2Id']
            stat_dict['game mode'] = dict['gameMode']
            stat_dict['gameDuration'] = dict['gameDuration']
            stat_dict['role'] = x['timeline']['role']
            stat_dict['lane'] = x['timeline']['lane']
            team_pos = 0
            if x['teamId'] == 200:
                team_pos = 1
            stat_dict['teamfirstTower'] = dict['teams'][team_pos]['firstTower']
            stat_dict['teamfirstDragon'] = dict['teams'][team_pos]['firstDragon']
            stat_dict['teamfirstBaron'] = dict['teams'][team_pos]['firstBaron']
            return stat_dict

def get_champ_dict():
    data = open('/kaggle/input/leaguedata/Champion Ids.txt', 'r', encoding='utf-8').read()
    data = json.loads(data)
    champions = [x for x in data['data']]
    dict = {int(data['data'][x]['key']): str(x) for x in champions}
    return dict

data = read_text_file()
list_of_df = []
for game in data:
    if 'status' not in game:
        id = return_participant_ID(game, name)
        stats = get_stats(game, id)
        list_of_df.append(pd.DataFrame(stats, index=[0]))
final_df = pd.concat(list_of_df).reset_index(drop=True)
final_df['championId'] = final_df['championId'].apply(lambda x: get_champ_dict()[x].strip())
final_df.rename(columns={'championId': 'Champion'}, inplace=True)

champ_list = final_df['Champion'].value_counts().iloc[0:10].index.to_list()
champ_df = final_df[final_df['Champion'].isin(champ_list)]
sns.set(style='darkgrid')
ax = sns.countplot(x='Champion', data=champ_df, order=champ_list)
ax.set_xticklabels(labels=champ_list, rotation=50)
ax.set_title("Tamoor's 10 Most Played Champions")
plt.show()

labels = ['Win', 'Loss']
counts = [len(final_df[final_df['win'] == True]), len(final_df[final_df['win'] == False])]
fig1, ax1 = plt.subplots()
ax1.pie(counts, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
ax1.set_title('Win/Loss Percentage')
plt.show()

jungle_df = final_df[final_df['lane'] == 'JUNGLE']
ax = sns.countplot(x='teamfirstDragon', data=jungle_df)
ax.set_title("Tamoor's Team Obtained the First Dragon")
plt.show()
code
34125586/cell_10
[ "text_plain_output_1.png" ]
import json

import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import seaborn as sns

name = 'sexualcuddler'
key = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'

def get_match_data(summoner_name, api_key):
    url = 'https://na1.api.riotgames.com/lol/summoner/v4/summoners/by-name/'
    url += summoner_name + '?api_key=' + api_key
    data = requests.get(url).json()
    id = data['accountId']
    url = 'https://na1.api.riotgames.com/lol/match/v4/matchlists/by-account/'
    url += id + '?api_key=' + api_key
    data = requests.get(url).json()
    game_IDs = [x['gameId'] for x in data['matches']]
    list_of_games = []
    for game_id in game_IDs:
        url = 'https://na1.api.riotgames.com/lol/match/v4/matches/'
        url += str(game_id) + '?api_key=' + api_key
        list_of_games.append(requests.get(url).json())
    open('Output.txt', 'w').close()
    with open('Output.txt', 'w', encoding='utf-8') as f:
        for game in list_of_games:
            f.write('%s\n' % json.dumps(game))

def read_text_file():
    with open('/kaggle/input/leaguedata/Output.txt') as f:
        content = f.readlines()
    return [json.loads(x) for x in content]

def return_participant_ID(dict, name):
    for x in dict['participantIdentities']:
        if x['player']['summonerName'] == name:
            return x['participantId']

def get_stats(dict, ID):
    for x in dict['participants']:
        if x['participantId'] == ID:
            stat_dict = x['stats']
            stat_dict['championId'] = x['championId']
            stat_dict['spell1Id'] = x['spell1Id']
            stat_dict['spell2Id'] = x['spell2Id']
            stat_dict['game mode'] = dict['gameMode']
            stat_dict['gameDuration'] = dict['gameDuration']
            stat_dict['role'] = x['timeline']['role']
            stat_dict['lane'] = x['timeline']['lane']
            team_pos = 0
            if x['teamId'] == 200:
                team_pos = 1
            stat_dict['teamfirstTower'] = dict['teams'][team_pos]['firstTower']
            stat_dict['teamfirstDragon'] = dict['teams'][team_pos]['firstDragon']
            stat_dict['teamfirstBaron'] = dict['teams'][team_pos]['firstBaron']
            return stat_dict

def get_champ_dict():
    data = open('/kaggle/input/leaguedata/Champion Ids.txt', 'r', encoding='utf-8').read()
    data = json.loads(data)
    champions = [x for x in data['data']]
    dict = {int(data['data'][x]['key']): str(x) for x in champions}
    return dict

data = read_text_file()
list_of_df = []
for game in data:
    if 'status' not in game:
        id = return_participant_ID(game, name)
        stats = get_stats(game, id)
        list_of_df.append(pd.DataFrame(stats, index=[0]))
final_df = pd.concat(list_of_df).reset_index(drop=True)
final_df['championId'] = final_df['championId'].apply(lambda x: get_champ_dict()[x].strip())
final_df.rename(columns={'championId': 'Champion'}, inplace=True)

champ_list = final_df['Champion'].value_counts().iloc[0:10].index.to_list()
champ_df = final_df[final_df['Champion'].isin(champ_list)]
sns.set(style='darkgrid')
ax = sns.countplot(x='Champion', data=champ_df, order=champ_list)
ax.set_xticklabels(labels=champ_list, rotation=50)
ax.set_title("Tamoor's 10 Most Played Champions")
plt.show()
code
34125586/cell_12
[ "text_plain_output_1.png" ]
import json

import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import seaborn as sns

name = 'sexualcuddler'
key = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'

def get_match_data(summoner_name, api_key):
    url = 'https://na1.api.riotgames.com/lol/summoner/v4/summoners/by-name/'
    url += summoner_name + '?api_key=' + api_key
    data = requests.get(url).json()
    id = data['accountId']
    url = 'https://na1.api.riotgames.com/lol/match/v4/matchlists/by-account/'
    url += id + '?api_key=' + api_key
    data = requests.get(url).json()
    game_IDs = [x['gameId'] for x in data['matches']]
    list_of_games = []
    for game_id in game_IDs:
        url = 'https://na1.api.riotgames.com/lol/match/v4/matches/'
        url += str(game_id) + '?api_key=' + api_key
        list_of_games.append(requests.get(url).json())
    open('Output.txt', 'w').close()
    with open('Output.txt', 'w', encoding='utf-8') as f:
        for game in list_of_games:
            f.write('%s\n' % json.dumps(game))

def read_text_file():
    with open('/kaggle/input/leaguedata/Output.txt') as f:
        content = f.readlines()
    return [json.loads(x) for x in content]

def return_participant_ID(dict, name):
    for x in dict['participantIdentities']:
        if x['player']['summonerName'] == name:
            return x['participantId']

def get_stats(dict, ID):
    for x in dict['participants']:
        if x['participantId'] == ID:
            stat_dict = x['stats']
            stat_dict['championId'] = x['championId']
            stat_dict['spell1Id'] = x['spell1Id']
            stat_dict['spell2Id'] = x['spell2Id']
            stat_dict['game mode'] = dict['gameMode']
            stat_dict['gameDuration'] = dict['gameDuration']
            stat_dict['role'] = x['timeline']['role']
            stat_dict['lane'] = x['timeline']['lane']
            team_pos = 0
            if x['teamId'] == 200:
                team_pos = 1
            stat_dict['teamfirstTower'] = dict['teams'][team_pos]['firstTower']
            stat_dict['teamfirstDragon'] = dict['teams'][team_pos]['firstDragon']
            stat_dict['teamfirstBaron'] = dict['teams'][team_pos]['firstBaron']
            return stat_dict

def get_champ_dict():
    data = open('/kaggle/input/leaguedata/Champion Ids.txt', 'r', encoding='utf-8').read()
    data = json.loads(data)
    champions = [x for x in data['data']]
    dict = {int(data['data'][x]['key']): str(x) for x in champions}
    return dict

data = read_text_file()
list_of_df = []
for game in data:
    if 'status' not in game:
        id = return_participant_ID(game, name)
        stats = get_stats(game, id)
        list_of_df.append(pd.DataFrame(stats, index=[0]))
final_df = pd.concat(list_of_df).reset_index(drop=True)
final_df['championId'] = final_df['championId'].apply(lambda x: get_champ_dict()[x].strip())
final_df.rename(columns={'championId': 'Champion'}, inplace=True)

champ_list = final_df['Champion'].value_counts().iloc[0:10].index.to_list()
champ_df = final_df[final_df['Champion'].isin(champ_list)]
sns.set(style='darkgrid')
ax = sns.countplot(x='Champion', data=champ_df, order=champ_list)
ax.set_xticklabels(labels=champ_list, rotation=50)
ax.set_title("Tamoor's 10 Most Played Champions")
plt.show()

labels = ['Win', 'Loss']
counts = [len(final_df[final_df['win'] == True]), len(final_df[final_df['win'] == False])]
fig1, ax1 = plt.subplots()
ax1.pie(counts, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
ax1.set_title('Win/Loss Percentage')
plt.show()
code
121150743/cell_5
[ "text_plain_output_1.png" ]
pip install Keras-Preprocessing
code
128026093/cell_25
[ "image_output_1.png" ]
from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T

# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
ax, fig = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt='.3f')

fig, axes = plt.subplots(6, 3, figsize=(13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
    ax = axes[i // 3, i % 3]
    sns.histplot(x=col, data=train_df, kde=True, ax=ax)
    ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)

X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)

def adj_r2_score(predictors, targets, predictions):
    r2 = r2_score(targets, predictions)
    n = predictors.shape[0]
    k = predictors.shape[1]
    return 1 - (1 - r2) * (n - 1) / (n - k - 1)

def mape_score(targets, predictions):
    return np.mean(np.abs(targets - predictions) / targets) * 100

def model_performance_regression(model, predictors, target):
    """
    Function to compute different metrics to check regression model performance

    model: regressor
    predictors: independent variables
    target: dependent variable
    """
    pred = model.predict(predictors)
    r2 = r2_score(target, pred)
    adjr2 = adj_r2_score(predictors, target, pred)
    rmse = np.sqrt(mean_squared_error(target, pred))
    mae = mean_absolute_error(target, pred)
    mape = mape_score(target, pred)
    df_perf = pd.DataFrame({'RMSE': rmse, 'MAE': mae, 'R-squared': r2, 'Adj. R-squared': adjr2, 'MAPE': mape}, index=[0])
    return df_perf

dt_regressor = DecisionTreeRegressor(random_state=1)
dt_regressor.fit(X_train, y_train)
dt_regressor_perf_test = model_performance_regression(dt_regressor, X_val, y_val)
dt_regressor_perf_test
code
128026093/cell_30
[ "image_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, BaggingRegressor
from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T

# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
ax, fig = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt='.3f')

fig, axes = plt.subplots(6, 3, figsize=(13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
    ax = axes[i // 3, i % 3]
    sns.histplot(x=col, data=train_df, kde=True, ax=ax)
    ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)

X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)

def adj_r2_score(predictors, targets, predictions):
    r2 = r2_score(targets, predictions)
    n = predictors.shape[0]
    k = predictors.shape[1]
    return 1 - (1 - r2) * (n - 1) / (n - k - 1)

def mape_score(targets, predictions):
    return np.mean(np.abs(targets - predictions) / targets) * 100

def model_performance_regression(model, predictors, target):
    """
    Function to compute different metrics to check regression model performance

    model: regressor
    predictors: independent variables
    target: dependent variable
    """
    pred = model.predict(predictors)
    r2 = r2_score(target, pred)
    adjr2 = adj_r2_score(predictors, target, pred)
    rmse = np.sqrt(mean_squared_error(target, pred))
    mae = mean_absolute_error(target, pred)
    mape = mape_score(target, pred)
    df_perf = pd.DataFrame({'RMSE': rmse, 'MAE': mae, 'R-squared': r2, 'Adj. R-squared': adjr2, 'MAPE': mape}, index=[0])
    return df_perf

dt_regressor = DecisionTreeRegressor(random_state=1)
dt_regressor.fit(X_train, y_train)
dt_regressor_perf_test = model_performance_regression(dt_regressor, X_val, y_val)
dt_regressor_perf_test

features = list(X_train.columns)
importances = dt_regressor.feature_importances_
indices = np.argsort(importances)
plt.barh(range(len(indices)), importances[indices], color='violet', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])

regressor = RandomForestRegressor(n_estimators=100, random_state=1)
regressor.fit(X_train, y_train)
regressor_perf_test = model_performance_regression(regressor, X_val, y_val)
regressor_perf_test
code
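A hypothetical follow-up (not in the notebook) that puts the decision-tree and random-forest validation scores side by side, reusing the frames the cell above produces:

comparison = pd.concat(
    [dt_regressor_perf_test, regressor_perf_test],
    keys=['DecisionTree', 'RandomForest'],
)
print(comparison)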
128026093/cell_20
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T

# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
ax, fig = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt='.3f')

fig, axes = plt.subplots(6, 3, figsize=(13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
    ax = axes[i // 3, i % 3]
    sns.histplot(x=col, data=train_df, kde=True, ax=ax)
    ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)

X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)
print(X_train.shape, X_val.shape, y_train.shape, y_val.shape)
code
128026093/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
summary(train_df)
code
128026093/cell_26
[ "text_plain_output_1.png" ]
from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T

# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
ax, fig = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt='.3f')

fig, axes = plt.subplots(6, 3, figsize=(13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
    ax = axes[i // 3, i % 3]
    sns.histplot(x=col, data=train_df, kde=True, ax=ax)
    ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)

X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)

def adj_r2_score(predictors, targets, predictions):
    r2 = r2_score(targets, predictions)
    n = predictors.shape[0]
    k = predictors.shape[1]
    return 1 - (1 - r2) * (n - 1) / (n - k - 1)

def mape_score(targets, predictions):
    return np.mean(np.abs(targets - predictions) / targets) * 100

def model_performance_regression(model, predictors, target):
    """
    Function to compute different metrics to check regression model performance

    model: regressor
    predictors: independent variables
    target: dependent variable
    """
    pred = model.predict(predictors)
    r2 = r2_score(target, pred)
    adjr2 = adj_r2_score(predictors, target, pred)
    rmse = np.sqrt(mean_squared_error(target, pred))
    mae = mean_absolute_error(target, pred)
    mape = mape_score(target, pred)
    df_perf = pd.DataFrame({'RMSE': rmse, 'MAE': mae, 'R-squared': r2, 'Adj. R-squared': adjr2, 'MAPE': mape}, index=[0])
    return df_perf

dt_regressor = DecisionTreeRegressor(random_state=1)
dt_regressor.fit(X_train, y_train)
dt_regressor_perf_test = model_performance_regression(dt_regressor, X_val, y_val)
dt_regressor_perf_test

features = list(X_train.columns)
importances = dt_regressor.feature_importances_
indices = np.argsort(importances)
plt.figure(figsize=(5, 5))
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='violet', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])
plt.xlabel('Relative Importance')
plt.show()
code
128026093/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
code
128026093/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T
code
128026093/cell_28
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, BaggingRegressor
from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T

# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
ax, fig = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt='.3f')

fig, axes = plt.subplots(6, 3, figsize=(13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
    ax = axes[i // 3, i % 3]
    sns.histplot(x=col, data=train_df, kde=True, ax=ax)
    ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)

X = train_df.drop(['yield'], axis=1)
y = train_df['yield']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=14)

def adj_r2_score(predictors, targets, predictions):
    r2 = r2_score(targets, predictions)
    n = predictors.shape[0]
    k = predictors.shape[1]
    return 1 - (1 - r2) * (n - 1) / (n - k - 1)

def mape_score(targets, predictions):
    return np.mean(np.abs(targets - predictions) / targets) * 100

def model_performance_regression(model, predictors, target):
    """
    Function to compute different metrics to check regression model performance

    model: regressor
    predictors: independent variables
    target: dependent variable
    """
    pred = model.predict(predictors)
    r2 = r2_score(target, pred)
    adjr2 = adj_r2_score(predictors, target, pred)
    rmse = np.sqrt(mean_squared_error(target, pred))
    mae = mean_absolute_error(target, pred)
    mape = mape_score(target, pred)
    df_perf = pd.DataFrame({'RMSE': rmse, 'MAE': mae, 'R-squared': r2, 'Adj. R-squared': adjr2, 'MAPE': mape}, index=[0])
    return df_perf

dt_regressor = DecisionTreeRegressor(random_state=1)
dt_regressor.fit(X_train, y_train)
dt_regressor_perf_test = model_performance_regression(dt_regressor, X_val, y_val)
dt_regressor_perf_test

features = list(X_train.columns)
importances = dt_regressor.feature_importances_
indices = np.argsort(importances)
plt.barh(range(len(indices)), importances[indices], color='violet', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])

bagging_estimator = BaggingRegressor(random_state=1)
bagging_estimator.fit(X_train, y_train)
bagging_estimator_perf_test = model_performance_regression(bagging_estimator, X_val, y_val)
bagging_estimator_perf_test
code
128026093/cell_8
[ "image_output_1.png" ]
import pandas as pd

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
summary(test_df)
code
128026093/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T

# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
ax, fig = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt='.3f')

fig, axes = plt.subplots(6, 3, figsize=(13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
    ax = axes[i // 3, i % 3]
    sns.histplot(x=col, data=train_df, kde=True, ax=ax)
    ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
code
128026093/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T

# explore correlation of features
corr = train_df.corr()
mask = np.triu(corr)
ax, fig = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt='.3f')

fig, axes = plt.subplots(6, 3, figsize=(13, 13))
fig.suptitle('Histogram for all numerical variables in the dataset')
for i, col in enumerate(train_df.columns):
    ax = axes[i // 3, i % 3]
    sns.histplot(x=col, data=train_df, kde=True, ax=ax)
    ax.axvline(x=train_df[col].mean(), c='r', ls='-', lw=1)
plt.xticks(rotation=90)

plt.figure(figsize=(15, 8))
sns.boxplot(x='variable', y='value', data=pd.melt(train_df.drop(['yield'], axis=1))).set_title('Boxplot of each feature', size=15)
plt.xticks(rotation=90)
plt.show()
code
128026093/cell_14
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
train_df.describe().T

corr = train_df.corr()
mask = np.triu(corr)
ax, fig = plt.subplots(figsize=(13, 13))
sns.heatmap(corr, mask=mask, annot=True, fmt='.3f')
code
128026093/cell_12
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

def summary(df):
    result = pd.DataFrame(df.dtypes, columns=['data type'])
    result['#duplicate'] = df.duplicated().sum()
    result['#missing'] = df.isnull().sum().values
    result['#unique'] = df.nunique().values
    return result

train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
train_df.drop(['id'], axis=1, inplace=True)
test_df.drop(['id'], axis=1, inplace=True)
test_df.describe().T
code
128020724/cell_2
[ "image_output_1.png" ]
pip install fuzzy-c-means
code
128020724/cell_11
[ "text_plain_output_1.png" ]
from fcmeans import FCM
from matplotlib import pyplot as plt
from sklearn.datasets import load_digits
from sklearn.metrics import adjusted_rand_score
from sklearn.preprocessing import minmax_scale
import numpy as np  # linear algebra

digits = load_digits()
K = 10  # number of clusters
X = digits['data']
feature_names = digits['feature_names']
X = minmax_scale(X, feature_range=(0, 1))
npixels = int(np.sqrt(X.shape[1]))
y = digits['target']
target_names = digits['target_names']
images = digits['images']

for i in range(1, 101):
    ax = plt.subplot(10, 10, i)
    ax.imshow(images[i], cmap='Greys')
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)

fcm = FCM(n_clusters=K, m=1.1, max_iter=100, error=1e-09, random_state=911)
fcm.fit(X)
centers = fcm.centers
labels = fcm.predict(X)
print('ARI = {ari:.2}'.format(ari=adjusted_rand_score(y, labels)))
code
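Adjusted Rand Index compares two partitions without caring how the cluster labels are named, which is why it suits the unsupervised fit above. A tiny illustration with toy labels (not from the notebook):

from sklearn.metrics import adjusted_rand_score

print(adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0]))  # 1.0: same partition, swapped names
print(adjusted_rand_score([0, 0, 1, 1], [0, 1, 0, 1]))  # -0.5: unrelated partition (~0 expected for random labels)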
128020724/cell_19
[ "text_html_output_1.png" ]
from fcmeans import FCM
from matplotlib import pyplot as plt
from sklearn.datasets import load_digits
from sklearn.preprocessing import minmax_scale
import numpy as np  # linear algebra

digits = load_digits()
K = 10  # number of clusters
X = digits['data']
feature_names = digits['feature_names']
X = minmax_scale(X, feature_range=(0, 1))
npixels = int(np.sqrt(X.shape[1]))
y = digits['target']
target_names = digits['target_names']
images = digits['images']

for i in range(1, 101):
    ax = plt.subplot(10, 10, i)
    ax.imshow(images[i], cmap='Greys')
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)

fcm = FCM(n_clusters=K, m=1.1, max_iter=100, error=1e-09, random_state=911)
fcm.fit(X)
centers = fcm.centers
labels = fcm.predict(X)

for i in range(K):
    ax = plt.subplot(1, 10, i + 1)
    ax.imshow(np.reshape(fcm.centers[i], [npixels, npixels]), cmap='Greys')
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)

i1 = np.where(y == 9)       # label = 3 -> digit = 9
i2 = np.where(labels == 8)  # label = 8 -> digit = 3
S = list(set(i2[0]).intersection(set(i1[0])))
for i in range(len(S)):
    ax = plt.subplot(1, len(S), i + 1)
    ax.imshow(np.reshape(X[S[i]], [npixels, npixels]), cmap='Greys')
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)
    ax.set(title=str(i))

for i in range(3, 6):
    plt.figure()
    ax = plt.subplot(2, 3, 1)
    ax.imshow(np.reshape(X[S[i]], [npixels, npixels]), cmap='Greys')
    ax.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)
    ax = plt.subplot(2, 3, 3)
    ax.plot(np.log(fcm.soft_predict(X[S])[i]))
    ax.grid(visible=True)
    ax.set_xticks(list(range(10)))
    ax.set(xlabel='Label', ylabel='Membership (log-scale)')

fcm.soft_predict(X[[0]])
code
128020724/cell_7
[ "text_plain_output_1.png" ]
from matplotlib import pyplot as plt
from sklearn.datasets import load_digits
from sklearn.preprocessing import minmax_scale
import numpy as np  # linear algebra

digits = load_digits()
K = 10
X = digits['data']
feature_names = digits['feature_names']
X = minmax_scale(X, feature_range=(0, 1))
npixels = int(np.sqrt(X.shape[1]))
y = digits['target']
target_names = digits['target_names']
images = digits['images']

for i in range(1, 101):
    ax = plt.subplot(10, 10, i)
    ax.imshow(images[i], cmap='Greys')
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)
code
128020724/cell_18
[ "text_plain_output_1.png" ]
from matplotlib import pyplot as plt
import numpy as np

# Relies on X, npixels and the fitted fcm model from the earlier cells.
ax = plt.subplot(2, 3, 1)
ax.imshow(np.reshape(X[0], [npixels, npixels]), cmap='Greys')
ax.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
ax = plt.subplot(2, 3, 3)
ax.plot(np.log(fcm.soft_predict(X[[0]])[0]))
ax.grid(visible=True)
ax.set_xticks(list(range(10)))
ax.set(xlabel='Label', ylabel='Membership (log-scale)')
code
128020724/cell_16
[ "image_output_1.png" ]
from fcmeans import FCM
from matplotlib import pyplot as plt
from sklearn.datasets import load_digits
from sklearn.preprocessing import minmax_scale
import numpy as np  # linear algebra

digits = load_digits()
K = 10  # number of clusters
X = digits['data']
feature_names = digits['feature_names']
X = minmax_scale(X, feature_range=(0, 1))
npixels = int(np.sqrt(X.shape[1]))
y = digits['target']
target_names = digits['target_names']
images = digits['images']

for i in range(1, 101):
    ax = plt.subplot(10, 10, i)
    ax.imshow(images[i], cmap='Greys')
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)

fcm = FCM(n_clusters=K, m=1.1, max_iter=100, error=1e-09, random_state=911)
fcm.fit(X)
centers = fcm.centers
labels = fcm.predict(X)

for i in range(K):
    ax = plt.subplot(1, 10, i + 1)
    ax.imshow(np.reshape(fcm.centers[i], [npixels, npixels]), cmap='Greys')
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)

i1 = np.where(y == 9)
i2 = np.where(labels == 8)
S = list(set(i2[0]).intersection(set(i1[0])))
for i in range(len(S)):
    ax = plt.subplot(1, len(S), i + 1)
    ax.imshow(np.reshape(X[S[i]], [npixels, npixels]), cmap='Greys')
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)
    ax.set(title=str(i))

for i in range(3, 6):
    plt.figure()
    ax = plt.subplot(2, 3, 1)
    ax.imshow(np.reshape(X[S[i]], [npixels, npixels]), cmap='Greys')
    ax.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)
    ax = plt.subplot(2, 3, 3)
    ax.plot(np.log(fcm.soft_predict(X[S])[i]))
    ax.grid(visible=True)
    ax.set_xticks(list(range(10)))
    ax.set(xlabel='Label', ylabel='Membership (log-scale)')
code
128020724/cell_3
[ "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.datasets import load_digits
from sklearn.preprocessing import minmax_scale
from matplotlib import pyplot as plt
from fcmeans import FCM
from sklearn.metrics import adjusted_rand_score
from pandas import crosstab
code
128020724/cell_14
[ "text_plain_output_1.png" ]
from fcmeans import FCM
from matplotlib import pyplot as plt
from sklearn.datasets import load_digits
from sklearn.preprocessing import minmax_scale
import numpy as np  # linear algebra

digits = load_digits()
K = 10  # number of clusters
X = digits['data']
feature_names = digits['feature_names']
X = minmax_scale(X, feature_range=(0, 1))
npixels = int(np.sqrt(X.shape[1]))
y = digits['target']
target_names = digits['target_names']
images = digits['images']

for i in range(1, 101):
    ax = plt.subplot(10, 10, i)
    ax.imshow(images[i], cmap='Greys')
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)

fcm = FCM(n_clusters=K, m=1.1, max_iter=100, error=1e-09, random_state=911)
fcm.fit(X)
centers = fcm.centers
labels = fcm.predict(X)

for i in range(K):
    ax = plt.subplot(1, 10, i + 1)
    ax.imshow(np.reshape(fcm.centers[i], [npixels, npixels]), cmap='Greys')
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)
code
128020724/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from fcmeans import FCM
from matplotlib import pyplot as plt
from pandas import crosstab
from sklearn.datasets import load_digits
from sklearn.preprocessing import minmax_scale
import numpy as np  # linear algebra

digits = load_digits()
K = 10  # number of clusters
X = digits['data']
feature_names = digits['feature_names']
X = minmax_scale(X, feature_range=(0, 1))
npixels = int(np.sqrt(X.shape[1]))
y = digits['target']
target_names = digits['target_names']
images = digits['images']

for i in range(1, 101):
    ax = plt.subplot(10, 10, i)
    ax.imshow(images[i], cmap='Greys')
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)

fcm = FCM(n_clusters=K, m=1.1, max_iter=100, error=1e-09, random_state=911)
fcm.fit(X)
centers = fcm.centers
labels = fcm.predict(X)
crosstab(y, labels)
code
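The crosstab above pairs true digits with cluster ids. A hypothetical next step (not in the notebook): map each cluster to its dominant digit and score plain accuracy.

ct = crosstab(y, labels)
cluster_to_digit = ct.idxmax(axis=0)  # dominant digit per cluster
acc = (cluster_to_digit[labels].to_numpy() == y).mean()
print(acc)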
128020724/cell_5
[ "image_output_1.png" ]
from sklearn.datasets import load_digits

digits = load_digits()
print(digits.DESCR)
code
128031562/cell_4
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import nltk

nltk.download('punkt')
code
128031562/cell_2
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
!pip install newspaper3k
code
128031562/cell_1
[ "text_plain_output_1.png" ]
!pip install transformers torch
code
128031562/cell_10
[ "text_plain_output_1.png" ]
import feedparser
from bs4 import BeautifulSoup as soup  # assumed import: the cell calls soup(rd, 'xml') without defining it
from newspaper import Article
from transformers import ProphetNetForConditionalGeneration, ProphetNetTokenizer, pipeline
from urllib.request import urlopen

model = ProphetNetForConditionalGeneration.from_pretrained('microsoft/prophetnet-large-uncased-cnndm')
tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased-cnndm')

def prophetnet_summarizer(text, config=None):
    # Note: this reloads the base (non-CNN/DM) checkpoint on every call and
    # shadows the model/tokenizer globals defined above.
    if config:
        model = ProphetNetForConditionalGeneration.from_pretrained('microsoft/prophetnet-large-uncased')
        model.config.update(config)
    else:
        model = ProphetNetForConditionalGeneration.from_pretrained('microsoft/prophetnet-large-uncased')
    tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
    inputs = tokenizer(text, return_tensors='pt')
    summary_ids = model.generate(inputs['input_ids'], attention_mask=inputs['attention_mask'], num_beams=4, length_penalty=2.0, max_length=200, min_length=100, no_repeat_ngram_size=3)
    summary = tokenizer.batch_decode(summary_ids, skip_special_tokens=True)
    return summary

site = 'https://news.google.com/rss/search?q=politics'
op = urlopen(site)
rd = op.read()
op.close()
sp_page = soup(rd, 'xml')
news_list = sp_page.find_all('item')

# The Google News list above is immediately replaced by the Al Jazeera feed.
rss_feed = feedparser.parse('https://www.aljazeera.com/xml/rss/all.xml')
news_list = rss_feed.entries

c = 0
for i, news in enumerate(news_list):
    c += 1
    print(f'News Article {i + 1}:')
    print('Title: ', news.title)
    print('News Link: ', news.link)
    news_data = Article(news.link)
    news_data.download()
    news_data.parse()
    news_text = news_data.text
    summary = prophetnet_summarizer(news_text)
    print('News Summary: ', summary[0].capitalize())
    print('News Poster Link: ', news_data.top_image)
    print('-' * 60)
    if c >= 2:
        break
code
128031562/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from transformers import ProphetNetForConditionalGeneration, ProphetNetTokenizer, pipeline
model = ProphetNetForConditionalGeneration.from_pretrained('microsoft/prophetnet-large-uncased-cnndm')
tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased-cnndm')
code
128041917/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
import seaborn as sns
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
a.describe()
a.dtypes
a.isna().sum()
# derive age from ring count and drop the raw Rings column
a['age'] = a['Rings'] + 1.5
a.drop('Rings', axis=1, inplace=True)
fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8)) = plt.subplots(nrows=2, ncols=4, figsize=(25, 20))
sns.boxplot(ax=ax1, y='Length', data=a, color='green')
sns.boxplot(ax=ax2, y='Diameter', data=a, color='red')
sns.boxplot(ax=ax3, y='Height', data=a, color='limegreen')
sns.boxplot(ax=ax4, y='Whole weight', data=a, color='cyan')
sns.boxplot(ax=ax5, y='Shucked weight', data=a, color='salmon')
sns.boxplot(ax=ax6, y='Viscera weight', data=a, color='mediumorchid')
sns.boxplot(ax=ax7, y='Shell weight', data=a, color='lime')
sns.boxplot(ax=ax8, y='age', data=a, color='plum')
# per-sex means of the measurement features
a1 = a.copy()
a2 = a.copy()
a3 = a.copy()
a_m = a1[a1['Sex'] == 'M']
a_m.drop('Sex', axis=1, inplace=True)
a_f = a2[a2['Sex'] == 'F']
a_f.drop('Sex', axis=1, inplace=True)
a_i = a3[a3['Sex'] == 'I']
a_i.drop('Sex', axis=1, inplace=True)
a_m.drop(['age'], axis=1, inplace=True)
a_f.drop(['age'], axis=1, inplace=True)
a_i.drop(['age'], axis=1, inplace=True)
a_m = a_m.mean()
a_f = a_f.mean()
a_i = a_i.mean()
trace1 = go.Bar(x=a_m.index[::-1], y=a_m.values[::-1], name='M', marker=dict(color='cyan'))
trace2 = go.Bar(x=a_f.index[::-1], y=a_f.values[::-1], name='F', marker=dict(color='violet'))
trace3 = go.Bar(x=a_i.index[::-1], y=a_i.values[::-1], name='I', marker=dict(color='lightsteelblue'))
data = [trace1, trace2, trace3]
layout = go.Layout(title='Feature Distribution', width=800)
fig = go.Figure(data=data, layout=layout)
# per-sex mean age
a1 = a.copy()
a2 = a.copy()
a3 = a.copy()
a_m = a1[a1['Sex'] == 'M']
a_m.drop('Sex', axis=1, inplace=True)
a_f = a2[a2['Sex'] == 'F']
a_f.drop('Sex', axis=1, inplace=True)
a_i = a3[a3['Sex'] == 'I']
a_i.drop('Sex', axis=1, inplace=True)
a_m.drop(['Length', 'Diameter', 'Height', 'Whole weight', 'Shucked weight', 'Viscera weight', 'Shell weight'], axis=1, inplace=True)
a_f.drop(['Length', 'Diameter', 'Height', 'Whole weight', 'Shucked weight', 'Viscera weight', 'Shell weight'], axis=1, inplace=True)
a_i.drop(['Length', 'Diameter', 'Height', 'Whole weight', 'Shucked weight', 'Viscera weight', 'Shell weight'], axis=1, inplace=True)
a_m = a_m.mean()
a_f = a_f.mean()
a_i = a_i.mean()
trace1 = go.Bar(x=a_m.index[::-1], y=a_m.values[::-1], name='M', marker=dict(color='limegreen'))
trace2 = go.Bar(x=a_f.index[::-1], y=a_f.values[::-1], name='F', marker=dict(color='olive'))
trace3 = go.Bar(x=a_i.index[::-1], y=a_i.values[::-1], name='I', marker=dict(color='seagreen'))
data = [trace1, trace2, trace3]
layout = go.Layout(title='Feature Distribution', width=800)
fig = go.Figure(data=data, layout=layout)
sns.pairplot(a, hue='Sex', palette='viridis_r')
code
128041917/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
a.describe()
a.dtypes
a.isna().sum()
a['age'] = a['Rings'] + 1.5
a.drop('Rings', axis=1, inplace=True)
a
code
128041917/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
a.describe()
a.dtypes
code
128041917/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
code
128041917/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
a.describe()
a.dtypes
a.isna().sum()
a['age'] = a['Rings'] + 1.5
a.drop('Rings', axis=1, inplace=True)
# per-sex means of the measurement features
a1 = a.copy()
a2 = a.copy()
a3 = a.copy()
a_m = a1[a1['Sex'] == 'M']
a_m.drop('Sex', axis=1, inplace=True)
a_f = a2[a2['Sex'] == 'F']
a_f.drop('Sex', axis=1, inplace=True)
a_i = a3[a3['Sex'] == 'I']
a_i.drop('Sex', axis=1, inplace=True)
a_m.drop(['age'], axis=1, inplace=True)
a_f.drop(['age'], axis=1, inplace=True)
a_i.drop(['age'], axis=1, inplace=True)
a_m = a_m.mean()
a_f = a_f.mean()
a_i = a_i.mean()
trace1 = go.Bar(x=a_m.index[::-1], y=a_m.values[::-1], name='M', marker=dict(color='cyan'))
trace2 = go.Bar(x=a_f.index[::-1], y=a_f.values[::-1], name='F', marker=dict(color='violet'))
trace3 = go.Bar(x=a_i.index[::-1], y=a_i.values[::-1], name='I', marker=dict(color='lightsteelblue'))
data = [trace1, trace2, trace3]
layout = go.Layout(title='Feature Distribution', width=800)
fig = go.Figure(data=data, layout=layout)
# per-sex mean age
a1 = a.copy()
a2 = a.copy()
a3 = a.copy()
a_m = a1[a1['Sex'] == 'M']
a_m.drop('Sex', axis=1, inplace=True)
a_f = a2[a2['Sex'] == 'F']
a_f.drop('Sex', axis=1, inplace=True)
a_i = a3[a3['Sex'] == 'I']
a_i.drop('Sex', axis=1, inplace=True)
a_m.drop(['Length', 'Diameter', 'Height', 'Whole weight', 'Shucked weight', 'Viscera weight', 'Shell weight'], axis=1, inplace=True)
a_f.drop(['Length', 'Diameter', 'Height', 'Whole weight', 'Shucked weight', 'Viscera weight', 'Shell weight'], axis=1, inplace=True)
a_i.drop(['Length', 'Diameter', 'Height', 'Whole weight', 'Shucked weight', 'Viscera weight', 'Shell weight'], axis=1, inplace=True)
a_m = a_m.mean()
a_f = a_f.mean()
a_i = a_i.mean()
trace1 = go.Bar(x=a_m.index[::-1], y=a_m.values[::-1], name='M', marker=dict(color='limegreen'))
trace2 = go.Bar(x=a_f.index[::-1], y=a_f.values[::-1], name='F', marker=dict(color='olive'))
trace3 = go.Bar(x=a_i.index[::-1], y=a_i.values[::-1], name='I', marker=dict(color='seagreen'))
data = [trace1, trace2, trace3]
layout = go.Layout(title='Feature Distribution', width=800)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
code
128041917/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.tail()
code
128041917/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
color = sns.color_palette()
import matplotlib.pyplot as plt
import cufflinks as cf
import plotly.offline as py
color = sns.color_palette()
import plotly.graph_objs as go
py.init_notebook_mode(connected=True)
import plotly.tools as tls
import warnings
warnings.filterwarnings('ignore')
code
128041917/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
a.describe()
a.dtypes
a.isna().sum()
a['age'] = a['Rings'] + 1.5
a.drop('Rings', axis=1, inplace=True)
a1 = a.copy()
a2 = a.copy()
a3 = a.copy()
a_m = a1[a1['Sex'] == 'M']
a_m.drop('Sex', axis=1, inplace=True)
a_f = a2[a2['Sex'] == 'F']
a_f.drop('Sex', axis=1, inplace=True)
a_i = a3[a3['Sex'] == 'I']
a_i.drop('Sex', axis=1, inplace=True)
a_m.drop(['age'], axis=1, inplace=True)
a_f.drop(['age'], axis=1, inplace=True)
a_i.drop(['age'], axis=1, inplace=True)
a_m = a_m.mean()
a_f = a_f.mean()
a_i = a_i.mean()
trace1 = go.Bar(x=a_m.index[::-1], y=a_m.values[::-1], name='M', marker=dict(color='cyan'))
trace2 = go.Bar(x=a_f.index[::-1], y=a_f.values[::-1], name='F', marker=dict(color='violet'))
trace3 = go.Bar(x=a_i.index[::-1], y=a_i.values[::-1], name='I', marker=dict(color='lightsteelblue'))
data = [trace1, trace2, trace3]
layout = go.Layout(title='Feature Distribution', width=800)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
code
128041917/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
code
128041917/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
a.describe()
a.dtypes
a.isna().sum()
a['age'] = a['Rings'] + 1.5
a.drop('Rings', axis=1, inplace=True)
fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8)) = plt.subplots(nrows=2, ncols=4, figsize=(25, 20))
sns.boxplot(ax=ax1, y='Length', data=a, color='green')
sns.boxplot(ax=ax2, y='Diameter', data=a, color='red')
sns.boxplot(ax=ax3, y='Height', data=a, color='limegreen')
sns.boxplot(ax=ax4, y='Whole weight', data=a, color='cyan')
sns.boxplot(ax=ax5, y='Shucked weight', data=a, color='salmon')
sns.boxplot(ax=ax6, y='Viscera weight', data=a, color='mediumorchid')
sns.boxplot(ax=ax7, y='Shell weight', data=a, color='lime')
sns.boxplot(ax=ax8, y='age', data=a, color='plum')
code
128041917/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
a.describe()
code
128041917/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
a.describe()
a.dtypes
a.isna().sum()
a['age'] = a['Rings'] + 1.5
a.drop('Rings', axis=1, inplace=True)
a_sex = a['Sex'].value_counts()
print(a_sex)
a['Sex'].value_counts().plot(kind='bar')
code
128041917/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
a.describe()
a.dtypes
a.isna().sum()
a['age'] = a['Rings'] + 1.5
a.drop('Rings', axis=1, inplace=True)
a.info()
code
128041917/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
a.describe()
a.dtypes
a.isna().sum()
a['age'] = a['Rings'] + 1.5
a.drop('Rings', axis=1, inplace=True)
a1 = a.copy()
a2 = a.copy()
a3 = a.copy()
a_m = a1[a1['Sex'] == 'M']
a_m.drop('Sex', axis=1, inplace=True)
a_f = a2[a2['Sex'] == 'F']
a_f.drop('Sex', axis=1, inplace=True)
a_i = a3[a3['Sex'] == 'I']
a_i.drop('Sex', axis=1, inplace=True)
a_m.drop(['age'], axis=1, inplace=True)
a_f.drop(['age'], axis=1, inplace=True)
a_i.drop(['age'], axis=1, inplace=True)
a_m = a_m.mean()
a_f = a_f.mean()
a_i = a_i.mean()
trace1 = go.Bar(x=a_m.index[::-1], y=a_m.values[::-1], name='M', marker=dict(color='cyan'))
trace2 = go.Bar(x=a_f.index[::-1], y=a_f.values[::-1], name='F', marker=dict(color='violet'))
trace3 = go.Bar(x=a_i.index[::-1], y=a_i.values[::-1], name='I', marker=dict(color='lightsteelblue'))
data = [trace1, trace2, trace3]
layout = go.Layout(title='Feature Distribution', width=800)
fig = go.Figure(data=data, layout=layout)
a1 = a.copy()
a2 = a.copy()
a3 = a.copy()
a_m = a1[a1['Sex'] == 'M']
a_m.drop('Sex', axis=1, inplace=True)
a_f = a2[a2['Sex'] == 'F']
a_f.drop('Sex', axis=1, inplace=True)
a_i = a3[a3['Sex'] == 'I']
a_i.drop('Sex', axis=1, inplace=True)
a_m.drop(['Length', 'Diameter', 'Height', 'Whole weight', 'Shucked weight', 'Viscera weight', 'Shell weight'], axis=1, inplace=True)
a_f.drop(['Length', 'Diameter', 'Height', 'Whole weight', 'Shucked weight', 'Viscera weight', 'Shell weight'], axis=1, inplace=True)
a_i.drop(['Length', 'Diameter', 'Height', 'Whole weight', 'Shucked weight', 'Viscera weight', 'Shell weight'], axis=1, inplace=True)
a_m = a_m.mean()
a_f = a_f.mean()
a_i = a_i.mean()
trace1 = go.Bar(x=a_m.index[::-1], y=a_m.values[::-1], name='M', marker=dict(color='limegreen'))
trace2 = go.Bar(x=a_f.index[::-1], y=a_f.values[::-1], name='F', marker=dict(color='olive'))
trace3 = go.Bar(x=a_i.index[::-1], y=a_i.values[::-1], name='I', marker=dict(color='seagreen'))
data = [trace1, trace2, trace3]
layout = go.Layout(title='Feature Distribution', width=800)
fig = go.Figure(data=data, layout=layout)
a.columns
code
128041917/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.shape
a.describe()
a.dtypes
a.isna().sum()
code
128041917/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
a = pd.read_csv('/kaggle/input/abalone-dataset/abalone.csv')
a
a.head()
code
128025497/cell_13
[ "text_plain_output_1.png" ]
from sklearn.feature_selection import chi2
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train_data = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_data = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
plt.xticks(rotation=90)
train_data['symptom_sum'] = train_data.iloc[:, 1:-1].sum(axis=1)
X = train_data.drop(['id', 'prognosis'], axis=1).reset_index(drop=True)
y = train_data['prognosis'].reset_index(drop=True)
le = LabelEncoder()
y = le.fit_transform(y)
y = np.array(y)
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)
scores, p_values = chi2(X_scaled, y)
feature_scores = {feature: score for feature, score in zip(X.columns, p_values)}
sorted_scores = sorted(feature_scores.items(), key=lambda x: x[1], reverse=True)
fig, ax = plt.subplots(figsize=(10, 10))
ax.barh(range(len(sorted_scores)), [score[1] for score in sorted_scores])
ax.set_yticks(range(len(sorted_scores)))
ax.set_yticklabels([score[0] for score in sorted_scores])
ax.set_xlabel('p_value')
ax.set_title('Feature p_value by chi-squared test')
plt.show()
code
128025497/cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train_data = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_data = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
print('Number of missing values in each column:')
print(train_data.isnull().sum())
print('Number of duplicate rows:')
print(train_data.duplicated().sum())
print('Distribution of the target variable:')
print(train_data['prognosis'].value_counts())
plt.hist(train_data['prognosis'], bins=30, width=0.5)
plt.title('Histogram of the target variable')
plt.xticks(rotation=90)
plt.show()
code
128025497/cell_20
[ "text_plain_output_1.png" ]
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils.class_weight import compute_sample_weight
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train_data = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_data = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
plt.xticks(rotation=90)
train_data['symptom_sum'] = train_data.iloc[:, 1:-1].sum(axis=1)
X = train_data.drop(['id', 'prognosis'], axis=1).reset_index(drop=True)
y = train_data['prognosis'].reset_index(drop=True)
le = LabelEncoder()
y = le.fit_transform(y)
y = np.array(y)
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)
def map3_score(estimator, X_train, y_true):
    """
    Computes the mean Average Precision (MAP) at 3 score between the true labels
    and the predicted labels. Assumes that there is only one true label per sample.
    Args:
        y_true (array-like): True labels array, shape (n_samples,)
        y_pred (array-like): Predicted probabilities matrix, shape (n_samples, n_classes)
    Returns:
        float: MAP@3 score
    """
    score = 0.0
    y_pred_top3 = estimator.predict(X_train)[0]
    for i in range(len(y_true)):
        true_label = y_true[i]
        pred_labels = y_pred_top3[i]
        ap = 0.0
        for j in range(3):
            if pred_labels[j] == true_label:
                ap = 1.0 / (j + 1)
                break
        score += ap
    map3 = score / len(y_true)
    return map3
map3_scorer = make_scorer(map3_score, greater_is_better=True)
class_weights = compute_sample_weight('balanced', y=y)
svm_classifier = SVCTop3Classifier()  # custom top-3 SVC wrapper, presumably defined in an earlier cell of this notebook
param_grid = {'C': [0.1, 1, 10], 'kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'gamma': ['scale', 'auto'], 'max_iter': [-1]}
grid_search = GridSearchCV(svm_classifier, param_grid, scoring=map3_score, cv=5, verbose=3)
grid_search.fit(X_scaled, y, sample_weight=class_weights)
results_df = pd.DataFrame(grid_search.cv_results_)
for i, row in results_df.sort_values(by=['rank_test_score']).head(10).iterrows():
    print(row['mean_test_score'], row['std_test_score'], row['params'])
code
128025497/cell_2
[ "image_output_1.png" ]
import pandas as pd
train_data = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_data = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
print(train_data.sample(5))
code
128025497/cell_19
[ "image_output_1.png" ]
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils.class_weight import compute_sample_weight
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train_data = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_data = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
plt.xticks(rotation=90)
train_data['symptom_sum'] = train_data.iloc[:, 1:-1].sum(axis=1)
X = train_data.drop(['id', 'prognosis'], axis=1).reset_index(drop=True)
y = train_data['prognosis'].reset_index(drop=True)
le = LabelEncoder()
y = le.fit_transform(y)
y = np.array(y)
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)
def map3_score(estimator, X_train, y_true):
    """
    Computes the mean Average Precision (MAP) at 3 score between the true labels
    and the predicted labels. Assumes that there is only one true label per sample.
    Args:
        y_true (array-like): True labels array, shape (n_samples,)
        y_pred (array-like): Predicted probabilities matrix, shape (n_samples, n_classes)
    Returns:
        float: MAP@3 score
    """
    score = 0.0
    y_pred_top3 = estimator.predict(X_train)[0]
    for i in range(len(y_true)):
        true_label = y_true[i]
        pred_labels = y_pred_top3[i]
        ap = 0.0
        for j in range(3):
            if pred_labels[j] == true_label:
                ap = 1.0 / (j + 1)
                break
        score += ap
    map3 = score / len(y_true)
    return map3
map3_scorer = make_scorer(map3_score, greater_is_better=True)
class_weights = compute_sample_weight('balanced', y=y)
svm_classifier = SVCTop3Classifier()  # custom top-3 SVC wrapper, presumably defined in an earlier cell of this notebook
param_grid = {'C': [0.1, 1, 10], 'kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'gamma': ['scale', 'auto'], 'max_iter': [-1]}
grid_search = GridSearchCV(svm_classifier, param_grid, scoring=map3_score, cv=5, verbose=3)
grid_search.fit(X_scaled, y, sample_weight=class_weights)
print('Best Parameters: ', grid_search.best_params_)
print('Best Score: ', grid_search.best_score_)
code
128025497/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train_data = pd.read_csv('/kaggle/input/playground-series-s3e13/train.csv')
test_data = pd.read_csv('/kaggle/input/playground-series-s3e13/test.csv')
plt.xticks(rotation=90)
train_data['symptom_sum'] = train_data.iloc[:, 1:-1].sum(axis=1)
plt.hist(train_data['symptom_sum'], bins=len(np.unique(train_data['symptom_sum'])))
plt.title('Histogram of symptom count')
plt.xlabel('Symptom count')
plt.ylabel('Number of entries')
plt.show()
code
128025497/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
"""X = train_data.drop(['id', 'prognosis'], axis=1).reset_index(drop = True) print(len(X.columns)) significant_features = [score[0] for score in sorted_scores if score[1] < 0.05] X_significant = X[significant_features] print(len(X_significant.columns)) X_scaled = scaler.fit_transform(X_significant)"""
code
104129266/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
training = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv')
training.fillna(0)
cat_cols = [col for col in training.columns if training[col].dtype == 'object']
cat_cols
code
104129266/cell_6
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
training = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv')
training.fillna(0)
cat_cols = [col for col in training.columns if training[col].dtype == 'object']
cat_cols
training = training[cat_cols + ['failure']]
training
woe = pd.crosstab(training['product_code'], training['failure'])
woe['sum'] = woe[0] + woe[1]
woe['%0'] = woe[0] / woe[0].sum()
woe['%1'] = woe[1] / woe[1].sum()
woe['woe'] = np.log(woe['%1'] / woe['%0'])
woe
code
104129266/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
training = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv')
training.info()
code
104129266/cell_7
[ "text_html_output_1.png" ]
import category_encoders as ce
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
training = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv')
training.fillna(0)
cat_cols = [col for col in training.columns if training[col].dtype == 'object']
cat_cols
training = training[cat_cols + ['failure']]
training
import category_encoders as ce
woe_encoder = ce.WOEEncoder(cols=cat_cols).fit(X=training[cat_cols], y=training['failure'])
temp = woe_encoder.transform(training[cat_cols])
temp
code
104129266/cell_8
[ "text_html_output_1.png" ]
import category_encoders as ce
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
training = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv')
training.fillna(0)
cat_cols = [col for col in training.columns if training[col].dtype == 'object']
cat_cols
training = training[cat_cols + ['failure']]
training
import category_encoders as ce
woe_encoder = ce.WOEEncoder(cols=cat_cols).fit(X=training[cat_cols], y=training['failure'])
temp = woe_encoder.transform(training[cat_cols])
temp
temp['failure'] = training['failure']
temp.corr()
code
104129266/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
training = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv')
training.fillna(0)
code
104129266/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
training = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv')
training.fillna(0)
cat_cols = [col for col in training.columns if training[col].dtype == 'object']
cat_cols
training = training[cat_cols + ['failure']]
training
code
33099008/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
df_2 = df_1.dropna()
df_2.shape
code
33099008/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df['software_os_version'].value_counts()
code
33099008/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df.head()
code
33099008/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df['device_brand'].value_counts()
code
33099008/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.head()
code
33099008/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33099008/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df['software_os_vendor'].value_counts()
code
33099008/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df['software_os_name'].value_counts()
code
33099008/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
vodafone_subset_6.head(10)
code
33099008/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df['device_type_rus'].value_counts()
code
33099008/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df_1 = pd.get_dummies(df, columns=['phone_value', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus'])
df_1.dtypes.value_counts()
code
33099008/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
vodafone_subset_6 = pd.read_csv('../input/vodafone6nm/vodafone-subset-6.csv')
df = vodafone_subset_6[['target', 'ROUM', 'phone_value', 'DATA_VOLUME_WEEKDAYS', 'DATA_VOLUME_WEEKENDS', 'device_brand', 'software_os_vendor', 'software_os_name', 'software_os_version', 'device_type_rus', 'AVG_ARPU', 'lifetime', 'how_long_same_model', 'ecommerce_score', 'banks_sms_count', 'instagram_volume', 'viber_volume', 'linkedin_volume', 'tinder_volume', 'telegram_volume', 'google_volume', 'whatsapp_volume', 'youtube_volume']]
df.info()
code
34128312/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/youtube-new/USvideos.csv')
print(df.columns)
code
34128312/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/youtube-new/USvideos.csv')
cnt_video_per_category = df.groupby(['category_id']).count().reset_index()
cnt_video_per_category = cnt_video_per_category.loc[:, ['category_id', 'video_id']]
# category_df (an id-to-name lookup table) is presumably built in an earlier cell of this notebook
df_1 = pd.merge(cnt_video_per_category, category_df, left_on='category_id', right_on='category_id', how='left')
df_1 = df_1.sort_values(by='video_id', ascending=False)
df_1['Proportion'] = round(df_1['video_id'] / sum(df_1['video_id']) * 100, 2)
print(df_1)
sns.set(style='whitegrid')
plt.figure(figsize=(11, 10))
ax = sns.barplot(x='category_name', y='video_id', data=df_1)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
plt.tight_layout()
plt.show()
code
34128312/cell_7
[ "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/youtube-new/USvideos.csv')
cnt_video_per_category = df.groupby(["category_id"]).count().reset_index()
cnt_video_per_category = cnt_video_per_category.loc[:, ['category_id', 'video_id']]
# category_df (an id-to-name lookup table) is presumably built in an earlier cell of this notebook
df_1 = pd.merge(cnt_video_per_category, category_df, left_on='category_id', right_on='category_id', how='left')
df_1 = df_1.sort_values(by='video_id', ascending=False)
df_1["Proportion"] = round((df_1["video_id"] / sum(df_1["video_id"]) * 100), 2)
print(df_1)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
ax = sns.barplot(x="category_name", y="video_id", data=df_1)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
df['likes_rate'] = df['likes'] / df['views'] * 100
df['dislikes_rate'] = df['dislikes'] / df['views'] * 100
df['comment_rate'] = df['comment_count'] / df['views'] * 100
cnt_likes_per_video_per_category = df.groupby('category_id').mean().reset_index()
cnt_likes_per_video_per_category = cnt_likes_per_video_per_category.loc[:, ['category_id', 'likes_rate', 'dislikes_rate', 'comment_rate']]
df_2 = pd.merge(cnt_likes_per_video_per_category, category_df, left_on='category_id', right_on='category_id', how='left')
print(df_2)
df_2 = df_2.sort_values(by='likes_rate', ascending=False)
sns.set(style='whitegrid')
plt.figure(figsize=(11, 10))
plt.title('likes rate')
ax = sns.barplot(x='category_name', y='likes_rate', data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
plt.tight_layout()
plt.show()
df_2 = df_2.sort_values(by='dislikes_rate', ascending=False)
sns.set(style='whitegrid')
plt.figure(figsize=(11, 10))
plt.title('dislikes rate')
ax = sns.barplot(x='category_name', y='dislikes_rate', data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
plt.tight_layout()
plt.show()
df_2 = df_2.sort_values(by='comment_rate', ascending=False)
sns.set(style='whitegrid')
plt.figure(figsize=(11, 10))
plt.title('comments rate')
ax = sns.barplot(x='category_name', y='comment_rate', data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
plt.tight_layout()
plt.show()
code
34128312/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/youtube-new/USvideos.csv')
df.head()
code
122260425/cell_21
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd
traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
df_itemfat = df.Item_Fat_Content.value_counts().reset_index().rename(columns={'index': 'Fat_Content', 'Item_Fat_Content': 'Number_of_items'})
df_itemfat
display(df['Item_Fat_Content'].value_counts(normalize=True).round(2))
print('\n')
df['Item_Fat_Content'].value_counts()
code
122260425/cell_9
[ "image_output_1.png" ]
import pandas as pd
traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
code