Dataset schema: path (string, 13-17 chars), screenshot_names (sequence of strings, 1-873 items), code (string, 0-40.4k chars), cell_type (string, 1 class: 'code').
50244608/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
sns.set_style('darkgrid')
# Drop constant / identifier columns that carry no signal
employees.drop(['EmployeeCount', 'StandardHours', 'Over18', 'EmployeeNumber'], axis=1, inplace=True)
# Attrition is compared as 1/0 here; the raw CSV stores 'Yes'/'No' (see the note after this cell)
left = employees[employees['Attrition'] == 1]
stayed = employees[employees['Attrition'] == 0]
stayed.describe()
code
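Note: the cell above filters on Attrition == 1 / == 0, but the raw IBM HR CSV stores Attrition as 'Yes'/'No'; the encoding cell is not captured in this dump. A plausible reconstruction of that missing step (an assumption, not the notebook's verbatim code):

# Hypothetical reconstruction of the uncaptured encoding cell
employees['Attrition'] = employees['Attrition'].apply(lambda x: 1 if x == 'Yes' else 0)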
50244608/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
sns.set_style('darkgrid')
employees.hist(bins=30, figsize=(20, 20))
code
50244608/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
code
50244608/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
code
50244608/cell_5
[ "image_output_1.png" ]
!pip install plotly==4.14.1
code
50244608/cell_36
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

employees = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
employees.shape
employees.dtypes
employees.isnull().sum()
employees.duplicated().sum()
sns.set_style('darkgrid')
employees.drop(['EmployeeCount', 'StandardHours', 'Over18', 'EmployeeNumber'], axis=1, inplace=True)
correlations = employees.corr()  # on pandas >= 2.0 this needs numeric_only=True
f, ax = plt.subplots(figsize=(20, 20))
sns.heatmap(correlations, annot=True);
plt.figure(figsize=[25, 12])
sns.countplot(x='Age', hue='Attrition', data=employees, palette='seismic_r')
code
17118616/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from nltk.tokenize import RegexpTokenizer
import pandas as pd
import gensim
from gensim.models import Word2Vec

forum_posts = pd.read_csv('../input/meta-kaggle/ForumMessages.csv')
sample_data = forum_posts.Message[:100].astype('str').tolist()
tokenizer = RegexpTokenizer('\\w+')
sample_data_tokenized = [w.lower() for w in sample_data]
sample_data_tokenized = [tokenizer.tokenize(i) for i in sample_data_tokenized]
# KeyedVectors.load_word2vec_format() returns frozen vectors that cannot be
# trained further, which is what produced the stderr output here. To fine-tune
# on new text, build a trainable Word2Vec model and seed it with the pretrained
# vectors instead (gensim 3.x API assumed below).
model = Word2Vec(size=300, min_count=1)
model.build_vocab(sample_data_tokenized)
model.intersect_word2vec_format('../input/word2vec-google/GoogleNews-vectors-negative300.bin', binary=True, lockf=1.0)
model.train(sample_data_tokenized, total_examples=model.corpus_count, epochs=model.epochs)
code
128007844/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, round

spark = SparkSession.builder.appName('SparkByExamples.com').getOrCreate()
df = spark.read.csv('/kaggle/input/students-exam-scores/Original_data_with_more_rows.csv', header=True)
df.createOrReplaceTempView('students_score')
# Columns are read as strings (no inferSchema); Spark casts them implicitly for the arithmetic below
df = df.withColumn('total_score', col('MathScore') + col('ReadingScore') + col('WritingScore'))
df = df.withColumn('Percentage', round(col('total_score') / 300 * 100, 2))
df.show()
code
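The cell above leans on Spark's implicit string-to-double casting, since the CSV is read without inferSchema. A safer variant is to cast the score columns explicitly first; a minimal sketch using the same column names (the 'int' cast is a choice, not the notebook's code):

from pyspark.sql.functions import col

# Cast the score columns explicitly before doing arithmetic on them
for c in ['MathScore', 'ReadingScore', 'WritingScore']:
    df = df.withColumn(c, col(c).cast('int'))
df = df.withColumn('total_score', col('MathScore') + col('ReadingScore') + col('WritingScore'))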
128007844/cell_1
[ "text_plain_output_1.png" ]
pip install pyspark
code
128007844/cell_7
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('SparkByExamples.com').getOrCreate()
df = spark.read.csv('/kaggle/input/students-exam-scores/Original_data_with_more_rows.csv', header=True)
df_1 = spark.read.csv('/kaggle/input/students-exam-scores/Expanded_data_with_more_features.csv', header=True)
df_1.show()
column_names = df_1.columns
print(column_names)
code
128007844/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('SparkByExamples.com').getOrCreate()
code
73072005/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/breast-cancer-detection/train.csv')
test = pd.read_csv('../input/breast-cancer-detection/test.csv')
train.columns = ['_'.join(col.split(' ')).lower() for col in train.columns]
test.columns = ['_'.join(col.split(' ')).lower() for col in test.columns]
a = train.columns
b = test.columns
train = train.drop('unnamed:_32', axis=1)
test = test.drop('unnamed:_32', axis=1)
print('Null values in Train dataset are as follows:\n')
a = train.isnull().sum()
print(a[a > 0])
print('\n', 'o' * 80, '\n')
print('Null values in Test dataset are as follows:\n')
b = test.isnull().sum()
print(b[b > 0])
code
73072005/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/breast-cancer-detection/train.csv')
test = pd.read_csv('../input/breast-cancer-detection/test.csv')
train.columns = ['_'.join(col.split(' ')).lower() for col in train.columns]
test.columns = ['_'.join(col.split(' ')).lower() for col in test.columns]
print('Columns in train dataset are as follows:\n')
a = train.columns
print(*a, sep=', ')
print('\n', '$' * 100, '\n')
print('Columns in test dataset are as follows:\n')
b = test.columns
print(*b, sep=', ')
code
73072005/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler

# X_train / X_valid come from a train/validation split made in a cell not captured here
ss = StandardScaler()
X_train = ss.fit_transform(X_train)
X_valid = ss.transform(X_valid)
code
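cell_20 references X_train / X_valid, but the cell that created the split was not captured in this dump. A plausible sketch of what it looked like (feature/target column names and split parameters are assumptions):

from sklearn.model_selection import train_test_split

# Hypothetical reconstruction: 'diagnosis' as target, remaining columns as features
X = train.drop(['id', 'diagnosis'], axis=1)
y = train['diagnosis']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=42)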
73072005/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/breast-cancer-detection/train.csv')
test = pd.read_csv('../input/breast-cancer-detection/test.csv')
test.head()
code
73072005/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

print('Priyatama is ready!')
code
73072005/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/breast-cancer-detection/train.csv')
test = pd.read_csv('../input/breast-cancer-detection/test.csv')
train.columns = ['_'.join(col.split(' ')).lower() for col in train.columns]
test.columns = ['_'.join(col.split(' ')).lower() for col in test.columns]
a = train.columns
b = test.columns
train = train.drop('unnamed:_32', axis=1)
test = test.drop('unnamed:_32', axis=1)
print('Object type columns in Train dataset are as follows:\n')
print(train.select_dtypes(exclude='float64'))
print('\n', '$' * 100, '\n')
print('Object type columns in Test dataset are as follows:\n')
print(test.select_dtypes(exclude='float64'))
code
73072005/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/breast-cancer-detection/train.csv')
test = pd.read_csv('../input/breast-cancer-detection/test.csv')
train.columns = ['_'.join(col.split(' ')).lower() for col in train.columns]
test.columns = ['_'.join(col.split(' ')).lower() for col in test.columns]
a = train.columns
b = test.columns
train = train.drop('unnamed:_32', axis=1)
test = test.drop('unnamed:_32', axis=1)
a = train.isnull().sum()
b = test.isnull().sum()
corr = train.corr()
fig, ax = plt.subplots(figsize=(7, 7))
sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values)
code
73072005/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/breast-cancer-detection/train.csv')
test = pd.read_csv('../input/breast-cancer-detection/test.csv')
train.columns = ['_'.join(col.split(' ')).lower() for col in train.columns]
test.columns = ['_'.join(col.split(' ')).lower() for col in test.columns]
a = train.columns
b = test.columns
train = train.drop('unnamed:_32', axis=1)
test = test.drop('unnamed:_32', axis=1)
train.diagnosis.value_counts().plot(kind='bar')
code
73072005/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/breast-cancer-detection/train.csv')
test = pd.read_csv('../input/breast-cancer-detection/test.csv')
train.head()
code
105199235/cell_11
[ "text_plain_output_1.png" ]
!ls
code
105199235/cell_10
[ "text_plain_output_1.png" ]
!pip wheel --verbose --no-binary cython-bbox==0.1.3 cython-bbox -w /kaggle/working/
!pip wheel --verbose --no-binary lap==0.4.0 lap -w /kaggle/working/
!pip wheel --verbose --no-binary loguru==0.6.0 loguru -w /kaggle/working/
!pip wheel --verbose --no-binary thop==0.1.1.post2209072238 thop -w /kaggle/working/
!git clone https://github.com/ifzhang/ByteTrack.git
!cd ByteTrack && python3 setup.py bdist_wheel && cp -r ./dist/* /kaggle/working/
!rm -rf /kaggle/working/ByteTrack
code
105199235/cell_12
[ "text_plain_output_1.png" ]
!pip install cython_bbox-0.1.3-cp37-cp37m-linux_x86_64.whl
!pip install lap-0.4.0-cp37-cp37m-linux_x86_64.whl
!pip install yolox-0.1.0-cp37-cp37m-linux_x86_64.whl
code
73096303/cell_21
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

tf.__version__
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
labels = tf.keras.utils.to_categorical(data['Recommended IND'])
output_shape = labels.shape[1]
(labels, output_shape)

v_size = 2000    # vocabulary size
max_len = 100    # tokens kept per review
e_dim = 64       # embedding dimension
batch_size = 256

# X_train / X_val / X_test and the matching y_* arrays come from a split cell
# not captured in this record
pre_processing_layer = TextVectorization(max_tokens=v_size, output_sequence_length=max_len, name='Notes_preprocessing_layer')
pre_processing_layer.adapt(X_train)
vocab = pre_processing_layer.get_vocabulary()
tf.keras.backend.clear_session()
model = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(input_dim=v_size, output_dim=e_dim, name='embedding', mask_zero=True),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(output_shape, activation='softmax'),
])
metrics = [tf.keras.metrics.CategoricalAccuracy()]
model.summary()
model.compile(optimizer='adam', loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False), metrics=metrics)
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
valid_dataset = tf.data.Dataset.from_tensor_slices((X_val, y_val))
test_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test))
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
raw_train_ds = train_dataset.shuffle(X_train.shape[0]).batch(batch_size).with_options(options)
raw_val_ds = valid_dataset.batch(batch_size).with_options(options)
raw_test_ds = test_dataset.batch(batch_size).with_options(options)

@tf.autograph.experimental.do_not_convert
def vectorize_text(text, label):
    """ convert text to tokens """
    text = tf.expand_dims(text, -1)
    return (pre_processing_layer(text), label)

text_batch, label_batch = next(iter(raw_train_ds.shuffle(50)))
first_review, first_label = (text_batch[0], label_batch[0])
print('Review: ', first_review)
print('Label: ', tf.argmax(first_label))
print('Vectorized review', vectorize_text(first_review, first_label))
code
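cell_21 and the later training cells reference X_train / X_val / X_test and matching y_* arrays whose split cell is not captured in this dump. A plausible reconstruction under assumed split sizes:

from sklearn.model_selection import train_test_split

# Hypothetical reconstruction of the missing split cell: 70/15/15 train/val/test
X = data['Review Text'].values
X_train, X_tmp, y_train, y_tmp = train_test_split(X, labels, test_size=0.3, random_state=42)
X_val, X_test, y_val, y_test = train_test_split(X_tmp, y_tmp, test_size=0.5, random_state=42)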
73096303/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
data['Recommended IND'].value_counts()
code
73096303/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.head()
code
73096303/cell_23
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

tf.__version__
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
labels = tf.keras.utils.to_categorical(data['Recommended IND'])
output_shape = labels.shape[1]
(labels, output_shape)
v_size = 2000
max_len = 100
e_dim = 64
batch_size = 256
# X_train / X_val / X_test and the y_* arrays come from a split cell not captured here
pre_processing_layer = TextVectorization(max_tokens=v_size, output_sequence_length=max_len, name='Notes_preprocessing_layer')
pre_processing_layer.adapt(X_train)
vocab = pre_processing_layer.get_vocabulary()
tf.keras.backend.clear_session()
model = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(input_dim=v_size, output_dim=e_dim, name='embedding', mask_zero=True),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(output_shape, activation='softmax'),
])
metrics = [tf.keras.metrics.CategoricalAccuracy()]
model.summary()
model.compile(optimizer='adam', loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False), metrics=metrics)
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
valid_dataset = tf.data.Dataset.from_tensor_slices((X_val, y_val))
test_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test))
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
raw_train_ds = train_dataset.shuffle(X_train.shape[0]).batch(batch_size).with_options(options)
raw_val_ds = valid_dataset.batch(batch_size).with_options(options)
raw_test_ds = test_dataset.batch(batch_size).with_options(options)

@tf.autograph.experimental.do_not_convert
def vectorize_text(text, label):
    """ convert text to tokens """
    text = tf.expand_dims(text, -1)
    return (pre_processing_layer(text), label)

text_batch, label_batch = next(iter(raw_train_ds.shuffle(50)))
first_review, first_label = (text_batch[0], label_batch[0])
train_ds = raw_train_ds.map(vectorize_text)
val_ds = raw_val_ds.map(vectorize_text)
test_ds = raw_test_ds.map(vectorize_text)
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)
model.fit(train_ds, validation_data=val_ds, epochs=3, verbose=1)
code
73096303/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
code
73096303/cell_2
[ "text_plain_output_1.png" ]
import tensorflow as tf

tf.__version__
code
73096303/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
X = data['Review Text'].values
X[:3]
code
73096303/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

tf.__version__
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
labels = tf.keras.utils.to_categorical(data['Recommended IND'])
output_shape = labels.shape[1]
(labels, output_shape)
v_size = 2000
max_len = 100
e_dim = 64
batch_size = 256
tf.keras.backend.clear_session()
model = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(input_dim=v_size, output_dim=e_dim, name='embedding', mask_zero=True),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(output_shape, activation='softmax'),
])
metrics = [tf.keras.metrics.CategoricalAccuracy()]
model.summary()
model.compile(optimizer='adam', loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False), metrics=metrics)
print('Ready to Train')
code
73096303/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import Normalize, rgb2hex
from IPython.display import HTML

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73096303/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
data['Recommended IND'].isnull().sum()
code
73096303/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

tf.__version__
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
labels = tf.keras.utils.to_categorical(data['Recommended IND'])
output_shape = labels.shape[1]
(labels, output_shape)
v_size = 2000
max_len = 100
e_dim = 64
batch_size = 256
tf.keras.backend.clear_session()
model = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(input_dim=v_size, output_dim=e_dim, name='embedding', mask_zero=True),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(output_shape, activation='softmax'),
])
metrics = [tf.keras.metrics.CategoricalAccuracy()]
model.summary()
code
73096303/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
data['Review Text'].str.split().apply(lambda x: len(x)).describe()
code
73096303/cell_24
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

tf.__version__
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
labels = tf.keras.utils.to_categorical(data['Recommended IND'])
output_shape = labels.shape[1]
(labels, output_shape)
v_size = 2000
max_len = 100
e_dim = 64
batch_size = 256
# X_train / X_val / X_test and the y_* arrays come from a split cell not captured here
pre_processing_layer = TextVectorization(max_tokens=v_size, output_sequence_length=max_len, name='Notes_preprocessing_layer')
pre_processing_layer.adapt(X_train)
vocab = pre_processing_layer.get_vocabulary()
tf.keras.backend.clear_session()
model = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(input_dim=v_size, output_dim=e_dim, name='embedding', mask_zero=True),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(output_shape, activation='softmax'),
])
metrics = [tf.keras.metrics.CategoricalAccuracy()]
model.summary()
model.compile(optimizer='adam', loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False), metrics=metrics)
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
valid_dataset = tf.data.Dataset.from_tensor_slices((X_val, y_val))
test_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test))
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
raw_train_ds = train_dataset.shuffle(X_train.shape[0]).batch(batch_size).with_options(options)
raw_val_ds = valid_dataset.batch(batch_size).with_options(options)
raw_test_ds = test_dataset.batch(batch_size).with_options(options)

@tf.autograph.experimental.do_not_convert
def vectorize_text(text, label):
    """ convert text to tokens """
    text = tf.expand_dims(text, -1)
    return (pre_processing_layer(text), label)

text_batch, label_batch = next(iter(raw_train_ds.shuffle(50)))
first_review, first_label = (text_batch[0], label_batch[0])
train_ds = raw_train_ds.map(vectorize_text)
val_ds = raw_val_ds.map(vectorize_text)
test_ds = raw_test_ds.map(vectorize_text)
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)
model.fit(train_ds, validation_data=val_ds, epochs=3, verbose=1)
model.evaluate(test_ds)
code
73096303/cell_14
[ "text_plain_output_1.png" ]
print(X_train.shape, y_train.shape)
print(X_val.shape, y_val.shape)
print(X_test.shape, y_test.shape)
code
73096303/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

tf.__version__
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
labels = tf.keras.utils.to_categorical(data['Recommended IND'])
output_shape = labels.shape[1]
(labels, output_shape)
code
73096303/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
code
90142380/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
angka = int(input('Enter a number = '))  # prompt translated from Indonesian ('Masukkan Bilangan Angka')
biner = bin(angka).replace('0b', '')
oktal = oct(angka).replace('0o', '')
hexa = hex(angka).replace('0x', '')
print(biner, oktal, hexa)
code
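The replace() calls above strip the '0b'/'0o'/'0x' prefixes by hand; Python's format() specifiers produce the same digits without string surgery. A small equivalent sketch:

# Equivalent conversion using format specifiers
angka = 42
print(format(angka, 'b'), format(angka, 'o'), format(angka, 'x'))  # 101010 52 2a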
2043499/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from numba import jit
import math
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

gift_pref = pd.read_csv('../input/child_wishlist_v2.csv', header=None).drop(0, axis=1).values
child_pref = pd.read_csv('../input/gift_goodkids_v2.csv', header=None).drop(0, axis=1).values
random_sub = pd.read_csv('../input/sample_submission_random_v2.csv').values.tolist()

n_children = 1000000
n_gift_type = 1000
n_gift_quantity = 1000
n_gift_pref = 100
n_child_pref = 1000
twins = math.ceil(0.04 * n_children / 2.0) * 2
triplets = math.ceil(0.005 * n_children / 3.0) * 3
ratio_gift_happiness = 2
ratio_child_happiness = 2

@jit(nopython=True)
def gcd(x, y):
    """ return greatest common divisor for two integers """
    while y != 0:
        x, y = (y, x % y)
    return x

@jit(nopython=True)
def lcm(a, b):
    """Compute the lowest common multiple of a and b"""
    return a * b // gcd(a, b)

@jit(nopython=True)
def anh(pred, child_pref, gift_pref):
    n_children = 1000000
    n_gift_type = 1000
    n_gift_quantity = 1000
    n_gift_pref = 100
    n_child_pref = 1000
    twins = math.ceil(0.04 * n_children / 2.0) * 2
    triplets = math.ceil(0.005 * n_children / 3.0) * 3
    ratio_gift_happiness = 2
    ratio_child_happiness = 2
    tmp_dict = np.zeros(n_gift_quantity, dtype=np.uint16)
    for i in np.arange(len(pred)):
        tmp_dict[pred[i][1]] += 1
    for count in np.arange(n_gift_quantity):
        assert count <= n_gift_quantity
    # triplets must share the same gift
    for t1 in np.arange(0, triplets, 3):
        triplet1 = pred[t1]
        triplet2 = pred[t1 + 1]
        triplet3 = pred[t1 + 2]
        assert triplet1[1] == triplet2[1] and triplet2[1] == triplet3[1]
    # twins must share the same gift
    for t1 in np.arange(triplets, triplets + twins, 2):
        twin1 = pred[t1]
        twin2 = pred[t1 + 1]
        assert twin1[1] == twin2[1]
    max_child_happiness = n_gift_pref * ratio_child_happiness
    max_gift_happiness = n_child_pref * ratio_gift_happiness
    total_child_happiness = 0
    total_gift_happiness = np.zeros(n_gift_type)
    for i in np.arange(len(pred)):
        row = pred[i]
        child_id = row[0]
        gift_id = row[1]
        assert child_id < n_children
        assert gift_id < n_gift_type
        assert child_id >= 0
        assert gift_id >= 0
        if np.sum(gift_pref[child_id] == gift_id):
            child_happiness = (n_gift_pref - np.where(gift_pref[child_id] == gift_id)[0]) * ratio_child_happiness
            tmp_child_happiness = child_happiness[0]
        else:
            tmp_child_happiness = -1
        if np.sum(child_pref[gift_id] == child_id):
            gift_happiness = (n_child_pref - np.where(child_pref[gift_id] == child_id)[0]) * ratio_gift_happiness
            tmp_gift_happiness = gift_happiness[0]
        else:
            tmp_gift_happiness = -1
        total_child_happiness += tmp_child_happiness
        total_gift_happiness[gift_id] += tmp_gift_happiness
    denominator1 = n_children * max_child_happiness
    denominator2 = n_gift_quantity * max_gift_happiness * n_gift_type
    common_denom = lcm(denominator1, denominator2)
    multiplier = common_denom / denominator1
    return float(math.pow(total_child_happiness * multiplier, 3) + math.pow(np.sum(total_gift_happiness), 3)) / float(math.pow(common_denom, 3))

anh(np.array(random_sub), child_pref, gift_pref)
code
2043499/cell_7
[ "text_plain_output_1.png" ]
'''
INPUT_PATH = '../input/'

def lcm(a, b):
    """Compute the lowest common multiple of a and b"""
    # in case of large numbers, using floor division
    return a * b // math.gcd(a, b)

#from numba import jit
#@jit(nopython=True)
def avg_normalized_happiness(pred, gift, wish):
    n_children = 1000000       # n children to give
    n_gift_type = 1000         # n types of gifts available
    n_gift_quantity = 1000     # each type of gifts are limited to this quantity
    n_gift_pref = 100          # number of gifts a child ranks
    n_child_pref = 1000        # number of children a gift ranks
    twins = math.ceil(0.04 * n_children / 2.) * 2      # 4% of all population, rounded to the closest number
    triplets = math.ceil(0.005 * n_children / 3.) * 3  # 0.5% of all population, rounded to the closest number
    ratio_gift_happiness = 2
    ratio_child_happiness = 2

    # check if triplets have the same gift
    for t1 in np.arange(0, triplets, 3):
        triplet1 = pred[t1]
        triplet2 = pred[t1+1]
        triplet3 = pred[t1+2]
        # print(t1, triplet1, triplet2, triplet3)
        assert triplet1 == triplet2 and triplet2 == triplet3

    # check if twins have the same gift
    for t1 in np.arange(triplets, triplets+twins, 2):
        twin1 = pred[t1]
        twin2 = pred[t1+1]
        # print(t1)
        assert twin1 == twin2

    max_child_happiness = n_gift_pref * ratio_child_happiness
    max_gift_happiness = n_child_pref * ratio_gift_happiness
    total_child_happiness = 1000
    total_gift_happiness = np.zeros(n_gift_type)

    for i in range(len(pred)):
        child_id = i
        gift_id = pred[i]
        # check if child_id and gift_id exist
        assert child_id < n_children
        assert gift_id < n_gift_type
        assert child_id >= 0
        assert gift_id >= 0
        child_happiness = (n_gift_pref - np.where(wish[child_id]==gift_id)[0]) * ratio_child_happiness
        if not child_happiness:
            child_happiness = -1
        gift_happiness = (n_child_pref - np.where(gift[gift_id]==child_id)[0]) * ratio_gift_happiness
        if not gift_happiness:
            gift_happiness = -1
        total_child_happiness += child_happiness
        total_gift_happiness[gift_id] += gift_happiness

    denominator1 = n_children*max_child_happiness
    denominator2 = n_gift_quantity*max_gift_happiness*n_gift_type
    common_denom = lcm(denominator1, denominator2)
    multiplier = common_denom / denominator1
    ret = float(math.pow(total_child_happiness*multiplier,3) + math.pow(np.sum(total_gift_happiness),3)) / float(math.pow(common_denom,3))
    return ret

def get_overall_hapiness(wish, gift):
    res_child = dict()
    for i in range(0, wish.shape[0]):
        for j in range(55):
            res_child[(i, wish[i][j])] = int(100* (1 + (wish.shape[1] - j)*2))
    res_santa = dict()
    for i in range(gift.shape[0]):
        for j in range(gift.shape[1]):
            res_santa[(gift[i][j], i)] = int((1 + (gift.shape[1] - j)*2))
    positive_cases = list(set(res_santa.keys()) | set(res_child.keys()))
    print('Positive case tuples (child, gift): {}'.format(len(positive_cases)))
    res = dict()
    for p in positive_cases:
        res[p] = 0
        if p in res_child:
            res[p] += res_child[p]
        if p in res_santa:
            res[p] += res_santa[p]
    return res

def sort_dict_by_values(a, reverse=True):
    sorted_x = sorted(a.items(), key=operator.itemgetter(1), reverse=reverse)
    return sorted_x

def value_counts_for_list(lst):
    a = dict(Counter(lst))
    a = sort_dict_by_values(a, True)
    return a

def get_most_desired_gifts(wish, gift):
    best_gifts = value_counts_for_list(np.ravel(wish))
    return best_gifts

def recalc_hapiness(happiness, best_gifts, gift):
    recalc = dict()
    for b in best_gifts:
        recalc[b[0]] = b[1] / 2000000
    for h in happiness:
        c, g = h
        happiness[h] /= recalc[g]
        # Make triples/twins more happy
        # if c <= 45000 and happiness[h] < 0.00001:
        #     happiness[h] = 0.00001
    return happiness

def solve():
    wish = pd.read_csv(INPUT_PATH + 'child_wishlist_v2.csv', header=None).as_matrix()[:, 1:]
    gift_init = pd.read_csv(INPUT_PATH + 'gift_goodkids_v2.csv', header=None).as_matrix()[:, 1:]
    gift = gift_init.copy()
    answ = np.zeros(len(wish), dtype=np.int32)
    answ[:] = -1
    gift_count = np.zeros(len(gift), dtype=np.int32)
    happiness = get_overall_hapiness(wish, gift)
    best_gifts = get_most_desired_gifts(wish, gift)
    happiness = recalc_hapiness(happiness, best_gifts, gift)
    sorted_hapiness = sort_dict_by_values(happiness)
    print('Happiness sorted...')
    for i in range(len(sorted_hapiness)):
        child = sorted_hapiness[i][0][0]
        g = sorted_hapiness[i][0][1]
        if answ[child] != -1:
            continue
        if gift_count[g] >= 1000:
            continue
        if child <= 5000 and gift_count[g] < 997:
            if child % 3 == 0:
                answ[child] = g
                answ[child+1] = g
                answ[child+2] = g
                gift_count[g] += 3
            elif child % 3 == 1:
                answ[child] = g
                answ[child-1] = g
                answ[child+1] = g
                gift_count[g] += 3
            else:
                answ[child] = g
                answ[child-1] = g
                answ[child-2] = g
                gift_count[g] += 3
        elif child > 5000 and child <= 45000 and gift_count[g] < 998:
            if child % 2 == 0:
                answ[child] = g
                answ[child - 1] = g
                gift_count[g] += 2
            else:
                answ[child] = g
                answ[child + 1] = g
                gift_count[g] += 2
        elif child > 45000:
            answ[child] = g
            gift_count[g] += 1
    print('Left unhappy:', len(answ[answ == -1]))
    # unhappy children
    for child in range(45001, len(answ)):
        if answ[child] == -1:
            g = np.argmin(gift_count)
            answ[child] = g
            gift_count[g] += 1
    if answ.min() == -1:
        print('Some children without present')
        exit()
    if gift_count.max() > 1000:
        print('Some error in kernel: {}'.format(gift_count.max()))
        exit()
    print('Start score calculation...')
    # score = avg_normalized_happiness(answ, gift_init, wish)
    # print('Predicted score: {:.8f}'.format(score))
    score = avg_normalized_happiness(answ, gift, wish)
    print('Predicted score: {:.8f}'.format(score))
    out = open('subm_{}.csv'.format(score), 'w')
    out.write('ChildId,GiftId\n')
    for i in range(len(answ)):
        out.write(str(i) + ',' + str(answ[i]) + '\n')
    out.close()

solve()
'''
code
16110432/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
code
16110432/cell_25
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
dataset.nunique()
plt.figure(figsize=(8, 8))
colors = ['LightBlue', 'Lightgreen']
explode = [0, 0.1]
plt.pie(dataset['Gender'].value_counts(), explode=explode, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)
plt.legend(labels=['Female', 'Male'])
plt.title('Male v/s Female Distribution')
plt.axis('off')
code
16110432/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
dataset.nunique()
plt.figure(figsize=(20, 8))
sns.countplot(dataset['Spending_Score'])
plt.title('Spending Score Distribution')
plt.xlabel('Spending Score')
plt.ylabel('Count')
plt.show()
code
16110432/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
dataset.nunique()
plt.figure(figsize=(15, 5))
sns.countplot(dataset['Age'])
plt.title('Age Distribution')
plt.xlabel('Age')
plt.show()
code
16110432/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
dataset.nunique()
dataset['Gender'].value_counts()
code
16110432/cell_2
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

print(os.listdir('../input'))
code
16110432/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
dataset.head()
code
16110432/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
dataset.info()
code
16110432/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
dataset.nunique()
code
16110432/cell_24
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
dataset.nunique()
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
sns.distplot(dataset['Age'])
plt.title('Age Distribution')
plt.xlabel('Age')
plt.ylabel('Count')
plt.subplot(1, 2, 2)
sns.distplot(dataset['Annual_Income'], color='pink')
plt.title('Annual Income Distribution')
plt.xlabel('Annual Income')
plt.ylabel('Count')
code
16110432/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
dataset.describe()
code
16110432/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
dataset.nunique()
plt.figure(figsize=(15, 5))
sns.countplot(dataset['Annual_Income'])
plt.title('Annual Income')
plt.xlabel('Annual Income($)')
plt.show()
code
16110432/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.rename(columns={'Annual Income (k$)': 'Annual_Income', 'Spending Score (1-100)': 'Spending_Score'}, inplace=True)
dataset.shape
dataset.nunique()
colors = ['LightBlue', 'Lightgreen']
explode = [0, 0.1]
plt.axis('off')
plt.figure(figsize=(15, 8))
sns.heatmap(dataset.corr(), annot=True)  # on pandas >= 2.0, corr() needs numeric_only=True since Gender is a string column
code
16110432/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/Mall_Customers.csv')
dataset.head()
code
17137806/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # plotting
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
labels = cota_total2.index
sizes = cota_total2['vlrdocumento']
explode = (0.1, 0.1, 0, 0, 0)
plt.axis('equal')
labels = cota_total3.index
sizes = cota_total3['vlrdocumento']
explode = (0.1, 0.1, 0, 0, 0)
plt.axis('equal')
plt.title('Party Spending Over Time', loc='center', fontsize=12, fontweight=0, color='black')  # translated from Portuguese
plt.xlabel('Year')
plt.ylabel('Spending')
plt.plot(cota_negativa_por_ano)
code
17137806/cell_9
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
cota_total.plot(kind='bar', title='Party Spending - Complete')  # title translated from Portuguese
code
17137806/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
print(f'There are {nRow} rows and {nCol} columns')
code
17137806/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # plotting
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
labels = cota_total2.index
sizes = cota_total2['vlrdocumento']
explode = (0.1, 0.1, 0, 0, 0)
plt.axis('equal')
labels = cota_total3.index
sizes = cota_total3['vlrdocumento']
explode = (0.1, 0.1, 0, 0, 0)
plt.axis('equal')
cota_neg_ano_mes = pd.DataFrame(cota_negativa.groupby(['numano', 'nummes'])['vlrdocumento'].sum())
pd.DataFrame(cota.groupby(['numano', 'nummes'])['vlrdocumento'].sum())
val1 = pd.DataFrame(new_cota['tipo'].value_counts())
val1['perc'] = val1['tipo'] / np.cumsum(val1['tipo'], axis=0)
val1
for i, v in val1['tipo'].iteritems():  # Series.items() on pandas >= 2.0
    plt.text(i, v, v, va='bottom', ha='center')
plt.style.use('seaborn-darkgrid')
palette = plt.get_cmap('Set1')
num = 0
# 'numano' became the index in the groupby above, so the drop/plot below
# assume it was turned back into a column first (see the sketch after this cell)
for column in cota_por_ano.drop('numano', axis=1):
    num += 1
    plt.plot(cota_por_ano['numano'], cota_por_ano[column], marker='', color=palette(num), linewidth=1, alpha=0.9, label=column)
plt.legend(loc=2, ncol=2)
plt.title('A (bad) Spaghetti plot', loc='left', fontsize=12, fontweight=0, color='orange')
plt.xlabel('Time')
plt.ylabel('Score')
code
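As flagged in the comment above, 'numano' is the index of cota_por_ano after the groupby, so dropping and plotting it as a column would fail. A minimal sketch of the presumably intended preparation (an assumption about the author's intent):

# Hypothetical fix: turn the 'numano' index back into a column before plotting
cota_por_ano = cota.groupby('numano')['vlrdocumento'].sum().reset_index()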
17137806/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # plotting
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
labels = cota_total2.index
sizes = cota_total2['vlrdocumento']
explode = (0.1, 0.1, 0, 0, 0)
plt.axis('equal')
labels = cota_total3.index
sizes = cota_total3['vlrdocumento']
explode = (0.1, 0.1, 0, 0, 0)
plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.show()
code
17137806/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt  # plotting
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
labels = cota_total2.index
sizes = cota_total2['vlrdocumento']
explode = (0.1, 0.1, 0, 0, 0)
plt.axis('equal')
labels = cota_total3.index
sizes = cota_total3['vlrdocumento']
explode = (0.1, 0.1, 0, 0, 0)
plt.axis('equal')
cota_neg_ano_mes = pd.DataFrame(cota_negativa.groupby(['numano', 'nummes'])['vlrdocumento'].sum())
pd.DataFrame(cota.groupby(['numano', 'nummes'])['vlrdocumento'].sum())
val1 = pd.DataFrame(new_cota['tipo'].value_counts())
val1['perc'] = val1['tipo'] / np.cumsum(val1['tipo'], axis=0)
val1
plt.figure(figsize=(6, 6))
for i, v in val1['tipo'].iteritems():  # Series.items() on pandas >= 2.0
    plt.bar(i, v, label=i)
    plt.text(i, v, v, va='bottom', ha='center')
plt.title('Spending vs Refunds')  # title translated from Portuguese ('Gastos x Reembolso')
plt.show()
code
17137806/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
code
17137806/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
cota_neg_ano_mes = pd.DataFrame(cota_negativa.groupby(['numano', 'nummes'])['vlrdocumento'].sum())
pd.DataFrame(cota.groupby(['numano', 'nummes'])['vlrdocumento'].sum())
val1 = pd.DataFrame(new_cota['tipo'].value_counts())
val1['perc'] = val1['tipo'] / np.cumsum(val1['tipo'], axis=0)
val1
code
17137806/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
cota_total
code
17137806/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
cota.describe()
code
17137806/cell_16
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
cota_neg_ano_mes = pd.DataFrame(cota_negativa.groupby(['numano', 'nummes'])['vlrdocumento'].sum())
pd.DataFrame(cota.groupby(['numano', 'nummes'])['vlrdocumento'].sum())
code
17137806/cell_17
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
cota_neg_ano_mes = pd.DataFrame(cota_negativa.groupby(['numano', 'nummes'])['vlrdocumento'].sum())
cota_negativa.describe()
code
17137806/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # plotting
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
labels = cota_total2.index
sizes = cota_total2['vlrdocumento']
explode = (0.1, 0.1, 0, 0, 0)
plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.show()
code
17137806/cell_12
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
cota_negativa = pd.DataFrame()
cota_positiva = pd.DataFrame()
cota_negativa = cota[cota['vlrdocumento'] < 0]
cota_positiva = cota[cota['vlrdocumento'] > 0]
#cota_negativa['reembolso'] = pd.DataFrame()
cota_negativa['tipo'] = 'Reembolso'
cota_positiva['tipo'] = 'Gasto'
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_total3 = cota_total.tail(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
cota_negativa_por_ano = pd.DataFrame(cota_negativa.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_positiva
new_cota = pd.DataFrame()
new_cota = pd.concat([cota_positiva, cota_negativa])
pd.DataFrame(new_cota['tipo'].value_counts())
labels = cota_total2.index
sizes = cota_total2['vlrdocumento']
explode = (0.1, 0.1, 0, 0, 0)
plt.axis('equal')
labels = cota_total3.index
sizes = cota_total3['vlrdocumento']
explode = (0.1, 0.1, 0, 0, 0)
plt.axis('equal')
plt.title('Evolução Gastos dos Partidos', loc='center', fontsize=12, fontweight=0, color='black')
plt.xlabel('Ano')
plt.ylabel('Gasto')
plt.plot(cota_por_ano)
code
17137806/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
cota.head(10)
code
73064668/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
test_df = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_features = [feature for feature in train_df.columns if 'cat' in feature]
cont_features = [feature for feature in train_df.columns if 'cont' in feature]
train_df.describe().T
train_df.drop('id', axis=1, inplace=True)
test_df.drop('id', axis=1, inplace=True)
print('Missing values in train dataset:', sum(train_df.isnull().mean() * 100))
print('Missing values in test dataset:', sum(test_df.isnull().mean() * 100))
code
73064668/cell_11
[ "text_plain_output_1.png" ]
from pandas_profiling import ProfileReport
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
test_df = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_features = [feature for feature in train_df.columns if 'cat' in feature]
cont_features = [feature for feature in train_df.columns if 'cont' in feature]
train_df.describe().T
train_df.drop('id', axis=1, inplace=True)
test_df.drop('id', axis=1, inplace=True)
report = ProfileReport(train_df)
report
code
73064668/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73064668/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
test_df = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_features = [feature for feature in train_df.columns if 'cat' in feature]
cont_features = [feature for feature in train_df.columns if 'cont' in feature]
print('Rows and Columns in train dataset:', train_df.shape)
print('Rows and Columns in test dataset:', test_df.shape)
code
73064668/cell_8
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
test_df = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_features = [feature for feature in train_df.columns if 'cat' in feature]
cont_features = [feature for feature in train_df.columns if 'cont' in feature]
train_df.describe().T
code
73064668/cell_15
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
test_df = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_features = [feature for feature in train_df.columns if 'cat' in feature]
cont_features = [feature for feature in train_df.columns if 'cont' in feature]
train_df.describe().T
train_df.drop('id', axis=1, inplace=True)
test_df.drop('id', axis=1, inplace=True)
x = train_df.drop(['target'], axis=1)
y = train_df['target']
X_test = test_df.copy()
from sklearn.preprocessing import OrdinalEncoder
ordinal_encoder = OrdinalEncoder()
x[cat_features] = ordinal_encoder.fit_transform(x[cat_features])
X_test[cat_features] = ordinal_encoder.transform(X_test[cat_features])
x.head()
code
105195066/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train.isnull().sum()
df_train.nunique()
print('Products in Train Data: ', df_train['product'].unique())
print()
print('Value Counts of Products:', df_train['product'].value_counts())
code
105195066/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
print('Countries in Test Dataset: ', df_test['country'].unique())
print('NULL VALUES : ', df_test['country'].isnull().sum())
code
105195066/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train.isnull().sum()
code
105195066/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train.isnull().sum()
df_train.nunique()
df_train['num_sold']
code
105195066/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
print('Products in test Data: ', df_test['product'].unique())
print()
print('Value Counts of Products:', df_test['product'].value_counts())
code
105195066/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
sample
code
105195066/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train.isnull().sum()
df_train.nunique()
plt.title('No of products Sold in Each Country')
sns.barplot(data=df_train, y=df_train['num_sold'], x=df_train['country'])
code
105195066/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train.isnull().sum()
df_train.nunique()
df_train.info()
code
105195066/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105195066/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train.info()
code
105195066/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train
code
105195066/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train.isnull().sum()
df_train.nunique()
print('Stores in Train Data: ', df_train['store'].unique())
print()
print('Value Counts of Stores:', df_train['store'].value_counts())
code
105195066/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
print('Stores in Test Data: ', df_test['store'].unique())
print()
print('Value Counts of Stores:', df_test['store'].value_counts())
code
105195066/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train.isnull().sum()
df_train.nunique()
print('Num sold in Train Data: ', df_train['num_sold'].nunique())
print()
print('Value Counts of Sold by :', df_train['num_sold'].value_counts())
code
105195066/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train.isnull().sum()
df_train.nunique()
code
105195066/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train.isnull().sum()
df_train.nunique()
plt.title('No of products Sold in Each Store In Each Country')
sns.barplot(data=df_train, y=df_train['num_sold'], x=df_train['country'], hue='store')
code
105195066/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv')
sample = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv')
df_train.isnull().sum()
df_train.nunique()
print('Countries in Train Dataset: ', df_train['country'].unique())
print('NULL VALUES : ', df_train['country'].isnull().sum())
code
48163903/cell_13
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import re
df_train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df_test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
df_sub = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')

def preprocessing(df):
    derlem = []
    for i in range(len(df.text)):
        text = re.sub('https?://\\S+', '', df.text[i])
        text = re.sub('http?://\\S+', '', text)
        text = re.sub('[^a-zA-Z]', ' ', text)
        text = re.sub('\\n', ' ', text)
        text = re.sub('\\s+', ' ', text).strip()
        text = text.lower()
        text = text.split()
        text = [WordNetLemmatizer().lemmatize(kelime) for kelime in text if not kelime in set(stopwords.words('english'))]
        text = ' '.join(text)
        derlem.append(text)
    df['clean_text'] = derlem
    return df
df_test = preprocessing(df_test)
df_train = preprocessing(df_train)
tfidf = TfidfVectorizer(min_df=0.0, max_df=1.0, use_idf=True)
X = tfidf.fit_transform(df_train.text).toarray()
y = df_train.target
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
lr = LogisticRegression(solver='liblinear')
lgr_classifier = Pipeline([('scale', scaler), ('lr', lr)])
# Assumption: the train/test split cell is not captured in this record; a typical
# split (test_size and random_state chosen here) is added so the code below runs.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
lgr_classifier.fit(X_train, y_train)
lgr_prediction = lgr_classifier.predict(X_test)
accuracy_score(y_train, lgr_classifier.predict(X_train))
test_vector = tfidf.transform(df_test['clean_text']).toarray()
prediction = lgr_classifier.predict(test_vector)
prediction
code
48163903/cell_11
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
lr = LogisticRegression(solver='liblinear')
lgr_classifier = Pipeline([('scale', scaler), ('lr', lr)])
# X_train, X_test, y_train, y_test are defined in an earlier notebook cell not captured in this record
lgr_classifier.fit(X_train, y_train)
lgr_prediction = lgr_classifier.predict(X_test)
accuracy_score(y_test, lgr_prediction)
code
48163903/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df_test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
df_sub = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')
df_train.head()
code
48163903/cell_14
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import re
df_train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df_test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
df_sub = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')

def preprocessing(df):
    derlem = []
    for i in range(len(df.text)):
        text = re.sub('https?://\\S+', '', df.text[i])
        text = re.sub('http?://\\S+', '', text)
        text = re.sub('[^a-zA-Z]', ' ', text)
        text = re.sub('\\n', ' ', text)
        text = re.sub('\\s+', ' ', text).strip()
        text = text.lower()
        text = text.split()
        text = [WordNetLemmatizer().lemmatize(kelime) for kelime in text if not kelime in set(stopwords.words('english'))]
        text = ' '.join(text)
        derlem.append(text)
    df['clean_text'] = derlem
    return df
df_test = preprocessing(df_test)
df_train = preprocessing(df_train)
tfidf = TfidfVectorizer(min_df=0.0, max_df=1.0, use_idf=True)
X = tfidf.fit_transform(df_train.text).toarray()
y = df_train.target
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
lr = LogisticRegression(solver='liblinear')
lgr_classifier = Pipeline([('scale', scaler), ('lr', lr)])
# Assumption: the train/test split cell is not captured in this record; a typical
# split (test_size and random_state chosen here) is added so the code below runs.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
lgr_classifier.fit(X_train, y_train)
lgr_prediction = lgr_classifier.predict(X_test)
accuracy_score(y_train, lgr_classifier.predict(X_train))
test_vector = tfidf.transform(df_test['clean_text']).toarray()
prediction = lgr_classifier.predict(test_vector)
prediction
df_submission = pd.DataFrame()
df_submission['id'] = df_test['id']
df_submission['target'] = prediction
df_submission.head(10)
code
48163903/cell_10
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
lr = LogisticRegression(solver='liblinear')
lgr_classifier = Pipeline([('scale', scaler), ('lr', lr)])
# X_train, X_test, y_train, y_test are defined in an earlier notebook cell not captured in this record
lgr_classifier.fit(X_train, y_train)
lgr_prediction = lgr_classifier.predict(X_test)
accuracy_score(y_train, lgr_classifier.predict(X_train))
code
48163903/cell_12
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
lr = LogisticRegression(solver='liblinear')
lgr_classifier = Pipeline([('scale', scaler), ('lr', lr)])
# X_train, X_test, y_train, y_test are defined in an earlier notebook cell not captured in this record
lgr_classifier.fit(X_train, y_train)
lgr_prediction = lgr_classifier.predict(X_test)
print(classification_report(y_test, lgr_prediction))
print(confusion_matrix(y_test, lgr_prediction))
code