Dataset schema:
  path              stringlengths    13 to 17
  screenshot_names  sequencelengths  1 to 873
  code              stringlengths    0 to 40.4k
  cell_type         stringclasses    1 value
128049433/cell_25
[ "image_output_1.png" ]
from tensorflow.keras import models, layers
import tensorflow as tf

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)

def get_dataset(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=8)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)
    return (train_ds, val_ds, test_ds)

train_ds, val_ds, test_ds = get_dataset(dataset)  # restored (assumed): the flattened source used these splits below without defining them

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)

resize_and_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(256, 256),
    layers.experimental.preprocessing.Rescaling(1.0 / 255)])
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.3)])

n_classes = 4
input_shape = (BATCH_SIZE, 256, 256, 3)
model = models.Sequential([
    resize_and_rescale,
    data_augmentation,
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax')])
model.build(input_shape=input_shape)
model.summary()
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
history = model.fit(train_ds, epochs=60, batch_size=BATCH_SIZE, verbose=1, validation_data=val_ds)
code
128049433/cell_30
[ "text_plain_output_1.png" ]
from tensorflow.keras import models, layers
import glob as gb
import matplotlib.pyplot as plt
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import numpy as np
import pandas as pd
import os

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)

import os
import glob as gb

path = '//kaggle//input//corn-or-maize-leaf-disease-dataset//data'
size = []
for folder in os.listdir(path):
    files = gb.glob(pathname=str(path + '//' + folder + '/*.jpg'))
    for file in files:
        image = plt.imread(file)
        size.append(image.shape)
pd.Series(size).value_counts()

class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, label_batch in dataset.take(1):
    for i in range(12):
        ax = plt.subplot(3, 4, i + 1)
        plt.imshow(image_batch[i].numpy().astype('uint8'))
        plt.title(class_names[label_batch[i]])
        plt.axis('off')

def get_dataset(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=8)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)
    return (train_ds, val_ds, test_ds)

train_ds, val_ds, test_ds = get_dataset(dataset)  # restored (assumed): the flattened source used these splits below without defining them

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)

resize_and_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(256, 256),
    layers.experimental.preprocessing.Rescaling(1.0 / 255)])
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.3)])

n_classes = 4
input_shape = (BATCH_SIZE, 256, 256, 3)
model = models.Sequential([
    resize_and_rescale,
    data_augmentation,
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax')])
model.build(input_shape=input_shape)
model.summary()
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
history = model.fit(train_ds, epochs=60, batch_size=BATCH_SIZE, verbose=1, validation_data=val_ds)

history.params
history.history.keys()
his_data = pd.DataFrame(history.history)
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
plt.plot(his_data.loss, label='Training loss')
plt.plot(his_data.val_loss, label='Validation loss')
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.title('Losses')
plt.grid()
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(his_data.accuracy, label='Training accuracy')
plt.plot(his_data.val_accuracy, label='Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.title('Accuracy')
plt.grid()
plt.legend()
code
128049433/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import tensorflow as tf

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)
code
128049433/cell_29
[ "text_plain_output_1.png" ]
from tensorflow.keras import models, layers
import tensorflow as tf

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)

def get_dataset(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=8)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)
    return (train_ds, val_ds, test_ds)

train_ds, val_ds, test_ds = get_dataset(dataset)  # restored (assumed): the flattened source used these splits below without defining them

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)

resize_and_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(256, 256),
    layers.experimental.preprocessing.Rescaling(1.0 / 255)])
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.3)])

n_classes = 4
input_shape = (BATCH_SIZE, 256, 256, 3)
model = models.Sequential([
    resize_and_rescale,
    data_augmentation,
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax')])
model.build(input_shape=input_shape)
model.summary()
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
history = model.fit(train_ds, epochs=60, batch_size=BATCH_SIZE, verbose=1, validation_data=val_ds)

history.params
history.history.keys()
code
128049433/cell_26
[ "text_plain_output_1.png" ]
from tensorflow.keras import models, layers
import tensorflow as tf

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)

def get_dataset(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=8)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)
    return (train_ds, val_ds, test_ds)

train_ds, val_ds, test_ds = get_dataset(dataset)  # restored (assumed): the flattened source used these splits below without defining them

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)

resize_and_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(256, 256),
    layers.experimental.preprocessing.Rescaling(1.0 / 255)])
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.3)])

n_classes = 4
input_shape = (BATCH_SIZE, 256, 256, 3)
model = models.Sequential([
    resize_and_rescale,
    data_augmentation,
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax')])
model.build(input_shape=input_shape)
model.summary()
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
history = model.fit(train_ds, epochs=60, batch_size=BATCH_SIZE, verbose=1, validation_data=val_ds)

scores = model.evaluate(test_ds)
code
128049433/cell_11
[ "text_plain_output_1.png" ]
import glob as gb
import matplotlib.pyplot as plt
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import numpy as np
import pandas as pd
import os

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)

import os
import glob as gb

path = '//kaggle//input//corn-or-maize-leaf-disease-dataset//data'
size = []
for folder in os.listdir(path):
    files = gb.glob(pathname=str(path + '//' + folder + '/*.jpg'))
    for file in files:
        image = plt.imread(file)
        size.append(image.shape)
pd.Series(size).value_counts()

class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, label_batch in dataset.take(1):
    for i in range(12):
        ax = plt.subplot(3, 4, i + 1)
        plt.imshow(image_batch[i].numpy().astype('uint8'))
        plt.title(class_names[label_batch[i]])
        plt.axis('off')
code
128049433/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128049433/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import glob as gb
import matplotlib.pyplot as plt
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os

import os
import glob as gb

path = '//kaggle//input//corn-or-maize-leaf-disease-dataset//data'
size = []
for folder in os.listdir(path):
    files = gb.glob(pathname=str(path + '//' + folder + '/*.jpg'))
    for file in files:
        image = plt.imread(file)
        size.append(image.shape)
pd.Series(size).value_counts()
code
128049433/cell_32
[ "text_plain_output_1.png" ]
from tensorflow.keras import models, layers
import glob as gb
import matplotlib.pyplot as plt
import numpy as np
import numpy as np  # linear algebra
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import numpy as np
import pandas as pd
import os

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)

import os
import glob as gb

path = '//kaggle//input//corn-or-maize-leaf-disease-dataset//data'
size = []
for folder in os.listdir(path):
    files = gb.glob(pathname=str(path + '//' + folder + '/*.jpg'))
    for file in files:
        image = plt.imread(file)
        size.append(image.shape)
pd.Series(size).value_counts()

class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, label_batch in dataset.take(1):
    for i in range(12):
        ax = plt.subplot(3, 4, i + 1)
        plt.imshow(image_batch[i].numpy().astype('uint8'))
        plt.title(class_names[label_batch[i]])
        plt.axis('off')

def get_dataset(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=8)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)
    return (train_ds, val_ds, test_ds)

train_ds, val_ds, test_ds = get_dataset(dataset)  # restored (assumed): the flattened source used these splits below without defining them

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)

resize_and_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(256, 256),
    layers.experimental.preprocessing.Rescaling(1.0 / 255)])
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.3)])

n_classes = 4
input_shape = (BATCH_SIZE, 256, 256, 3)
model = models.Sequential([
    resize_and_rescale,
    data_augmentation,
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax')])
model.build(input_shape=input_shape)
model.summary()
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
history = model.fit(train_ds, epochs=60, batch_size=BATCH_SIZE, verbose=1, validation_data=val_ds)

scores = model.evaluate(test_ds)
history.params
history.history.keys()
his_data = pd.DataFrame(history.history)

import numpy as np
for images_batch, labels_batch in test_ds.take(1):
    first_image = images_batch[0].numpy().astype('uint8')
    first_label = labels_batch[0].numpy()
    print('First Image to Predict :')
    plt.imshow(first_image)
    print('\nActual label:', class_names[first_label])
    batch_prediction = model.predict(images_batch)
    print('\nPredicted label', class_names[np.argmax(batch_prediction[0])])
code
128049433/cell_28
[ "text_plain_output_1.png" ]
from tensorflow.keras import models, layers
import tensorflow as tf

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)

def get_dataset(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=8)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)
    return (train_ds, val_ds, test_ds)

train_ds, val_ds, test_ds = get_dataset(dataset)  # restored (assumed): the flattened source used these splits below without defining them

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)

resize_and_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(256, 256),
    layers.experimental.preprocessing.Rescaling(1.0 / 255)])
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.3)])

n_classes = 4
input_shape = (BATCH_SIZE, 256, 256, 3)
model = models.Sequential([
    resize_and_rescale,
    data_augmentation,
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax')])
model.build(input_shape=input_shape)
model.summary()
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
history = model.fit(train_ds, epochs=60, batch_size=BATCH_SIZE, verbose=1, validation_data=val_ds)

history.params
code
128049433/cell_15
[ "text_plain_output_1.png" ]
def get_dataset(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=8)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)
    return (train_ds, val_ds, test_ds)

# train_ds, val_ds and test_ds come from calling get_dataset on the loaded dataset in an earlier cell
print('Length of Training Dataset is', len(train_ds))
print('\nLength of Validation Dataset is', len(val_ds))
print('\nLength of Testing Dataset is', len(test_ds))
code
128049433/cell_10
[ "text_plain_output_1.png" ]
import tensorflow as tf

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)

class_names = dataset.class_names
class_names
len(dataset)
code
128049433/cell_27
[ "text_plain_output_1.png" ]
from tensorflow.keras import models, layers
import tensorflow as tf

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100
dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)

def get_dataset(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=8)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)
    return (train_ds, val_ds, test_ds)

train_ds, val_ds, test_ds = get_dataset(dataset)  # restored (assumed): the flattened source used these splits below without defining them

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)

resize_and_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(256, 256),
    layers.experimental.preprocessing.Rescaling(1.0 / 255)])
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.3)])

n_classes = 4
input_shape = (BATCH_SIZE, 256, 256, 3)
model = models.Sequential([
    resize_and_rescale,
    data_augmentation,
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax')])
model.build(input_shape=input_shape)
model.summary()
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
history = model.fit(train_ds, epochs=60, batch_size=BATCH_SIZE, verbose=1, validation_data=val_ds)

history
code
128028409/cell_13
[ "text_plain_output_1.png" ]
import pygad
import random

target_url = 'https://filebox.ece.vt.edu/~mhsiao/ISCAS89/s27.bench'
import urllib.request

outputs = []
inputs = []
for line in urllib.request.urlopen(target_url):
    l = line.decode('utf-8')
    if '=' in l:
        parse = l.split(' = ')
        outputs.append(parse[0])
        inputs.append(parse[1].split('(')[1].split(')')[0].split(', '))

n = len(outputs)
edges = []
for i in range(n):
    out = outputs[i]
    for j in range(n):
        if out in inputs[j]:
            edges.append([i + 1, j + 1])
nodesLen = n
edgesLen = len(edges)

def IC(chromosome):
    # imbalance cost: difference between the two partition sizes
    zeros = 0
    ones = 0
    for i in chromosome:
        if str(i) == '0':  # fix: compare via str() so int genes from pygad and '0'/'1' test strings both work
            zeros += 1
        else:
            ones += 1
    return abs(zeros - ones)

IC('0000000111111111111')

def PC(chromosome, edges):
    # partition cost: number of edges crossing the cut
    partition0 = []
    partition1 = []
    for i in range(len(chromosome)):
        if str(chromosome[i]) == '0':  # same str() comparison as in IC
            partition0.append(i + 1)
        else:
            partition1.append(i + 1)
    pc = 0
    for i in edges:
        x, y = (i[0], i[1])
        if x in partition0 and y in partition1 or (x in partition1 and y in partition0):
            pc += 1
    return pc

a = 100
b = 10
edgesTemp = [[1, 2], [1, 3], [2, 4], [3, 6], [4, 5], [5, 6]]

def fitness(ga_instance, chromosome, chromosomeIdx):
    ic = IC(chromosome)
    pc = PC(chromosome, edges)
    cost = a * pc + b * ic * ic
    return 1.0 / cost

def on_generation(ga_instance):
    pass

import random

def getInitialPopulation(chromosomeLen, chromosomesReq):
    population = []
    for i in range(chromosomesReq):
        chromosome = []
        for j in range(chromosomeLen):
            chromosome.append(random.randint(0, 1))
        population.append(chromosome)
    return population

num_generations = 50
num_parents_mating = 2
init_range_low = 0
init_range_high = 2
initial_pop = getInitialPopulation(nodesLen, 20)
ga_instance = pygad.GA(num_generations=num_generations, num_parents_mating=num_parents_mating,
                       fitness_func=fitness, sol_per_pop=10, num_genes=nodesLen, gene_type=int,
                       init_range_low=init_range_low, init_range_high=init_range_high,
                       gene_space=[0, 1], initial_population=initial_pop,
                       parent_selection_type='sss', K_tournament=3,
                       crossover_type='scattered', crossover_probability=0.6,
                       mutation_type='random', mutation_probability=0.1,
                       save_best_solutions=True, on_generation=on_generation)
ga_instance.run()
code
128028409/cell_4
[ "text_plain_output_1.png" ]
target_url = 'https://filebox.ece.vt.edu/~mhsiao/ISCAS89/s27.bench'
import urllib.request

outputs = []
inputs = []
for line in urllib.request.urlopen(target_url):
    l = line.decode('utf-8')
    if '=' in l:
        parse = l.split(' = ')
        outputs.append(parse[0])
        inputs.append(parse[1].split('(')[1].split(')')[0].split(', '))

n = len(outputs)
edges = []
for i in range(n):
    out = outputs[i]
    for j in range(n):
        if out in inputs[j]:
            edges.append([i + 1, j + 1])
nodesLen = n
edgesLen = len(edges)
print(nodesLen, edgesLen)
print(edges)
code
128028409/cell_6
[ "text_plain_output_1.png" ]
target_url = 'https://filebox.ece.vt.edu/~mhsiao/ISCAS89/s27.bench'
import urllib.request

outputs = []
inputs = []
for line in urllib.request.urlopen(target_url):
    l = line.decode('utf-8')
    if '=' in l:
        parse = l.split(' = ')
        outputs.append(parse[0])
        inputs.append(parse[1].split('(')[1].split(')')[0].split(', '))

n = len(outputs)
edges = []
for i in range(n):
    out = outputs[i]
    for j in range(n):
        if out in inputs[j]:
            edges.append([i + 1, j + 1])

def PC(chromosome, edges):
    partition0 = []
    partition1 = []
    for i in range(len(chromosome)):
        if chromosome[i] == '0':
            partition0.append(i + 1)
        else:
            partition1.append(i + 1)
    pc = 0
    for i in edges:
        x, y = (i[0], i[1])
        if x in partition0 and y in partition1 or (x in partition1 and y in partition0):
            pc += 1
    return pc

print(PC('000111', [[1, 2], [1, 3], [2, 4], [3, 6], [4, 5], [5, 6]]))
code
128028409/cell_2
[ "text_plain_output_1.png" ]
target_url = 'https://filebox.ece.vt.edu/~mhsiao/ISCAS89/s27.bench'
import urllib.request

outputs = []
inputs = []
for line in urllib.request.urlopen(target_url):
    l = line.decode('utf-8')
    if '=' in l:
        parse = l.split(' = ')
        outputs.append(parse[0])
        inputs.append(parse[1].split('(')[1].split(')')[0].split(', '))

print('node\toutputs', '\t', 'inputs\n')
for i in range(len(outputs)):
    print(i + 1, '\t', outputs[i], '\t\t', inputs[i])
code
128028409/cell_8
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
!pip install pygad
import pygad
code
128028409/cell_15
[ "text_plain_output_1.png" ]
import pygad
import random

target_url = 'https://filebox.ece.vt.edu/~mhsiao/ISCAS89/s27.bench'
import urllib.request

outputs = []
inputs = []
for line in urllib.request.urlopen(target_url):
    l = line.decode('utf-8')
    if '=' in l:
        parse = l.split(' = ')
        outputs.append(parse[0])
        inputs.append(parse[1].split('(')[1].split(')')[0].split(', '))

n = len(outputs)
edges = []
for i in range(n):
    out = outputs[i]
    for j in range(n):
        if out in inputs[j]:
            edges.append([i + 1, j + 1])
nodesLen = n
edgesLen = len(edges)

def IC(chromosome):
    zeros = 0
    ones = 0
    for i in chromosome:
        if str(i) == '0':  # fix: compare via str() so int genes from pygad and '0'/'1' test strings both work
            zeros += 1
        else:
            ones += 1
    return abs(zeros - ones)

IC('0000000111111111111')

def PC(chromosome, edges):
    partition0 = []
    partition1 = []
    for i in range(len(chromosome)):
        if str(chromosome[i]) == '0':  # same str() comparison as in IC
            partition0.append(i + 1)
        else:
            partition1.append(i + 1)
    pc = 0
    for i in edges:
        x, y = (i[0], i[1])
        if x in partition0 and y in partition1 or (x in partition1 and y in partition0):
            pc += 1
    return pc

a = 100
b = 10
edgesTemp = [[1, 2], [1, 3], [2, 4], [3, 6], [4, 5], [5, 6]]

def fitness(ga_instance, chromosome, chromosomeIdx):
    ic = IC(chromosome)
    pc = PC(chromosome, edges)
    cost = a * pc + b * ic * ic
    return 1.0 / cost

def on_generation(ga_instance):
    pass

import random

def getInitialPopulation(chromosomeLen, chromosomesReq):
    population = []
    for i in range(chromosomesReq):
        chromosome = []
        for j in range(chromosomeLen):
            chromosome.append(random.randint(0, 1))
        population.append(chromosome)
    return population

num_generations = 50
num_parents_mating = 2
init_range_low = 0
init_range_high = 2
initial_pop = getInitialPopulation(nodesLen, 20)
ga_instance = pygad.GA(num_generations=num_generations, num_parents_mating=num_parents_mating,
                       fitness_func=fitness, sol_per_pop=10, num_genes=nodesLen, gene_type=int,
                       init_range_low=init_range_low, init_range_high=init_range_high,
                       gene_space=[0, 1], initial_population=initial_pop,
                       parent_selection_type='sss', K_tournament=3,
                       crossover_type='scattered', crossover_probability=0.6,
                       mutation_type='random', mutation_probability=0.1,
                       save_best_solutions=True, on_generation=on_generation)
ga_instance.run()

solution, solution_fitness, solution_idx = ga_instance.best_solution()
print('Parameters of the best solution : {solution}'.format(solution=solution))
print('Fitness value of the best solution = {solution_fitness}'.format(solution_fitness=solution_fitness))
print(solution_idx)
code
128028409/cell_16
[ "text_plain_output_1.png" ]
from tqdm import tqdm

target_url = 'https://filebox.ece.vt.edu/~mhsiao/ISCAS89/s27.bench'
import urllib.request

outputs = []
inputs = []
for line in urllib.request.urlopen(target_url):
    l = line.decode('utf-8')
    if '=' in l:
        parse = l.split(' = ')
        outputs.append(parse[0])
        inputs.append(parse[1].split('(')[1].split(')')[0].split(', '))

n = len(outputs)
edges = []
for i in range(n):
    out = outputs[i]
    for j in range(n):
        if out in inputs[j]:
            edges.append([i + 1, j + 1])
nodesLen = n
edgesLen = len(edges)

def IC(chromosome):
    zeros = 0
    ones = 0
    for i in chromosome:
        if i == '0':
            zeros += 1
        else:
            ones += 1
    return abs(zeros - ones)

IC('0000000111111111111')

def PC(chromosome, edges):
    partition0 = []
    partition1 = []
    for i in range(len(chromosome)):
        if chromosome[i] == '0':
            partition0.append(i + 1)
        else:
            partition1.append(i + 1)
    pc = 0
    for i in edges:
        x, y = (i[0], i[1])
        if x in partition0 and y in partition1 or (x in partition1 and y in partition0):
            pc += 1
    return pc

a = 100
b = 10
edgesTemp = [[1, 2], [1, 3], [2, 4], [3, 6], [4, 5], [5, 6]]

def fitness(ga_instance, chromosome, chromosomeIdx):
    ic = IC(chromosome)
    pc = PC(chromosome, edges)
    cost = a * pc + b * ic * ic
    return 1.0 / cost

from tqdm import tqdm

maxFitness = 0
res = ''
for i in tqdm(range(1 << nodesLen)):
    s = ''
    for j in range(nodesLen):
        if i & 1 << j == 0:
            s += '0'
        else:
            s += '1'
    val = fitness(0, s, 0)
    if val > maxFitness:
        maxFitness = val
        res = s
print(f'{res}<------------>{maxFitness}')
code
128028409/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
target_url = 'https://filebox.ece.vt.edu/~mhsiao/ISCAS89/s27.bench'
import urllib.request

outputs = []
inputs = []
for line in urllib.request.urlopen(target_url):
    l = line.decode('utf-8')
    if '=' in l:
        parse = l.split(' = ')
        outputs.append(parse[0])
        inputs.append(parse[1].split('(')[1].split(')')[0].split(', '))

print('edges\n')
n = len(outputs)
edges = []
for i in range(n):
    out = outputs[i]
    for j in range(n):
        if out in inputs[j]:
            edges.append([i + 1, j + 1])
            print(i + 1, '\t', j + 1)
code
128028409/cell_14
[ "text_plain_output_1.png" ]
import pygad
import random

target_url = 'https://filebox.ece.vt.edu/~mhsiao/ISCAS89/s27.bench'
import urllib.request

outputs = []
inputs = []
for line in urllib.request.urlopen(target_url):
    l = line.decode('utf-8')
    if '=' in l:
        parse = l.split(' = ')
        outputs.append(parse[0])
        inputs.append(parse[1].split('(')[1].split(')')[0].split(', '))

n = len(outputs)
edges = []
for i in range(n):
    out = outputs[i]
    for j in range(n):
        if out in inputs[j]:
            edges.append([i + 1, j + 1])
nodesLen = n
edgesLen = len(edges)

def IC(chromosome):
    zeros = 0
    ones = 0
    for i in chromosome:
        if str(i) == '0':  # fix: compare via str() so int genes from pygad and '0'/'1' test strings both work
            zeros += 1
        else:
            ones += 1
    return abs(zeros - ones)

IC('0000000111111111111')

def PC(chromosome, edges):
    partition0 = []
    partition1 = []
    for i in range(len(chromosome)):
        if str(chromosome[i]) == '0':  # same str() comparison as in IC
            partition0.append(i + 1)
        else:
            partition1.append(i + 1)
    pc = 0
    for i in edges:
        x, y = (i[0], i[1])
        if x in partition0 and y in partition1 or (x in partition1 and y in partition0):
            pc += 1
    return pc

a = 100
b = 10
edgesTemp = [[1, 2], [1, 3], [2, 4], [3, 6], [4, 5], [5, 6]]

def fitness(ga_instance, chromosome, chromosomeIdx):
    ic = IC(chromosome)
    pc = PC(chromosome, edges)
    cost = a * pc + b * ic * ic
    return 1.0 / cost

def on_generation(ga_instance):
    pass

import random

def getInitialPopulation(chromosomeLen, chromosomesReq):
    population = []
    for i in range(chromosomesReq):
        chromosome = []
        for j in range(chromosomeLen):
            chromosome.append(random.randint(0, 1))
        population.append(chromosome)
    return population

num_generations = 50
num_parents_mating = 2
init_range_low = 0
init_range_high = 2
initial_pop = getInitialPopulation(nodesLen, 20)
ga_instance = pygad.GA(num_generations=num_generations, num_parents_mating=num_parents_mating,
                       fitness_func=fitness, sol_per_pop=10, num_genes=nodesLen, gene_type=int,
                       init_range_low=init_range_low, init_range_high=init_range_high,
                       gene_space=[0, 1], initial_population=initial_pop,
                       parent_selection_type='sss', K_tournament=3,
                       crossover_type='scattered', crossover_probability=0.6,
                       mutation_type='random', mutation_probability=0.1,
                       save_best_solutions=True, on_generation=on_generation)
ga_instance.run()
ga_instance.plot_fitness()
code
128028409/cell_12
[ "text_plain_output_1.png" ]
import pygad
import random

target_url = 'https://filebox.ece.vt.edu/~mhsiao/ISCAS89/s27.bench'
import urllib.request

outputs = []
inputs = []
for line in urllib.request.urlopen(target_url):
    l = line.decode('utf-8')
    if '=' in l:
        parse = l.split(' = ')
        outputs.append(parse[0])
        inputs.append(parse[1].split('(')[1].split(')')[0].split(', '))

n = len(outputs)
edges = []
for i in range(n):
    out = outputs[i]
    for j in range(n):
        if out in inputs[j]:
            edges.append([i + 1, j + 1])
nodesLen = n
edgesLen = len(edges)

def IC(chromosome):
    zeros = 0
    ones = 0
    for i in chromosome:
        if str(i) == '0':  # fix: compare via str() so int genes from pygad and '0'/'1' test strings both work
            zeros += 1
        else:
            ones += 1
    return abs(zeros - ones)

IC('0000000111111111111')

def PC(chromosome, edges):
    partition0 = []
    partition1 = []
    for i in range(len(chromosome)):
        if str(chromosome[i]) == '0':  # same str() comparison as in IC
            partition0.append(i + 1)
        else:
            partition1.append(i + 1)
    pc = 0
    for i in edges:
        x, y = (i[0], i[1])
        if x in partition0 and y in partition1 or (x in partition1 and y in partition0):
            pc += 1
    return pc

a = 100
b = 10
edgesTemp = [[1, 2], [1, 3], [2, 4], [3, 6], [4, 5], [5, 6]]

def fitness(ga_instance, chromosome, chromosomeIdx):
    ic = IC(chromosome)
    pc = PC(chromosome, edges)
    cost = a * pc + b * ic * ic
    return 1.0 / cost

def on_generation(ga_instance):
    pass

import random

def getInitialPopulation(chromosomeLen, chromosomesReq):
    population = []
    for i in range(chromosomesReq):
        chromosome = []
        for j in range(chromosomeLen):
            chromosome.append(random.randint(0, 1))
        population.append(chromosome)
    return population

num_generations = 50
num_parents_mating = 2
init_range_low = 0
init_range_high = 2
initial_pop = getInitialPopulation(nodesLen, 20)
ga_instance = pygad.GA(num_generations=num_generations, num_parents_mating=num_parents_mating,
                       fitness_func=fitness, sol_per_pop=10, num_genes=nodesLen, gene_type=int,
                       init_range_low=init_range_low, init_range_high=init_range_high,
                       gene_space=[0, 1], initial_population=initial_pop,
                       parent_selection_type='sss', K_tournament=3,
                       crossover_type='scattered', crossover_probability=0.6,
                       mutation_type='random', mutation_probability=0.1,
                       save_best_solutions=True, on_generation=on_generation)
code
128028409/cell_5
[ "image_output_2.png", "image_output_1.png" ]
def IC(chromosome):
    zeros = 0
    ones = 0
    for i in chromosome:
        if i == '0':
            zeros += 1
        else:
            ones += 1
    return abs(zeros - ones)

IC('0000000111111111111')
code
17102411/cell_13
[ "text_plain_output_1.png" ]
data.shape
data.isnull().sum()
data.address[1]
data = data.rename({'approx_cost(for two people)': 'cost'}, axis=1)
data['cost'] = data['cost'].replace(',', '', regex=True)
data['rest_type'].value_counts()
code
17102411/cell_9
[ "text_plain_output_1.png" ]
data.shape
data.isnull().sum()
data.address[1]
code
17102411/cell_4
[ "text_html_output_1.png" ]
data.shape
data.info()
code
17102411/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

data.shape
data.isnull().sum()
data.address[1]
data = data.rename({'approx_cost(for two people)': 'cost'}, axis=1)
data['cost'] = data['cost'].replace(',', '', regex=True)
data[['votes', 'cost']] = data[['votes', 'cost']].apply(pd.to_numeric)
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata.shape
newdata.index = newdata['name']
pd.DataFrame(newdata.groupby(['cuisines'])['cuisines'].agg(['count']).sort_values('count', ascending=False)).head(10)
code
17102411/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd

data.shape
data.isnull().sum()
data.address[1]
data = data.rename({'approx_cost(for two people)': 'cost'}, axis=1)
data['cost'] = data['cost'].replace(',', '', regex=True)
data[['votes', 'cost']] = data[['votes', 'cost']].apply(pd.to_numeric)
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata.shape
newdata.describe(include='all')
code
17102411/cell_6
[ "text_html_output_1.png" ]
data.shape
data.tail(3)
code
17102411/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd

data.shape
data.isnull().sum()
data.address[1]
data = data.rename({'approx_cost(for two people)': 'cost'}, axis=1)
data['cost'] = data['cost'].replace(',', '', regex=True)
data[['votes', 'cost']] = data[['votes', 'cost']].apply(pd.to_numeric)
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata.shape
newdata.index = newdata['name']
newdata.drop(['name', 'url', 'phone', 'listed_in(city)', 'listed_in(type)_x', 'address', 'dish_liked', 'listed_in(type)_y', 'menu_item', 'cuisines', 'reviews_list'], axis=1, inplace=True)
newdata.head(3)
code
17102411/cell_7
[ "text_html_output_1.png" ]
data.shape
data['menu_item'].value_counts()
code
17102411/cell_18
[ "text_html_output_1.png" ]
import pandas as pd

data.shape
data.isnull().sum()
data.address[1]
data = data.rename({'approx_cost(for two people)': 'cost'}, axis=1)
data['cost'] = data['cost'].replace(',', '', regex=True)
data[['votes', 'cost']] = data[['votes', 'cost']].apply(pd.to_numeric)
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata.shape
code
17102411/cell_8
[ "text_html_output_1.png" ]
data.shape
data.isnull().sum()
code
17102411/cell_16
[ "text_html_output_1.png" ]
import pandas as pd

data.shape
data.isnull().sum()
data.address[1]
data = data.rename({'approx_cost(for two people)': 'cost'}, axis=1)
data['cost'] = data['cost'].replace(',', '', regex=True)
data[['votes', 'cost']] = data[['votes', 'cost']].apply(pd.to_numeric)
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata.head(3)
code
17102411/cell_3
[ "text_plain_output_1.png" ]
data.shape
code
17102411/cell_12
[ "text_plain_output_1.png" ]
data.shape
data.isnull().sum()
data.address[1]
data = data.rename({'approx_cost(for two people)': 'cost'}, axis=1)
data['cost'] = data['cost'].replace(',', '', regex=True)
data['listed_in(type)'].value_counts()
code
17102411/cell_5
[ "text_plain_output_1.png" ]
data.shape
data.head(3)
code
16133275/cell_9
[ "text_plain_output_1.png" ]
from IPython.display import Image
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import tensorflow as tf
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split

onlyfiles = os.listdir('../input/utkface_aligned_cropped/UTKFace')
y = np.array([[[i.split('_')[0]], [i.split('_')[1]]] for i in onlyfiles])
X_data = []
for file in onlyfiles:
    face = cv2.imread('../input/utkface_aligned_cropped/UTKFace/' + file, cv2.IMREAD_COLOR)
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
    face = cv2.resize(face, (32, 32))
    X_data.append(face)
X_data = np.array(X_data)
X_data.shape
X = np.squeeze(X_data)
(plt.xticks([]), plt.yticks([]))
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33)
y_train = [y_train[:, 1], y_train[:, 0]]
y_valid = [y_valid[:, 1], y_valid[:, 0]]

def gen_model():
    inputs = tf.keras.layers.Input(shape=(32, 32, 3))
    x = inputs
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(84, 3, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Flatten()(x)
    x1 = layers.Dense(64, activation='relu')(x)
    x2 = layers.Dense(64, activation='relu')(x)
    x1 = layers.Dense(1, activation='sigmoid', name='sex_out')(x1)
    x2 = layers.Dense(1, activation='relu', name='age_out')(x2)
    model = tf.keras.models.Model(inputs=inputs, outputs=[x1, x2])
    model.compile(optimizer='Adam', loss=['binary_crossentropy', 'mae'])
    return model

model = gen_model()
Image('model.png')

import random
random_id = random.random()
model.summary()
callbacks = [tf.keras.callbacks.EarlyStopping(patience=75, monitor='val_loss', restore_best_weights=True),
             tf.keras.callbacks.TensorBoard(log_dir='./logs/' + str(random_id))]
model.fit(X_train, y_train, epochs=2000, batch_size=240, validation_data=(X_valid, y_valid), callbacks=callbacks, shuffle=True)
model.evaluate(X_valid, y_valid)
code
16133275/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2
import numpy as np
import os
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split

onlyfiles = os.listdir('../input/utkface_aligned_cropped/UTKFace')
y = np.array([[[i.split('_')[0]], [i.split('_')[1]]] for i in onlyfiles])
X_data = []
for file in onlyfiles:
    face = cv2.imread('../input/utkface_aligned_cropped/UTKFace/' + file, cv2.IMREAD_COLOR)
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
    face = cv2.resize(face, (32, 32))
    X_data.append(face)
X_data = np.array(X_data)
X_data.shape
code
16133275/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split

print(os.listdir('../input/utkface_aligned_cropped/'))
code
16133275/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from IPython.display import Image
from tensorflow.keras import layers
import tensorflow as tf

def gen_model():
    inputs = tf.keras.layers.Input(shape=(32, 32, 3))
    x = inputs
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(84, 3, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Flatten()(x)
    x1 = layers.Dense(64, activation='relu')(x)
    x2 = layers.Dense(64, activation='relu')(x)
    x1 = layers.Dense(1, activation='sigmoid', name='sex_out')(x1)
    x2 = layers.Dense(1, activation='relu', name='age_out')(x2)
    model = tf.keras.models.Model(inputs=inputs, outputs=[x1, x2])
    model.compile(optimizer='Adam', loss=['binary_crossentropy', 'mae'])
    tf.keras.utils.plot_model(model, 'model.png', show_shapes=True)
    return model

model = gen_model()
Image('model.png')
code
16133275/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from IPython.display import Image
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import tensorflow as tf
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split

onlyfiles = os.listdir('../input/utkface_aligned_cropped/UTKFace')
y = np.array([[[i.split('_')[0]], [i.split('_')[1]]] for i in onlyfiles])
X_data = []
for file in onlyfiles:
    face = cv2.imread('../input/utkface_aligned_cropped/UTKFace/' + file, cv2.IMREAD_COLOR)
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
    face = cv2.resize(face, (32, 32))
    X_data.append(face)
X_data = np.array(X_data)
X_data.shape
X = np.squeeze(X_data)
(plt.xticks([]), plt.yticks([]))
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33)
y_train = [y_train[:, 1], y_train[:, 0]]
y_valid = [y_valid[:, 1], y_valid[:, 0]]

def gen_model():
    inputs = tf.keras.layers.Input(shape=(32, 32, 3))
    x = inputs
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(84, 3, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Flatten()(x)
    x1 = layers.Dense(64, activation='relu')(x)
    x2 = layers.Dense(64, activation='relu')(x)
    x1 = layers.Dense(1, activation='sigmoid', name='sex_out')(x1)
    x2 = layers.Dense(1, activation='relu', name='age_out')(x2)
    model = tf.keras.models.Model(inputs=inputs, outputs=[x1, x2])
    model.compile(optimizer='Adam', loss=['binary_crossentropy', 'mae'])
    return model

model = gen_model()
Image('model.png')

import random
random_id = random.random()
model.summary()
callbacks = [tf.keras.callbacks.EarlyStopping(patience=75, monitor='val_loss', restore_best_weights=True),
             tf.keras.callbacks.TensorBoard(log_dir='./logs/' + str(random_id))]
model.fit(X_train, y_train, epochs=2000, batch_size=240, validation_data=(X_valid, y_valid), callbacks=callbacks, shuffle=True)
code
16133275/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import os
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split

onlyfiles = os.listdir('../input/utkface_aligned_cropped/UTKFace')
y = np.array([[[i.split('_')[0]], [i.split('_')[1]]] for i in onlyfiles])
print(y.shape)
print(y[0])
code
16133275/cell_10
[ "text_plain_output_1.png" ]
from IPython.display import Image
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import tensorflow as tf
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split

onlyfiles = os.listdir('../input/utkface_aligned_cropped/UTKFace')
y = np.array([[[i.split('_')[0]], [i.split('_')[1]]] for i in onlyfiles])
X_data = []
for file in onlyfiles:
    face = cv2.imread('../input/utkface_aligned_cropped/UTKFace/' + file, cv2.IMREAD_COLOR)
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
    face = cv2.resize(face, (32, 32))
    X_data.append(face)
X_data = np.array(X_data)
X_data.shape
X = np.squeeze(X_data)
(plt.xticks([]), plt.yticks([]))
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33)
y_train = [y_train[:, 1], y_train[:, 0]]
y_valid = [y_valid[:, 1], y_valid[:, 0]]

def gen_model():
    inputs = tf.keras.layers.Input(shape=(32, 32, 3))
    x = inputs
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPool2D(2)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Conv2D(84, 3, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Flatten()(x)
    x1 = layers.Dense(64, activation='relu')(x)
    x2 = layers.Dense(64, activation='relu')(x)
    x1 = layers.Dense(1, activation='sigmoid', name='sex_out')(x1)
    x2 = layers.Dense(1, activation='relu', name='age_out')(x2)
    model = tf.keras.models.Model(inputs=inputs, outputs=[x1, x2])
    model.compile(optimizer='Adam', loss=['binary_crossentropy', 'mae'])
    return model

model = gen_model()
Image('model.png')

import random
random_id = random.random()
model.summary()
callbacks = [tf.keras.callbacks.EarlyStopping(patience=75, monitor='val_loss', restore_best_weights=True),
             tf.keras.callbacks.TensorBoard(log_dir='./logs/' + str(random_id))]
model.fit(X_train, y_train, epochs=2000, batch_size=240, validation_data=(X_valid, y_valid), callbacks=callbacks, shuffle=True)
model.evaluate(X_valid, y_valid)
print(y_valid[0][1], y_valid[1][1])
print(model.predict([[X_valid[1]]]))
code
16133275/cell_5
[ "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import numpy as np
import pandas as pd
import cv2
from IPython.display import Image
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split

onlyfiles = os.listdir('../input/utkface_aligned_cropped/UTKFace')
y = np.array([[[i.split('_')[0]], [i.split('_')[1]]] for i in onlyfiles])
X_data = []
for file in onlyfiles:
    face = cv2.imread('../input/utkface_aligned_cropped/UTKFace/' + file, cv2.IMREAD_COLOR)
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
    face = cv2.resize(face, (32, 32))
    X_data.append(face)
X_data = np.array(X_data)
X_data.shape
X = np.squeeze(X_data)
plt.imshow(X[1], interpolation='bicubic')
(plt.xticks([]), plt.yticks([]))
plt.show()
print(y[1])
code
50230470/cell_4
[ "text_plain_output_1.png" ]
import random

def random_agent(observation, configuration):
    return random.randrange(configuration.banditCount)
code
50230470/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from kaggle_environments import make
from kaggle_environments import make

env = make('n-arm-bandit', debug=True)
env.reset()
env.run(['random_agent.py', 'my-sub-file.py'])
env.reset()
env.run(['my-sub-file.py', 'my-sub-file.py'])
env.render(mode='ipython', width=800, height=700)
code
50230470/cell_2
[ "text_plain_output_1.png" ]
!pip install kaggle-environments --upgrade
code
50230470/cell_3
[ "text_plain_output_1.png" ]
import json
import numpy as np
import pandas as pd

basic_state = None
reward_full = 0
step_ending = None

def basic_mab(measurement, structure):
    # Thompson sampling over Beta(successes, failures) per arm, with a decay on recently pulled arms
    no_reward_step = 0.01
    decay_rate = 0.99
    global basic_state, reward_full, step_ending
    if measurement.step == 0:
        basic_state = [[1, 1] for i in range(structure['banditCount'])]
    else:
        reward_final = measurement['given_reward'] - reward_full
        reward_full = measurement['given_reward']
        player = int(step_ending == measurement.lastActions[1])
        if reward_final > 0:
            basic_state[measurement.lastActions[player]][0] += reward_final
        else:
            basic_state[measurement.lastActions[player]][1] += no_reward_step
        basic_state[measurement.lastActions[0]][0] = (basic_state[measurement.lastActions[0]][0] - 1) * decay_rate + 1
        basic_state[measurement.lastActions[1]][0] = (basic_state[measurement.lastActions[1]][0] - 1) * decay_rate + 1
    best_proba = -1
    agent_optimal = None
    for k in range(structure['banditCount']):
        proba = np.random.beta(basic_state[k][0], basic_state[k][1])
        if proba > best_proba:
            best_proba = proba
            agent_optimal = k
    step_ending = agent_optimal
    return agent_optimal
code
50230470/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from kaggle_environments import make
from kaggle_environments import make

env = make('n-arm-bandit', debug=True)
env.reset()
env.run(['random_agent.py', 'my-sub-file.py'])
env.render(mode='ipython', width=800, height=700)
code
17124444/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes.csv')
data.head()
code
17124444/cell_6
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes.csv')
y = data['Outcome']
feature_data = data.drop('Outcome', axis=1)
scaled_features = StandardScaler().fit_transform(feature_data.values)
scaled_data = pd.DataFrame(scaled_features, index=feature_data.index, columns=feature_data.columns)
scaled_data.head()
code
17124444/cell_11
[ "text_html_output_1.png" ]
import operator

dic = {}
dic = {1: 1.2, 2: 1.56, 3: 5.2, 6: 7.1, 4: 2.7}
sorted(dic.items(), key=lambda item: item[1], reverse=True)
code
17124444/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

print(os.listdir('../input'))
code
17124444/cell_7
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes.csv')
y = data['Outcome']
feature_data = data.drop('Outcome', axis=1)
scaled_features = StandardScaler().fit_transform(feature_data.values)
scaled_data = pd.DataFrame(scaled_features, index=feature_data.index, columns=feature_data.columns)
train_scaled = pd.concat([scaled_data, y], axis=1)
train_scaled.head()
code
17124444/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/diabetes.csv')
y = data['Outcome']
feature_data = data.drop('Outcome', axis=1)
correlation = data.corr()
plt.figure(figsize=(10, 10))
sns.heatmap(correlation, annot=True)
plt.title('Correlation between different features')
code
17124444/cell_14
[ "text_html_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes.csv')
y = data['Outcome']
feature_data = data.drop('Outcome', axis=1)
scaled_features = StandardScaler().fit_transform(feature_data.values)
scaled_data = pd.DataFrame(scaled_features, index=feature_data.index, columns=feature_data.columns)
train_scaled = pd.concat([scaled_data, y], axis=1)

def EuclideanD(d1, d2, length):
    distance = 0
    for l in range(length):
        distance += np.square(d1[l] - d2[l])
    return np.sqrt(distance)

def KNN(train, test, k):
    result = []
    length = test.shape[1]
    for y in range(len(test)):
        distances = {}
        sort = {}
        for x in range(len(train)):
            # we iterate over the no. of features (test.shape[1]); EuclideanD applies the Euclidean
            # formula to the respective features of a data point and returns the summed distance
            dist = EuclideanD(test.iloc[y], train.iloc[x], length)
            distances[x] = dist
        sorted_d = sorted(distances.items(), key=lambda item: item[1])
        neighbours = []
        for x in range(k):
            neighbours.append(sorted_d[x][0])
        majorityclassvotes = {}
        for i in range(len(neighbours)):
            response = train.iloc[neighbours[i]][-1]
            if response in majorityclassvotes:
                majorityclassvotes[response] += 1
            else:
                majorityclassvotes[response] = 1
        majorityvotesorted = sorted(majorityclassvotes.items(), key=lambda item: item[1], reverse=True)
        result.append(majorityvotesorted[0][0])
    return result

test_data = [[0.42, 0.8, 0.25, 0.7, -0.12, 0.65, 0.36, 1.9],
             [-0.3, 0.5, 0.7, 0.2, -0.34, 0.86, 0.56, 2.8]]
test = pd.DataFrame(test_data)
k = 3
result = KNN(train_scaled, test, k)

from sklearn.neighbors import KNeighborsClassifier
neigh = KNeighborsClassifier(n_neighbors=3)
neigh.fit(train_scaled.iloc[:, 0:8], train_scaled['Outcome'])
print(neigh.predict(test))
code
17124444/cell_12
[ "text_html_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/diabetes.csv')
y = data['Outcome']
feature_data = data.drop('Outcome', axis=1)
scaled_features = StandardScaler().fit_transform(feature_data.values)
scaled_data = pd.DataFrame(scaled_features, index=feature_data.index, columns=feature_data.columns)
train_scaled = pd.concat([scaled_data, y], axis=1)

def EuclideanD(d1, d2, length):
    distance = 0
    for l in range(length):
        distance += np.square(d1[l] - d2[l])
    return np.sqrt(distance)

def KNN(train, test, k):
    result = []
    length = test.shape[1]
    for y in range(len(test)):
        distances = {}
        sort = {}
        for x in range(len(train)):
            # we iterate over the no. of features (test.shape[1]); EuclideanD applies the Euclidean
            # formula to the respective features of a data point and returns the summed distance
            dist = EuclideanD(test.iloc[y], train.iloc[x], length)
            distances[x] = dist
        sorted_d = sorted(distances.items(), key=lambda item: item[1])
        neighbours = []
        for x in range(k):
            neighbours.append(sorted_d[x][0])
        majorityclassvotes = {}
        for i in range(len(neighbours)):
            response = train.iloc[neighbours[i]][-1]
            if response in majorityclassvotes:
                majorityclassvotes[response] += 1
            else:
                majorityclassvotes[response] = 1
        majorityvotesorted = sorted(majorityclassvotes.items(), key=lambda item: item[1], reverse=True)
        result.append(majorityvotesorted[0][0])
    return result

test_data = [[0.42, 0.8, 0.25, 0.7, -0.12, 0.65, 0.36, 1.9],
             [-0.3, 0.5, 0.7, 0.2, -0.34, 0.86, 0.56, 2.8]]
test = pd.DataFrame(test_data)
k = 3
result = KNN(train_scaled, test, k)
print(result)
code
74054331/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/top-indian-colleges/College_data.csv')
df.dtypes
df.isnull().sum()
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 5))
sns.barplot(x=df.columns, y=df.isnull().sum() / len(df))
plt.xticks(rotation=90)
plt.show()
code
74054331/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/top-indian-colleges/College_data.csv')
df.head()
code
74054331/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/top-indian-colleges/College_data.csv')
df.dtypes
df.isnull().sum()
import seaborn as sns
import matplotlib.pyplot as plt
plt.xticks(rotation=90)
df_plot = df.groupby(by=['State']).College_Name.nunique()
plt.figure(figsize=(20, 7))
plt.xticks(rotation=90)
sns.barplot(x=df_plot.index, y=df_plot)
code
74054331/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74054331/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/top-indian-colleges/College_data.csv')
df.dtypes
df.isnull().sum()
code
74054331/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/top-indian-colleges/College_data.csv')
df.dtypes
df.isnull().sum()
import seaborn as sns
import matplotlib.pyplot as plt
plt.xticks(rotation=90)
df_plot = df.groupby(by=['State']).College_Name.nunique()
plt.xticks(rotation=90)
g = df.groupby(by=['State', 'Stream']).College_Name.nunique()
g = g.reset_index()
g = g.rename(columns={'College_Name': 'Counts'})
sns.catplot(data=g, col='State', x='Stream', y='Counts', col_wrap=1, kind='bar', sharex=False, height=7, aspect=2)
code
74054331/cell_5
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/top-indian-colleges/College_data.csv')
df.dtypes
code
73084093/cell_21
[ "text_html_output_2.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_html_output_3.png" ]
from keras.layers import Dense
from keras.layers import Dense, LSTM
from keras.layers import Dropout
from keras.layers import LSTM
from keras.models import Sequential
from keras.models import Sequential
from pmdarima.arima import ADFTest
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
import keras
import numpy as np
import plotly.graph_objs as go
import yfinance as yf
import yfinance as yf
stock_name = 'AMD'
data = yf.download(stock_name, start='2020-03-26', end='2021-03-29')
from sklearn.preprocessing import MinMaxScaler
import math
import matplotlib.pyplot as plt
import keras
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import *
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras.layers import Dense, LSTM

def lstm(stock_name, data):
    data = data.filter(['Close'])
    dataset = data.values
    training_data_len = int(np.ceil(len(dataset) * 0.8))
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)
    train_data = scaled_data[0:int(training_data_len), :]
    x_train = []
    y_train = []
    for i in range(60, len(train_data)):
        x_train.append(train_data[i - 60:i, 0])
        y_train.append(train_data[i, 0])
    x_train, y_train = (np.array(x_train), np.array(y_train))
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
    model = Sequential()
    model.add(LSTM(128, return_sequences=True, input_shape=(x_train.shape[1], 1)))
    model.add(Dropout(0.35))
    model.add(LSTM(64, return_sequences=False))
    model.add(Dropout(0.3))
    model.add(Dense(25, activation='relu'))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=1, epochs=21)
    test_data = scaled_data[training_data_len - 60:, :]
    x_test = []
    y_test = dataset[training_data_len:, :]
    for i in range(60, len(test_data)):
        x_test.append(test_data[i - 60:i, 0])
    x_test = np.array(x_test)
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    predictions = model.predict(x_test)
    predictions = scaler.inverse_transform(predictions)
    rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
    train = data[:training_data_len]
    valid = data[training_data_len:]
    train_gr = np.reshape(train, (203,))
    train_gr = train_gr['Close']
    valid_gr = np.reshape(valid, (50,))
    valid_gr = valid_gr['Close']
    preds_gr = np.reshape(predictions, (50,))
    x_train = list(range(0, len(train_data)))
    x_valid = list(range(len(train_data) - 1, len(dataset)))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=x_train, y=train_gr, mode='lines+markers', marker=dict(size=4), name='train'))
    fig.add_trace(go.Scatter(x=x_valid, y=valid_gr, mode='lines+markers', marker=dict(size=4), name='valid'))
    fig.add_trace(go.Scatter(x=x_valid, y=preds_gr, mode='lines+markers', marker=dict(size=4), name='predictions'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} LSTM data', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    data_new = yf.download(stock_name, start='2021-03-01', end='2021-04-30')
    data_new = data_new.filter(['Close'])
    dataset = data_new.values
    training_data_len = len(dataset)
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)
    test_data = scaled_data[training_data_len - len(data_new):, :]
    x_test = []
    y_test = dataset[training_data_len:, :]
    for i in range(20, len(test_data)):
        x_test.append(test_data[i - 20:i, 0])
    x_test = np.array(x_test)
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    hist_data_new = yf.download(stock_name, start='2021-04-01', end='2021-05-04')
    hist_data_new = hist_data_new.drop(['Open', 'High', 'Low', 'Adj Close', 'Volume'], axis=1)
    hist_data_new = hist_data_new['Close']
    hist_data_new = np.array(hist_data_new)
    pred_lstm = model.predict(x_test)
    pred_lstm = pred_lstm[:-1]
    pred_lstm = scaler.inverse_transform(pred_lstm)
    preds_gr = np.reshape(pred_lstm, (22,))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=hist_data_new, mode='lines+markers', name='historical'))
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=preds_gr, mode='lines+markers', name='predictions'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} LSTM prediction', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    return (pred_lstm, rmse)

data_adf = data.drop(['Open', 'High', 'Low', 'Adj Close', 'Volume'], axis=1)
data_adf = data_adf['Close']
from pmdarima.arima import ADFTest
adf_test = ADFTest(alpha=0.05)
adf_test.should_diff(data_adf)
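# Illustrative sketch of how lstm() builds its sliding windows, shown on a toy
# array with a window of 3 instead of 60: each sample holds the previous
# `window` values and the label is the value that follows.
toy = np.arange(10).reshape(-1, 1)
window = 3
xs, ys = ([], [])
for i in range(window, len(toy)):
    xs.append(toy[i - window:i, 0])
    ys.append(toy[i, 0])
print(np.array(xs).shape)  # (7, 3): 7 samples of 3 timesteps each
print(ys[0])  # 3, the value right after the first window [0, 1, 2]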
code
73084093/cell_6
[ "text_html_output_2.png", "text_html_output_1.png", "text_plain_output_1.png" ]
!pip install yfinance --quiet
!pip install pmdarima --quiet
code
73084093/cell_8
[ "text_html_output_1.png", "text_plain_output_1.png" ]
!pip install statsmodels==0.11.0rc1 --quiet
!pip install -Iv pulp==1.6.8 --quiet
code
73084093/cell_16
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dense, LSTM
from keras.layers import Dropout
from keras.layers import LSTM
from keras.models import Sequential
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
import keras
import numpy as np
import plotly.graph_objs as go
import yfinance as yf
import yfinance as yf
stock_name = 'AMD'
data = yf.download(stock_name, start='2020-03-26', end='2021-03-29')
from sklearn.preprocessing import MinMaxScaler
import math
import matplotlib.pyplot as plt
import keras
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import *
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras.layers import Dense, LSTM

def lstm(stock_name, data):
    data = data.filter(['Close'])
    dataset = data.values
    training_data_len = int(np.ceil(len(dataset) * 0.8))
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)
    train_data = scaled_data[0:int(training_data_len), :]
    x_train = []
    y_train = []
    for i in range(60, len(train_data)):
        x_train.append(train_data[i - 60:i, 0])
        y_train.append(train_data[i, 0])
    x_train, y_train = (np.array(x_train), np.array(y_train))
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
    model = Sequential()
    model.add(LSTM(128, return_sequences=True, input_shape=(x_train.shape[1], 1)))
    model.add(Dropout(0.35))
    model.add(LSTM(64, return_sequences=False))
    model.add(Dropout(0.3))
    model.add(Dense(25, activation='relu'))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=1, epochs=21)
    test_data = scaled_data[training_data_len - 60:, :]
    x_test = []
    y_test = dataset[training_data_len:, :]
    for i in range(60, len(test_data)):
        x_test.append(test_data[i - 60:i, 0])
    x_test = np.array(x_test)
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    predictions = model.predict(x_test)
    predictions = scaler.inverse_transform(predictions)
    rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
    train = data[:training_data_len]
    valid = data[training_data_len:]
    train_gr = np.reshape(train, (203,))
    train_gr = train_gr['Close']
    valid_gr = np.reshape(valid, (50,))
    valid_gr = valid_gr['Close']
    preds_gr = np.reshape(predictions, (50,))
    x_train = list(range(0, len(train_data)))
    x_valid = list(range(len(train_data) - 1, len(dataset)))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=x_train, y=train_gr, mode='lines+markers', marker=dict(size=4), name='train'))
    fig.add_trace(go.Scatter(x=x_valid, y=valid_gr, mode='lines+markers', marker=dict(size=4), name='valid'))
    fig.add_trace(go.Scatter(x=x_valid, y=preds_gr, mode='lines+markers', marker=dict(size=4), name='predictions'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} LSTM data', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    data_new = yf.download(stock_name, start='2021-03-01', end='2021-04-30')
    data_new = data_new.filter(['Close'])
    dataset = data_new.values
    training_data_len = len(dataset)
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)
    test_data = scaled_data[training_data_len - len(data_new):, :]
    x_test = []
    y_test = dataset[training_data_len:, :]
    for i in range(20, len(test_data)):
        x_test.append(test_data[i - 20:i, 0])
    x_test = np.array(x_test)
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    hist_data_new = yf.download(stock_name, start='2021-04-01', end='2021-05-04')
    hist_data_new = hist_data_new.drop(['Open', 'High', 'Low', 'Adj Close', 'Volume'], axis=1)
    hist_data_new = hist_data_new['Close']
    hist_data_new = np.array(hist_data_new)
    pred_lstm = model.predict(x_test)
    pred_lstm = pred_lstm[:-1]
    pred_lstm = scaler.inverse_transform(pred_lstm)
    preds_gr = np.reshape(pred_lstm, (22,))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=hist_data_new, mode='lines+markers', name='historical'))
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=preds_gr, mode='lines+markers', name='predictions'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} LSTM prediction', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    return (pred_lstm, rmse)

lstm_pred, lstm_rmse = lstm(stock_name, data)
code
73084093/cell_17
[ "text_plain_output_1.png" ]
print(lstm_pred.shape)
code
73084093/cell_24
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dense, LSTM
from keras.layers import Dropout
from keras.layers import LSTM
from keras.models import Sequential
from keras.models import Sequential
from pmdarima.arima import ADFTest
from pmdarima.arima import ADFTest
from pylab import rcParams
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
import keras
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import plotly.graph_objs as go
import pmdarima as pm
import warnings
import yfinance as yf
import yfinance as yf
stock_name = 'AMD'
data = yf.download(stock_name, start='2020-03-26', end='2021-03-29')
from sklearn.preprocessing import MinMaxScaler
import math
import matplotlib.pyplot as plt
import keras
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import *
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras.layers import Dense, LSTM

def lstm(stock_name, data):
    data = data.filter(['Close'])
    dataset = data.values
    training_data_len = int(np.ceil(len(dataset) * 0.8))
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)
    train_data = scaled_data[0:int(training_data_len), :]
    x_train = []
    y_train = []
    for i in range(60, len(train_data)):
        x_train.append(train_data[i - 60:i, 0])
        y_train.append(train_data[i, 0])
    x_train, y_train = (np.array(x_train), np.array(y_train))
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
    model = Sequential()
    model.add(LSTM(128, return_sequences=True, input_shape=(x_train.shape[1], 1)))
    model.add(Dropout(0.35))
    model.add(LSTM(64, return_sequences=False))
    model.add(Dropout(0.3))
    model.add(Dense(25, activation='relu'))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=1, epochs=21)
    test_data = scaled_data[training_data_len - 60:, :]
    x_test = []
    y_test = dataset[training_data_len:, :]
    for i in range(60, len(test_data)):
        x_test.append(test_data[i - 60:i, 0])
    x_test = np.array(x_test)
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    predictions = model.predict(x_test)
    predictions = scaler.inverse_transform(predictions)
    rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
    train = data[:training_data_len]
    valid = data[training_data_len:]
    train_gr = np.reshape(train, (203,))
    train_gr = train_gr['Close']
    valid_gr = np.reshape(valid, (50,))
    valid_gr = valid_gr['Close']
    preds_gr = np.reshape(predictions, (50,))
    x_train = list(range(0, len(train_data)))
    x_valid = list(range(len(train_data) - 1, len(dataset)))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=x_train, y=train_gr, mode='lines+markers', marker=dict(size=4), name='train'))
    fig.add_trace(go.Scatter(x=x_valid, y=valid_gr, mode='lines+markers', marker=dict(size=4), name='valid'))
    fig.add_trace(go.Scatter(x=x_valid, y=preds_gr, mode='lines+markers', marker=dict(size=4), name='predictions'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} LSTM data', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    data_new = yf.download(stock_name, start='2021-03-01', end='2021-04-30')
    data_new = data_new.filter(['Close'])
    dataset = data_new.values
    training_data_len = len(dataset)
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)
    test_data = scaled_data[training_data_len - len(data_new):, :]
    x_test = []
    y_test = dataset[training_data_len:, :]
    for i in range(20, len(test_data)):
        x_test.append(test_data[i - 20:i, 0])
    x_test = np.array(x_test)
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    hist_data_new = yf.download(stock_name, start='2021-04-01', end='2021-05-04')
    hist_data_new = hist_data_new.drop(['Open', 'High', 'Low', 'Adj Close', 'Volume'], axis=1)
    hist_data_new = hist_data_new['Close']
    hist_data_new = np.array(hist_data_new)
    pred_lstm = model.predict(x_test)
    pred_lstm = pred_lstm[:-1]
    pred_lstm = scaler.inverse_transform(pred_lstm)
    preds_gr = np.reshape(pred_lstm, (22,))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=hist_data_new, mode='lines+markers', name='historical'))
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=preds_gr, mode='lines+markers', name='predictions'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} LSTM prediction', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    return (pred_lstm, rmse)

data_adf = data.drop(['Open', 'High', 'Low', 'Adj Close', 'Volume'], axis=1)
data_adf = data_adf['Close']
from pmdarima.arima import ADFTest
adf_test = ADFTest(alpha=0.05)
adf_test.should_diff(data_adf)
import os
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pmdarima as pm
plt.style.use('fivethirtyeight')
from pylab import rcParams
rcParams['figure.figsize'] = (10, 6)
from statsmodels.tsa.arima_model import ARIMA
from pmdarima.arima import ADFTest
from pmdarima.datasets import load_wineind
import random

def arima(stock_name, data):
    df_close = data['Close']
    df_log = df_close
    train_data, test_data = (df_log[3:int(len(df_log) * 0.9)], df_log[int(len(df_log) * 0.9):])
    test_values = len(df_log) * 0.01 + 1.0
    x_train = list(range(0, 224))
    x_test = list(range(224, int(len(data))))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=x_train, y=train_data, mode='lines+markers', marker=dict(size=4), name='train'))
    fig.add_trace(go.Scatter(x=x_test, y=test_data, mode='lines+markers', marker=dict(size=4), name='test'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} ARIMA data', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    model = pm.auto_arima(df_log, start_p=0, d=None, start_q=0, max_p=5, max_d=5, max_q=5, start_P=0, D=1, start_Q=0, max_P=5, max_D=5, max_Q=5, m=7, seasonal=True, error_action='warn', trace=True, suppress_warnings=True, stepwise=True, random_state=20, n_fits=50)
    model.summary()
    exo_data = data['Volume']
    exo_data = exo_data[int(len(exo_data) * 0.9):]
    preds = model.predict(n_periods=22, X=exo_data)
    preds = np.vstack(preds)
    hist_data = yf.download(stock_name, start='2021-04-01', end='2021-05-04')
    hist_data = hist_data.drop(['Open', 'High', 'Low', 'Adj Close', 'Volume'], axis=1)
    hist_data = hist_data['Close']
    hist_data = np.array(hist_data)
    rmse = np.sqrt(np.mean((preds - hist_data) ** 2))
    preds_gr = np.reshape(preds, (22,))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=hist_data, mode='lines+markers', name='historical'))
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=preds_gr, mode='lines+markers', name='predictions'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} ARIMA prediction', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    return (preds, rmse)

arima_pred, arima_rmse = arima(stock_name, data)
print(arima_pred.shape)
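# The error metric used above, spelled out on hypothetical numbers:
# rmse = sqrt(mean((pred - actual) ** 2)).
demo_pred = np.array([80.0, 82.0, 81.0])
demo_actual = np.array([79.0, 84.0, 80.0])
print(np.sqrt(np.mean((demo_pred - demo_actual) ** 2)))  # sqrt((1 + 4 + 1) / 3) ~ 1.41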
code
73084093/cell_10
[ "text_plain_output_1.png" ]
import yfinance as yf
import yfinance as yf
stock_name = 'AMD'
data = yf.download(stock_name, start='2020-03-26', end='2021-03-29')
code
73084093/cell_27
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dense, LSTM
from keras.layers import Dropout
from keras.layers import LSTM
from keras.models import Sequential
from keras.models import Sequential
from pmdarima.arima import ADFTest
from pmdarima.arima import ADFTest
from pylab import rcParams
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
from statsmodels.tsa.statespace.sarimax import SARIMAX
import keras
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import plotly.graph_objs as go
import pmdarima as pm
import warnings
import yfinance as yf
import yfinance as yf
stock_name = 'AMD'
data = yf.download(stock_name, start='2020-03-26', end='2021-03-29')
from sklearn.preprocessing import MinMaxScaler
import math
import matplotlib.pyplot as plt
import keras
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import *
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras.layers import Dense, LSTM

def lstm(stock_name, data):
    data = data.filter(['Close'])
    dataset = data.values
    training_data_len = int(np.ceil(len(dataset) * 0.8))
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)
    train_data = scaled_data[0:int(training_data_len), :]
    x_train = []
    y_train = []
    for i in range(60, len(train_data)):
        x_train.append(train_data[i - 60:i, 0])
        y_train.append(train_data[i, 0])
    x_train, y_train = (np.array(x_train), np.array(y_train))
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
    model = Sequential()
    model.add(LSTM(128, return_sequences=True, input_shape=(x_train.shape[1], 1)))
    model.add(Dropout(0.35))
    model.add(LSTM(64, return_sequences=False))
    model.add(Dropout(0.3))
    model.add(Dense(25, activation='relu'))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=1, epochs=21)
    test_data = scaled_data[training_data_len - 60:, :]
    x_test = []
    y_test = dataset[training_data_len:, :]
    for i in range(60, len(test_data)):
        x_test.append(test_data[i - 60:i, 0])
    x_test = np.array(x_test)
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    predictions = model.predict(x_test)
    predictions = scaler.inverse_transform(predictions)
    rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
    train = data[:training_data_len]
    valid = data[training_data_len:]
    train_gr = np.reshape(train, (203,))
    train_gr = train_gr['Close']
    valid_gr = np.reshape(valid, (50,))
    valid_gr = valid_gr['Close']
    preds_gr = np.reshape(predictions, (50,))
    x_train = list(range(0, len(train_data)))
    x_valid = list(range(len(train_data) - 1, len(dataset)))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=x_train, y=train_gr, mode='lines+markers', marker=dict(size=4), name='train'))
    fig.add_trace(go.Scatter(x=x_valid, y=valid_gr, mode='lines+markers', marker=dict(size=4), name='valid'))
    fig.add_trace(go.Scatter(x=x_valid, y=preds_gr, mode='lines+markers', marker=dict(size=4), name='predictions'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} LSTM data', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    data_new = yf.download(stock_name, start='2021-03-01', end='2021-04-30')
    data_new = data_new.filter(['Close'])
    dataset = data_new.values
    training_data_len = len(dataset)
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)
    test_data = scaled_data[training_data_len - len(data_new):, :]
    x_test = []
    y_test = dataset[training_data_len:, :]
    for i in range(20, len(test_data)):
        x_test.append(test_data[i - 20:i, 0])
    x_test = np.array(x_test)
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    hist_data_new = yf.download(stock_name, start='2021-04-01', end='2021-05-04')
    hist_data_new = hist_data_new.drop(['Open', 'High', 'Low', 'Adj Close', 'Volume'], axis=1)
    hist_data_new = hist_data_new['Close']
    hist_data_new = np.array(hist_data_new)
    pred_lstm = model.predict(x_test)
    pred_lstm = pred_lstm[:-1]
    pred_lstm = scaler.inverse_transform(pred_lstm)
    preds_gr = np.reshape(pred_lstm, (22,))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=hist_data_new, mode='lines+markers', name='historical'))
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=preds_gr, mode='lines+markers', name='predictions'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} LSTM prediction', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    return (pred_lstm, rmse)

data_adf = data.drop(['Open', 'High', 'Low', 'Adj Close', 'Volume'], axis=1)
data_adf = data_adf['Close']
from pmdarima.arima import ADFTest
adf_test = ADFTest(alpha=0.05)
adf_test.should_diff(data_adf)
import os
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pmdarima as pm
plt.style.use('fivethirtyeight')
from pylab import rcParams
rcParams['figure.figsize'] = (10, 6)
from statsmodels.tsa.arima_model import ARIMA
from pmdarima.arima import ADFTest
from pmdarima.datasets import load_wineind
import random

def arima(stock_name, data):
    df_close = data['Close']
    df_log = df_close
    train_data, test_data = (df_log[3:int(len(df_log) * 0.9)], df_log[int(len(df_log) * 0.9):])
    test_values = len(df_log) * 0.01 + 1.0
    x_train = list(range(0, 224))
    x_test = list(range(224, int(len(data))))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=x_train, y=train_data, mode='lines+markers', marker=dict(size=4), name='train'))
    fig.add_trace(go.Scatter(x=x_test, y=test_data, mode='lines+markers', marker=dict(size=4), name='test'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} ARIMA data', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    model = pm.auto_arima(df_log, start_p=0, d=None, start_q=0, max_p=5, max_d=5, max_q=5, start_P=0, D=1, start_Q=0, max_P=5, max_D=5, max_Q=5, m=7, seasonal=True, error_action='warn', trace=True, suppress_warnings=True, stepwise=True, random_state=20, n_fits=50)
    model.summary()
    exo_data = data['Volume']
    exo_data = exo_data[int(len(exo_data) * 0.9):]
    preds = model.predict(n_periods=22, X=exo_data)
    preds = np.vstack(preds)
    hist_data = yf.download(stock_name, start='2021-04-01', end='2021-05-04')
    hist_data = hist_data.drop(['Open', 'High', 'Low', 'Adj Close', 'Volume'], axis=1)
    hist_data = hist_data['Close']
    hist_data = np.array(hist_data)
    rmse = np.sqrt(np.mean((preds - hist_data) ** 2))
    preds_gr = np.reshape(preds, (22,))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=hist_data, mode='lines+markers', name='historical'))
    fig.add_trace(go.Scatter(x=list(range(0, 21)), y=preds_gr, mode='lines+markers', name='predictions'))
    fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} ARIMA prediction', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
    return (preds, rmse)

from statsmodels.tsa.statespace.sarimax import SARIMAX
data3 = data['Close']
train3_data, test3_data = (data3[3:int(len(data3) * 0.9)], data3[int(len(data3) * 0.9):])
x_train = list(range(0, 224))
x_test = list(range(224, int(len(data3))))
exo_data = data['Volume']
exo_data = exo_data[int(len(exo_data) * 0.9):]
fig = go.Figure()
fig.add_trace(go.Scatter(x=x_train, y=train3_data, mode='lines+markers', marker=dict(size=4), name='train'))
fig.add_trace(go.Scatter(x=x_test, y=test3_data, mode='lines+markers', marker=dict(size=4), name='test'))
fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} SARIMAX data', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
fig.show()
model = SARIMAX(train3_data, order=(3, 1, 2))
arima_model = model.fit(X=exo_data, disp=-1)
print(arima_model.summary())
preds3 = arima_model.predict(n_periods=22, alpha=0.05)
preds3 = np.vstack(preds3)
preds3 = preds3[-22:]
hist_data = yf.download(stock_name, start='2021-04-01', end='2021-05-04')
hist_data = hist_data.drop(['Open', 'High', 'Low', 'Adj Close', 'Volume'], axis=1)
hist_data = hist_data['Close']
hist_data = np.array(hist_data)
rmse = np.sqrt(np.mean((preds3 - hist_data) ** 2))
print(f'RMSE SARIMAX: {rmse}')
preds_gr = np.reshape(preds3, (22,))
fig = go.Figure()
fig.add_trace(go.Scatter(x=list(range(0, 21)), y=hist_data, mode='lines+markers', name='historical'))
fig.add_trace(go.Scatter(x=list(range(0, 21)), y=preds_gr, mode='lines+markers', name='predictions'))
fig.update_layout(legend_orientation='h', legend=dict(x=0.5, xanchor='center'), title_text=f'{stock_name} SARIMAX prediction', title_x=0.5, xaxis_title='Timestep', yaxis_title='Stock price', margin=dict(l=0, r=0, t=30, b=0))
fig.show()
code
130019203/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/covid19-india-news-headlines-for-nlp/raw_data.csv', usecols=['Headline', 'Sentiment', 'Description'])
df.shape
X = df.drop('Sentiment', axis=1)
Y = df['Sentiment']
X
code
130019203/cell_23
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from tensorflow.keras.preprocessing.text import one_hot
import pandas as pd
import re
df = pd.read_csv('/kaggle/input/covid19-india-news-headlines-for-nlp/raw_data.csv', usecols=['Headline', 'Sentiment', 'Description'])
df.shape
X = df.drop('Sentiment', axis=1)
Y = df['Sentiment']
messages = X.copy()
import re
import nltk
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
ps = PorterStemmer()
corpus = []
for i in range(len(messages)):
    review = re.sub('[^a-zA-Z]', ' ', messages['Headline'][i])
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if not word in stopwords.words('english')]
    review = ' '.join(review)
    corpus.append(review)
voc_size = 5000
onehot_repr = [one_hot(words, voc_size) for words in corpus]
onehot_repr
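# Note: despite its name, one_hot is a hashing-based integer encoder, not a
# one-hot matrix: each word maps to an index in [1, voc_size). The integers in
# this illustrative call depend on the hash, so the exact values will vary.
print(one_hot('corona cases rise', voc_size))  # e.g. [4213, 907, 3350] -- one int per word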
code
130019203/cell_33
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.metrics import accuracy_score
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import one_hot
import numpy as np
import pandas as pd
import re
df = pd.read_csv('/kaggle/input/covid19-india-news-headlines-for-nlp/raw_data.csv', usecols=['Headline', 'Sentiment', 'Description'])
df.shape
X = df.drop('Sentiment', axis=1)
Y = df['Sentiment']
messages = X.copy()
import re
import nltk
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
ps = PorterStemmer()
corpus = []
for i in range(len(messages)):
    review = re.sub('[^a-zA-Z]', ' ', messages['Headline'][i])
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if not word in stopwords.words('english')]
    review = ' '.join(review)
    corpus.append(review)
voc_size = 5000
onehot_repr = [one_hot(words, voc_size) for words in corpus]
sent_length = 20
embedded_docs = pad_sequences(onehot_repr, padding='pre', maxlen=sent_length)
from tensorflow.keras.layers import Dropout
embedding_vector_features = 40
model = Sequential()
model.add(Embedding(voc_size, embedding_vector_features, input_length=sent_length))
model.add(Dropout(0.5))
model.add(LSTM(200))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
X_final = np.array(embedded_docs)
y_final = np.array(Y)
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=64)
# The model has a single sigmoid output, so threshold the probability at 0.5
# (argmax over a single column would always return class 0).
y_pred = (model.predict(X_test) > 0.5).astype(int)
from sklearn.metrics import accuracy_score
print(f'{accuracy_score(y_test, y_pred) * 100}%')
code
130019203/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/covid19-india-news-headlines-for-nlp/raw_data.csv', usecols=['Headline', 'Sentiment', 'Description'])
df.head()
code
130019203/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/covid19-india-news-headlines-for-nlp/raw_data.csv', usecols=['Headline', 'Sentiment', 'Description'])
df.shape
code
130019203/cell_18
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import pandas as pd
import re
df = pd.read_csv('/kaggle/input/covid19-india-news-headlines-for-nlp/raw_data.csv', usecols=['Headline', 'Sentiment', 'Description'])
df.shape
X = df.drop('Sentiment', axis=1)
Y = df['Sentiment']
messages = X.copy()
import re
import nltk
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
ps = PorterStemmer()
corpus = []
for i in range(len(messages)):
    review = re.sub('[^a-zA-Z]', ' ', messages['Headline'][i])
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if not word in stopwords.words('english')]
    review = ' '.join(review)
    corpus.append(review)
corpus
code
130019203/cell_32
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import one_hot
import pandas as pd
import re
df = pd.read_csv('/kaggle/input/covid19-india-news-headlines-for-nlp/raw_data.csv', usecols=['Headline', 'Sentiment', 'Description'])
df.shape
X = df.drop('Sentiment', axis=1)
Y = df['Sentiment']
messages = X.copy()
import re
import nltk
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
ps = PorterStemmer()
corpus = []
for i in range(len(messages)):
    review = re.sub('[^a-zA-Z]', ' ', messages['Headline'][i])
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if not word in stopwords.words('english')]
    review = ' '.join(review)
    corpus.append(review)
voc_size = 5000
onehot_repr = [one_hot(words, voc_size) for words in corpus]
sent_length = 20
embedded_docs = pad_sequences(onehot_repr, padding='pre', maxlen=sent_length)
from tensorflow.keras.layers import Dropout
embedding_vector_features = 40
model = Sequential()
model.add(Embedding(voc_size, embedding_vector_features, input_length=sent_length))
model.add(Dropout(0.5))
model.add(LSTM(200))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=64)
code
130019203/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/covid19-india-news-headlines-for-nlp/raw_data.csv', usecols=['Headline', 'Sentiment', 'Description'])
df.shape
df.info()
code
130019203/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/covid19-india-news-headlines-for-nlp/raw_data.csv', usecols=['Headline', 'Sentiment', 'Description'])
df.shape
X = df.drop('Sentiment', axis=1)
Y = df['Sentiment']
messages = X.copy()
messages
code
130019203/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/covid19-india-news-headlines-for-nlp/raw_data.csv', usecols=['Headline', 'Sentiment', 'Description'])
df.shape
X = df.drop('Sentiment', axis=1)
Y = df['Sentiment']
Y
code
130019203/cell_27
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import one_hot
import pandas as pd
import re
df = pd.read_csv('/kaggle/input/covid19-india-news-headlines-for-nlp/raw_data.csv', usecols=['Headline', 'Sentiment', 'Description'])
df.shape
X = df.drop('Sentiment', axis=1)
Y = df['Sentiment']
messages = X.copy()
import re
import nltk
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
ps = PorterStemmer()
corpus = []
for i in range(len(messages)):
    review = re.sub('[^a-zA-Z]', ' ', messages['Headline'][i])
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if not word in stopwords.words('english')]
    review = ' '.join(review)
    corpus.append(review)
voc_size = 5000
onehot_repr = [one_hot(words, voc_size) for words in corpus]
sent_length = 20
embedded_docs = pad_sequences(onehot_repr, padding='pre', maxlen=sent_length)
embedded_docs
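# Illustrative sketch: padding='pre' left-pads shorter sequences with zeros so
# every row comes out with length maxlen.
print(pad_sequences([[1, 2], [3, 4, 5]], padding='pre', maxlen=4))
# [[0 0 1 2]
#  [0 3 4 5]]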
code
74068297/cell_21
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/adult-census-income/adult.csv', na_values='?')
data.shape
data.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
data.isna().sum()
data = data.replace({'<=50K': 0, '>50K': 1})
data = data.replace({'<=50K': 0, '>50K': 1})
list_columns = []
list_categorical_col = []
list_numerical_col = []

def get_col(df: 'dataframe', type_descr: 'numpy') -> list:
    """
    Return a list of the dataframe's column names for the requested dtype(s).

    Args:
        type_descr: [np.object, np.number] -> all columns,
                    np.number -> numerical columns only,
                    np.object -> object (categorical) columns only.
    """
    try:
        col = df.describe(include=type_descr).columns # pandas.core.indexes.base.Index
    except ValueError:
        print(f'Dataframe does not contain {type_descr} columns!', end='\n')
    else:
        return col.tolist()

list_numerical_col = get_col(df=data, type_descr=np.number)
list_categorical_col = get_col(df=data, type_descr=np.object)
list_columns = get_col(df=data, type_descr=[np.object, np.number])
x = data[list_numerical_col].hist(figsize=[25, 22], density=True, bins=25, grid=False, color='orange', zorder=2, rwidth=0.9)
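# Why get_col() works, on a tiny hypothetical frame: describe(include=np.number)
# summarises only numeric columns, so its .columns index is exactly the numeric
# column names (np.object does the same for object/string columns).
demo = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
print(demo.describe(include=np.number).columns.tolist())  # ['a']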
code
74068297/cell_25
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/adult-census-income/adult.csv', na_values='?')
data.shape
data.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
data.isna().sum()
data = data.replace({'<=50K': 0, '>50K': 1})
data = data.replace({'<=50K': 0, '>50K': 1})
data.corr()
code
74068297/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
data = pd.read_csv('/kaggle/input/adult-census-income/adult.csv', na_values='?')
data.shape
data.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
data.isna().sum()
data = data.replace({'<=50K': 0, '>50K': 1})
data = data.replace({'<=50K': 0, '>50K': 1})
sns.pairplot(data, kind='scatter', diag_kind='kde', corner=True, hue='income')
code
74068297/cell_20
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/adult-census-income/adult.csv', na_values='?')
data.shape
data.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
data.isna().sum()
data = data.replace({'<=50K': 0, '>50K': 1})
data = data.replace({'<=50K': 0, '>50K': 1})
list_columns = []
list_categorical_col = []
list_numerical_col = []

def get_col(df: 'dataframe', type_descr: 'numpy') -> list:
    """
    Return a list of the dataframe's column names for the requested dtype(s).

    Args:
        type_descr: [np.object, np.number] -> all columns,
                    np.number -> numerical columns only,
                    np.object -> object (categorical) columns only.
    """
    try:
        col = df.describe(include=type_descr).columns # pandas.core.indexes.base.Index
    except ValueError:
        print(f'Dataframe does not contain {type_descr} columns!', end='\n')
    else:
        return col.tolist()

list_numerical_col = get_col(df=data, type_descr=np.number)
list_categorical_col = get_col(df=data, type_descr=np.object)
list_columns = get_col(df=data, type_descr=[np.object, np.number])
list_categorical_col
code
74068297/cell_6
[ "text_html_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/adult-census-income/adult.csv', na_values='?')
data.shape
code
74068297/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
data = pd.read_csv('/kaggle/input/adult-census-income/adult.csv', na_values='?')
data.shape
data.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
data.isna().sum()
data = data.replace({'<=50K': 0, '>50K': 1})
data = data.replace({'<=50K': 0, '>50K': 1})
data.corr()
sns.heatmap(data.corr(), annot=True, cmap='PiYG')
code
74068297/cell_19
[ "text_html_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/adult-census-income/adult.csv', na_values='?')
data.shape
data.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
data.isna().sum()
data = data.replace({'<=50K': 0, '>50K': 1})
data = data.replace({'<=50K': 0, '>50K': 1})
list_columns = []
list_categorical_col = []
list_numerical_col = []

def get_col(df: 'dataframe', type_descr: 'numpy') -> list:
    """
    Return a list of the dataframe's column names for the requested dtype(s).

    Args:
        type_descr: [np.object, np.number] -> all columns,
                    np.number -> numerical columns only,
                    np.object -> object (categorical) columns only.
    """
    try:
        col = df.describe(include=type_descr).columns # pandas.core.indexes.base.Index
    except ValueError:
        print(f'Dataframe does not contain {type_descr} columns!', end='\n')
    else:
        return col.tolist()

list_numerical_col = get_col(df=data, type_descr=np.number)
list_categorical_col = get_col(df=data, type_descr=np.object)
list_columns = get_col(df=data, type_descr=[np.object, np.number])
list_numerical_col
code
74068297/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74068297/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/adult-census-income/adult.csv', na_values='?')
data.shape
data.head()
code
74068297/cell_28
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
data = pd.read_csv('/kaggle/input/adult-census-income/adult.csv', na_values='?')
data.shape
data.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
data.isna().sum()
data = data.replace({'<=50K': 0, '>50K': 1})
data = data.replace({'<=50K': 0, '>50K': 1})
data.corr()
eda_percentage = data['income'].value_counts(normalize=True).rename_axis('income').reset_index(name='Percentage')
ax = sns.barplot(x='income', y='Percentage', data=eda_percentage.head(10), palette='RdGy_r')
eda_percentage
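# value_counts(normalize=True) returns class shares rather than raw counts,
# which is what the bar plot above displays; a toy example:
print(pd.Series([0, 0, 0, 1]).value_counts(normalize=True))  # 0 -> 0.75, 1 -> 0.25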
code
74068297/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
data = pd.read_csv('/kaggle/input/adult-census-income/adult.csv', na_values='?')
data.shape
data.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
data.isna().sum()
data = data.replace({'<=50K': 0, '>50K': 1})
data = data.replace({'<=50K': 0, '>50K': 1})
data.corr()
eda_percentage = data['income'].value_counts(normalize=True).rename_axis('income').reset_index(name='Percentage')
ax = sns.barplot(x='income', y='Percentage', data=eda_percentage.head(10), palette='RdGy_r')
eda_percentage
plt.figure(figsize=(12, 6))
order_list = ['Less than 18', '19-30', '31-40', '41-50', '51-60', '61-70', 'Greater than 70']
sns.countplot(data['age_group'], hue=data['income'], palette='autumn_r', order=order_list)
plt.title('Income of Individuals of Different Age Groups', fontsize=18)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.legend(fontsize=16)
code
74068297/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/adult-census-income/adult.csv', na_values='?')
data.shape
data.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income']
data.isna().sum()
code
90153288/cell_21
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv')
import seaborn as sns
import matplotlib.pyplot as plt
df_mr = df.drop(columns=[i for i in df.columns if i not in ['popularity', 'tempo', 'instrumentalness']])
df_mr.tempo = df_mr.tempo.apply(lambda row: str(row).replace('?', '0') if row == '?' else str(row))
df_mr.tempo = df_mr.tempo.apply(pd.to_numeric, errors='coerce')
df_mr = df_mr.dropna()
x = df_mr[['instrumentalness', 'tempo']]
y = df_mr['popularity']
from sklearn import linear_model
regr = linear_model.LinearRegression()
regr.fit(x, y)
regr.coef_
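# regr.coef_ holds one weight per input column, in the order of x's columns
# (['instrumentalness', 'tempo']), and regr.intercept_ is the bias, so the model
# is popularity ~ coef_[0] * instrumentalness + coef_[1] * tempo + intercept_.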
code
90153288/cell_23
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv')
import seaborn as sns
import matplotlib.pyplot as plt
df_mr = df.drop(columns=[i for i in df.columns if i not in ['popularity', 'tempo', 'instrumentalness']])
df_mr.tempo = df_mr.tempo.apply(lambda row: str(row).replace('?', '0') if row == '?' else str(row))
df_mr.tempo = df_mr.tempo.apply(pd.to_numeric, errors='coerce')
df_mr = df_mr.dropna()
x = df_mr[['instrumentalness', 'tempo']]
y = df_mr['popularity']
from sklearn import linear_model
regr = linear_model.LinearRegression()
regr.fit(x, y)
regr.coef_
regr.predict([[0.0045, 55.25]])
code
90153288/cell_30
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv')
import seaborn as sns
import matplotlib.pyplot as plt
df_mr = df.drop(columns=[i for i in df.columns if i not in ['popularity', 'tempo', 'instrumentalness']])
df_mr.tempo = df_mr.tempo.apply(lambda row: str(row).replace('?', '0') if row == '?' else str(row))
df_mr.tempo = df_mr.tempo.apply(pd.to_numeric, errors='coerce')
df_mr = df_mr.dropna()
x = df_mr[['instrumentalness', 'tempo']]
y = df_mr['popularity']

def fit(x, a):
    f = a[2] * x * x + a[1] * x + a[0]
    return f

def grad(x, a):
    g = 2 * a[2] * x + a[1]
    return g

x = df_mr[['tempo', 'instrumentalness']]
y = df_mr['popularity']
f = fit(x, y)
x = df_mr[['tempo']]
y = df_mr['popularity']

def find_theta(X, y):
    m = X.shape[0]
    X = np.append(X, np.ones((m, 1)), axis=1)
    theta = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, y))
    return theta

def predict(X):
    X = np.append(X, np.ones((X.shape[0], 1)), axis=1)
    preds = np.dot(X, theta)
    return preds

theta = find_theta(x, y)
print(theta)
preds = predict(x)
fig = plt.figure(figsize=(8, 6))
plt.plot(x, y, 'y.')
plt.plot(x, preds, 'c-')
plt.xlabel('Input')
plt.ylabel('target')
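# find_theta() is the closed-form least-squares ("normal equation") solution
#     theta = (X^T X)^(-1) X^T y
# after appending a column of ones for the intercept. Sanity check on a
# hypothetical exact line y = 2x + 1:
X_demo = np.array([[0.0], [1.0], [2.0]])
y_demo = np.array([1.0, 3.0, 5.0])
print(find_theta(X_demo, y_demo))  # ~[2. 1.]: slope 2, intercept 1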
code