markdown (stringlengths 0-37k) | code (stringlengths 1-33.3k) | path (stringlengths 8-215) | repo_name (stringlengths 6-77) | license (stringclasses 15 values) |
---|---|---|---|---|
Create output control (OC) data using words | #--oc data
spd = {(0,199): ['print budget', 'save head'],
(0,200): [],
(0,399): ['print budget', 'save head'],
(0,400): [],
(0,599): ['print budget', 'save head'],
(0,600): [],
(0,799): ['print budget', 'save head'],
(0,800): [],
(0,999): ['print budget', 'save head'],
(1,0): [],
(1,59): ['print budget', 'save head'],
(1,60): [],
(1,119): ['print budget', 'save head'],
(1,120): [],
(2,0): [],
(2,59): ['print budget', 'save head'],
(2,60): [],
(2,119): ['print budget', 'save head'],
(2,120): [],
(2,179): ['print budget', 'save head']} | original_libraries/flopy-master/examples/Notebooks/swiex4.ipynb | mjasher/gac | gpl-2.0 |
Create the model with the freshwater well (Simulation 1) | modelname = 'swiex4_s1'
ml = mf.Modflow(modelname, version='mf2005', exe_name=exe_name, model_ws=workspace)
discret = mf.ModflowDis(ml, nlay=nlay, nrow=nrow, ncol=ncol, laycbd=0,
delr=delr, delc=delc, top=botm[0], botm=botm[1:],
nper=nper, perlen=perlen, nstp=nstp)
bas = mf.ModflowBas(ml, ibound=ibound, strt=ihead)
lpf = mf.ModflowLpf(ml, laytyp=laytyp, hk=hk, vka=vka)
wel = mf.ModflowWel(ml, stress_period_data=base_well_data)
ghb = mf.ModflowGhb(ml, stress_period_data=ghb_data)
rch = mf.ModflowRch(ml, rech=rch_data)
swi = mf.ModflowSwi2(ml, nsrf=1, istrat=1, toeslope=toeslope, tipslope=tipslope, nu=nu,
zeta=z, ssz=ssz, isource=iso, nsolver=1,
adaptive=adaptive, nadptmx=nadptmx, nadptmn=nadptmn,
nobs=nobs, iswiobs=iswiobs, obsnam=obsnam, obslrc=obslrc)
oc = mf.ModflowOc(ml, stress_period_data=spd)
pcg = mf.ModflowPcg(ml, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50) | original_libraries/flopy-master/examples/Notebooks/swiex4.ipynb | mjasher/gac | gpl-2.0 |
Create the model with the saltwater well (Simulation 2) | modelname2 = 'swiex4_s2'
ml2 = mf.Modflow(modelname2, version='mf2005', exe_name=exe_name, model_ws=workspace)
discret = mf.ModflowDis(ml2, nlay=nlay, nrow=nrow, ncol=ncol, laycbd=0,
delr=delr, delc=delc, top=botm[0], botm=botm[1:],
nper=nper, perlen=perlen, nstp=nstp)
bas = mf.ModflowBas(ml2, ibound=ibound, strt=ihead)
lpf = mf.ModflowLpf(ml2, laytyp=laytyp, hk=hk, vka=vka)
wel = mf.ModflowWel(ml2, stress_period_data=swwells_well_data)
ghb = mf.ModflowGhb(ml2, stress_period_data=ghb_data)
rch = mf.ModflowRch(ml2, rech=rch_data)
swi = mf.ModflowSwi2(ml2, nsrf=1, istrat=1, toeslope=toeslope, tipslope=tipslope, nu=nu,
zeta=z, ssz=ssz, isource=iso, nsolver=1,
adaptive=adaptive, nadptmx=nadptmx, nadptmn=nadptmn,
nobs=nobs, iswiobs=iswiobs, obsnam=obsnam, obslrc=obslrc)
oc = mf.ModflowOc(ml2, stress_period_data=spd)
pcg = mf.ModflowPcg(ml2, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50) | original_libraries/flopy-master/examples/Notebooks/swiex4.ipynb | mjasher/gac | gpl-2.0 |
Load the simulation 1 ZETA data and ZETA observations. | #--read base model zeta
zfile = fu.CellBudgetFile(os.path.join(ml.model_ws, modelname+'.zta'))
kstpkper = zfile.get_kstpkper()
zeta = []
for kk in kstpkper:
zeta.append(zfile.get_data(kstpkper=kk, text='ZETASRF 1')[0])
zeta = np.array(zeta)
#--read swi obs
zobs = np.genfromtxt(os.path.join(ml.model_ws, modelname+'.zobs'), names=True) | original_libraries/flopy-master/examples/Notebooks/swiex4.ipynb | mjasher/gac | gpl-2.0 |
Load the simulation 2 ZETA data and ZETA observations. | #--read saltwater well model zeta
zfile2 = fu.CellBudgetFile(os.path.join(ml2.model_ws, modelname2+'.zta'))
kstpkper = zfile2.get_kstpkper()
zeta2 = []
for kk in kstpkper:
zeta2.append(zfile2.get_data(kstpkper=kk, text='ZETASRF 1')[0])
zeta2 = np.array(zeta2)
#--read swi obs
zobs2 = np.genfromtxt(os.path.join(ml2.model_ws, modelname2+'.zobs'), names=True) | original_libraries/flopy-master/examples/Notebooks/swiex4.ipynb | mjasher/gac | gpl-2.0 |
Define figure dimensions and colors used for plotting ZETA surfaces | #--figure dimensions
fwid, fhgt = 8.00, 5.50
flft, frgt, fbot, ftop = 0.125, 0.95, 0.125, 0.925
#--line color definition
icolor = 5
colormap = plt.cm.jet #winter
cc = []
cr = np.linspace(0.9, 0.0, icolor)
for idx in cr:
cc.append(colormap(idx)) | original_libraries/flopy-master/examples/Notebooks/swiex4.ipynb | mjasher/gac | gpl-2.0 |
Recreate Figure 9 from the SWI2 documentation (http://pubs.usgs.gov/tm/6a46/). | plt.rcParams.update({'legend.fontsize': 6, 'legend.frameon' : False})
fig = plt.figure(figsize=(fwid, fhgt), facecolor='w')
fig.subplots_adjust(wspace=0.25, hspace=0.25, left=flft, right=frgt, bottom=fbot, top=ftop)
#--first plot
ax = fig.add_subplot(2, 2, 1)
#--axes limits
ax.set_xlim(-1500, 1500)
ax.set_ylim(-50, -10)
for idx in xrange(5):
#--layer 1
ax.plot(xcell, zeta[idx, 0, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx], label='{:2d} years'.format(years[idx]))
#--layer 2
ax.plot(xcell, zeta[idx, 1, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx], label='_None')
ax.plot([-1500, 1500], [-30, -30], color='k', linewidth=1.0)
#--legend
plt.legend(loc='lower left')
#--axes labels and text
ax.set_xlabel('Horizontal distance, in meters')
ax.set_ylabel('Elevation, in meters')
ax.text(0.025, .55, 'Layer 1', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.025, .45, 'Layer 2', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.975, .1, 'Recharge conditions', transform=ax.transAxes, va='center', ha='right', size='8')
#--second plot
ax = fig.add_subplot(2, 2, 2)
#--axes limits
ax.set_xlim(-1500, 1500)
ax.set_ylim(-50, -10)
for idx in xrange(5, len(years)):
#--layer 1
ax.plot(xcell, zeta[idx, 0, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx-5], label='{:2d} years'.format(years[idx]))
#--layer 2
ax.plot(xcell, zeta[idx, 1, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx-5], label='_None')
ax.plot([-1500, 1500], [-30, -30], color='k', linewidth=1.0)
#--legend
plt.legend(loc='lower left')
#--axes labels and text
ax.set_xlabel('Horizontal distance, in meters')
ax.set_ylabel('Elevation, in meters')
ax.text(0.025, .55, 'Layer 1', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.025, .45, 'Layer 2', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.975, .1, 'Freshwater well withdrawal', transform=ax.transAxes, va='center', ha='right', size='8')
#--third plot
ax = fig.add_subplot(2, 2, 3)
#--axes limits
ax.set_xlim(-1500, 1500)
ax.set_ylim(-50, -10)
for idx in xrange(5, len(years)):
#--layer 1
ax.plot(xcell, zeta2[idx, 0, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx-5], label='{:2d} years'.format(years[idx]))
#--layer 2
ax.plot(xcell, zeta2[idx, 1, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx-5], label='_None')
ax.plot([-1500, 1500], [-30, -30], color='k', linewidth=1.0)
#--legend
plt.legend(loc='lower left')
#--axes labels and text
ax.set_xlabel('Horizontal distance, in meters')
ax.set_ylabel('Elevation, in meters')
ax.text(0.025, .55, 'Layer 1', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.025, .45, 'Layer 2', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.975, .1, 'Freshwater and saltwater\nwell withdrawals', transform=ax.transAxes,
va='center', ha='right', size='8')
#--fourth plot
ax = fig.add_subplot(2, 2, 4)
#--axes limits
ax.set_xlim(0, 30)
ax.set_ylim(-50, -10)
t = zobs['TOTIM'][999:] / 365 - 200.
tz2 = zobs['layer1_001'][999:]
tz3 = zobs2['layer1_001'][999:]
for i in xrange(len(t)):
if zobs['layer2_001'][i+999] < -30. - 0.1:
tz2[i] = zobs['layer2_001'][i+999]
if zobs2['layer2_001'][i+999] < 20. - 0.1:
tz3[i] = zobs2['layer2_001'][i+999]
ax.plot(t, tz2, linestyle='solid', color='r', linewidth=0.75, label='Freshwater well')
ax.plot(t, tz3, linestyle='dotted', color='r', linewidth=0.75, label='Freshwater and saltwater well')
ax.plot([0, 30], [-30, -30], 'k', linewidth=1.0, label='_None')
#--legend
leg = plt.legend(loc='lower right', numpoints=1)
#--axes labels and text
ax.set_xlabel('Time, in years')
ax.set_ylabel('Elevation, in meters')
ax.text(0.025, .55, 'Layer 1', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.025, .45, 'Layer 2', transform=ax.transAxes, va='center', ha='left', size='7'); | original_libraries/flopy-master/examples/Notebooks/swiex4.ipynb | mjasher/gac | gpl-2.0 |
Initial matchup chart | DataFrame(data = init.data, index = init.row_names, columns = init.col_names) | python/examples/super_street_fighter_2_turbo.ipynb | ajul/zerosum | bsd-3-clause |
Matchup chart after balancing with a logistic handicap | DataFrame(data = opt.F, index = init.row_names, columns = init.col_names) | python/examples/super_street_fighter_2_turbo.ipynb | ajul/zerosum | bsd-3-clause |
can be used to run a test where the platform is configured to
- disable the "sched_is_big_little" flag (if present)
- set "sched_migration_cost_ns" to 50ms
Notice that a value written to a file is verified only if the file path is
prefixed by a '/'. Otherwise, the write never fails, e.g. if the file does not exist.
Support to freeze user-space across a test
https://github.com/ARM-software/lisa/pull/227
Executor learned the "freeze_userspace" conf flag. When this flag is present, LISA uses the devlib freezer to freeze as much of userspace as possible while the experiment workload is executing, in order to reduce system noise.
The Executor example notebook:
https://github.com/ARM-software/lisa/blob/master/ipynb/examples/utils/executor_example.ipynb
gives an example of using this feature.
Trace module
Tasks name pre-loading
When the Trace module is initialized, by default all the tasks in that trace are identified and exposed via the usual getTasks() method: | from trace import Trace
import json
with open('/home/patbel01/Code/lisa/results/LisaInANutshell_Backup/platform.json', 'r') as fh:
platform = json.load(fh)
trace = Trace('/home/patbel01/Code/lisa/results/LisaInANutshell_Backup/trace.dat',
              ['sched_switch'], platform)
logging.info("%d tasks loaded from trace", len(trace.getTasks()))
logging.info("The rt-app task in this trace has these PIDs:")
logging.info(" %s", trace.getTasks()['rt-app']) | ipynb/deprecated/releases/ReleaseNotes_v16.12.ipynb | ARM-software/lisa | apache-2.0 |
Now evaluate your learned model using the test set. Measure the total error of your prediction | Y_test = F_Regression(X_test, W)
error = Loss_Regression(Y_test, Z_test)
print("Evaluation error: ", error) | Intro ML Semcomp/semcomp17_ml/semcomp17_ml_answer.ipynb | marcelomiky/PythonCodes | mit |
Network Architecture
The encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below.
<img src='assets/convolutional_autoencoder.png' width=500px>
Here our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughly 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data.
What's going on with the decoder
Okay, so the decoder has these "Upsample" layers that you might not have seen before. First off, I'll discuss a bit what these layers aren't. Usually, you'll see transposed convolution layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but in reverse. A stride in the input layer results in a larger stride in the transposed convolution layer. For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 patch in a transposed convolution layer. The TensorFlow API provides us with an easy way to create the layers, tf.nn.conv2d_transpose.
However, transposed convolution layers can lead to artifacts in the final images, such as checkerboard patterns. This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In this Distill article from Augustus Odena, et al, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with tf.image.resize_images, followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling.
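As a concrete, purely illustrative comparison, the sketch below contrasts the two approaches using the TensorFlow 1.x API this notebook relies on; the layer sizes are arbitrary and are not part of the exercise solution:

```python
import tensorflow as tf

narrow = tf.placeholder(tf.float32, [None, 4, 4, 8])                        # a narrow representation

# Transposed convolution: a stride of 2 doubles height and width,
# but overlapping kernels can produce checkerboard artifacts.
up_transposed = tf.layers.conv2d_transpose(narrow, filters=8, kernel_size=(3, 3),
                                           strides=(2, 2), padding='same')  # -> 8x8x8

# Upsample (nearest neighbor) followed by a regular convolution, as recommended above.
resized = tf.image.resize_nearest_neighbor(narrow, size=(8, 8))             # -> 8x8x8
up_resized = tf.layers.conv2d(resized, filters=8, kernel_size=(3, 3),
                              padding='same', activation=tf.nn.relu)        # -> 8x8x8
```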
Exercise: Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used the reduce the width and height. A stride of 2 will reduce the size by a factor of 2. Odena et al claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in tf.image.resize_images or use tf.image.resize_nearest_neighbor. | learning_rate = 0.001
# Input and target placeholders
inputs_ = tf.placeholder(tf.float32, [None,28,28,1], name='inputs')
targets_ = tf.placeholder(tf.float32, [None,28,28,1], name='labels')
### Encoder
conv1 = tf.layers.conv2d(inputs=inputs_, filters=16, kernel_size=(3,3),
padding='same', activation=tf.nn.relu, name='enc_conv1')
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2,2),
strides=(2,2), padding='same', name='enc_maxpool1')
# Now 14x14x16
conv2 = tf.layers.conv2d(inputs=maxpool1, filters=8, kernel_size=(3,3),
padding='same', activation=tf.nn.relu, name='enc_conv2')
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2,2),
strides=(2,2), padding='same', name='enc_maxpool2')
# Now 7x7x8
conv3 = tf.layers.conv2d(inputs=maxpool2, filters=8, kernel_size=(3,3),
padding='same', activation=tf.nn.relu, name='enc_conv3')
# Now 7x7x8
encoded = tf.layers.max_pooling2d(inputs=conv3, pool_size=(2,2),
strides=(2,2), padding='same', name='encoded')
# Now 4x4x8
### Decoder
upsample1 = tf.image.resize_bilinear(images=encoded, size=(7,7), name='dec_upsample1')
# Now 7x7x8
conv4 = tf.layers.conv2d(inputs=upsample1, filters=8, kernel_size=(3,3),
padding='same', activation=tf.nn.relu, name='dec_conv4')
# Now 7x7x8
upsample2 = tf.image.resize_bilinear(images=conv4, size=(14,14), name='dec_upsample2')
# Now 14x14x8
conv5 = tf.layers.conv2d(inputs=upsample2, filters=8, kernel_size=(3,3),
padding='same', activation=tf.nn.relu, name='dec_conv5')
# Now 14x14x8
upsample3 = tf.image.resize_bilinear(images=conv5, size=(28,28), name='dec_upsample3')
# Now 28x28x8
conv6 = tf.layers.conv2d(inputs=upsample3, filters=16, kernel_size=(3,3),
padding='same', activation=tf.nn.relu, name='dec_conv6')
# Now 28x28x16
logits = tf.layers.conv2d(inputs=conv6, filters=1, kernel_size=(3,3),
padding='same', activation=None, name='logits')
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits, name='decoded')
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets_, name='loss')
# Get cost and define the optimizer
cost = tf.reduce_mean(loss, name='cost')
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost) | autoencoder/Convolutional_Autoencoder.ipynb | otavio-r-filho/AIND-Deep_Learning_Notebooks | mit |
Denoising
As I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practice. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images.
Since this is a harder problem for the network, we'll want to use deeper convolutional layers here, more feature maps. I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. Otherwise the architecture is the same as before.
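For clarity, the noising step described above amounts to the following small sketch, assuming imgs is a batch of images shaped (batch, 28, 28, 1); the same idea appears verbatim in the training loop further down:

```python
noise_factor = 0.5
noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)   # add Gaussian noise
noisy_imgs = np.clip(noisy_imgs, 0., 1.)                          # keep pixel values in [0, 1]
```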
Exercise: Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers. | learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs=inputs_, filters=32, kernel_size=(3,3),
                         padding='same', activation=tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2,2),
                                   strides=(2,2), padding='same')
# Now 14x14x32
conv2 = tf.layers.conv2d(inputs=maxpool1, filters=32, kernel_size=(3,3),
                         padding='same', activation=tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2,2),
                                   strides=(2,2), padding='same')
# Now 7x7x32
conv3 = tf.layers.conv2d(inputs=maxpool2, filters=16, kernel_size=(3,3),
                         padding='same', activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(inputs=conv3, pool_size=(2,2),
                                  strides=(2,2), padding='same')
# Now 4x4x16
### Decoder
upsample1 = tf.image.resize_bilinear(images=encoded, size=(7,7))
# Now 7x7x16
conv4 = tf.layers.conv2d(inputs=upsample1, filters=16, kernel_size=(3,3),
                         padding='same', activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_bilinear(images=conv4, size=(14,14))
# Now 14x14x16
conv5 = tf.layers.conv2d(inputs=upsample2, filters=32, kernel_size=(3,3),
                         padding='same', activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_bilinear(images=conv5, size=(28,28))
# Now 28x28x32
conv6 = tf.layers.conv2d(inputs=upsample3, filters=32, kernel_size=(3,3),
                         padding='same', activation=tf.nn.relu)
# Now 28x28x32
logits = tf.layers.conv2d(inputs=conv6, filters=1, kernel_size=(3,3),
padding='same', activation=None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits, name='output')
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets_)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 100
batch_size = 200
# Set's how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images from the batch
imgs = batch[0].reshape((-1, 28, 28, 1))
# Add random noise to the input images
noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
# Clip the images to be between 0 and 1
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
# Noisy images as inputs, original images as targets
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost)) | autoencoder/Convolutional_Autoencoder.ipynb | otavio-r-filho/AIND-Deep_Learning_Notebooks | mit |
Similarity Scores
Links to information about distance metrics:
Implementing the Five Most Popular Similarity Measures in Python
Scikit-Learn Distance Metric
Python Distance Library
Numeric distances are fairly easy, but can be record specific (e.g. phone numbers can compare area codes, city codes, etc. to determine similarity). We will compare text similarity in this section: | # Typographic Distances
print distance.levenshtein("lenvestein", "levenshtein")
print distance.hamming("hamming", "hamning")
# Compare glyphs, syllables, or phonemes
t1 = ("de", "ci", "si", "ve")
t2 = ("de", "ri", "si", "ve")
print distance.levenshtein(t1, t2)
# Sentence Comparison
sent1 = "The quick brown fox jumped over the lazy dogs."
sent2 = "The lazy foxes are jumping over the crazy Dog."
print distance.nlevenshtein(sent1.split(), sent2.split(), method=1)
# Normalization
print distance.hamming("fat", "cat", normalized=True)
print distance.nlevenshtein("abc", "acd", method=1) # shortest alignment
print distance.nlevenshtein("abc", "acd", method=2) # longest alignment
# Set measures
print distance.sorensen("decide", "resize")
print distance.jaccard("decide", "resize") | Entity Resolution Workshop.ipynb | DistrictDataLabs/entity-resolution | apache-2.0 |
Preprocessed Text Score
Use text preprocessing with NLTK to split long strings into parts, and normalize them using Wordnet. | def tokenize(sent):
"""
When passed in a sentence, tokenizes and normalizes the string,
returning a list of lemmata.
"""
lemmatizer = nltk.WordNetLemmatizer()
for token in nltk.wordpunct_tokenize(sent):
token = token.lower()
yield lemmatizer.lemmatize(token)
def normalized_jaccard(*args):
try:
return distance.jaccard(*[tokenize(arg) for arg in args])
except UnicodeDecodeError:
return 0.0
print normalized_jaccard(sent1, sent2) | Entity Resolution Workshop.ipynb | DistrictDataLabs/entity-resolution | apache-2.0 |
Similarity Vectors | def similarity(prod1, prod2):
"""
Returns a similarity vector of match scores:
[name_score, description_score, manufacturer_score, price_score]
"""
pair = (prod1, prod2)
names = [r.get('name', None) or r.get('title', None) for r in pair]
descr = [r.get('description') for r in pair]
manuf = [r.get('manufacturer') for r in pair]
price = [float(r.get('price')) for r in pair]
return [
normalized_jaccard(*names),
normalized_jaccard(*descr),
normalized_jaccard(*manuf),
abs(1.0/(1+ (price[0] - price[1]))),
]
print similarity(X, Y) | Entity Resolution Workshop.ipynb | DistrictDataLabs/entity-resolution | apache-2.0 |
Weighted Pairwise Matching | THRESHOLD = 0.90
WEIGHTS = (0.6, 0.1, 0.2, 0.1)
matches = 0
for azprod in amazon.values():
for googprod in google.values():
vector = similarity(azprod, googprod)
score = sum(map(lambda v: v[0]*v[1], zip(WEIGHTS, vector)))
if score > THRESHOLD:
matches += 1
print "{0:0.3f}: {1} {2}".format(
score, azprod['id'], googprod['id'].split("/")[-1]
)
print "\n{} matches discovered".format(matches) | Entity Resolution Workshop.ipynb | DistrictDataLabs/entity-resolution | apache-2.0 |
Download the data from the source website if necessary. | import urllib.request
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print ('Found and verified', filename)
else:
print (statinfo.st_size)
raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
#filename = maybe_download("text8.zip",31344016) | 5_word2vec.ipynb | recepkabatas/Spark | apache-2.0 |
Read the data into a string. | filename=("text8.zip")
def read_data(filename):
    """Extract the first file enclosed in the zip archive and return it as a list of words."""
    with zipfile.ZipFile(filename) as f:
        return f.read(f.namelist()[0]).split()
words = read_data(filename)
print ('Data size', len(words)) | 5_word2vec.ipynb | recepkabatas/Spark | apache-2.0 |
Function to generate a training batch for the skip-gram model. | data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(int(batch_size / num_skips)):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print (batch[i], '->', labels[i, 0])
print (reverse_dictionary[batch[i]], '->', reverse_dictionary[labels[i, 0]]) | 5_word2vec.ipynb | recepkabatas/Spark | apache-2.0 |
Train a skip-gram model. | batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Variables.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model.
# Look up embeddings for inputs.
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
# Compute the softmax loss, using a sample of the negative labels each time.
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed,
train_labels, num_sampled, vocabulary_size))
# Optimizer.
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
# Compute the similarity between minibatch examples and all embeddings.
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
num_steps = 100001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print ("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_data, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_dataset : batch_data, train_labels : batch_labels}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print ("Average loss at step", step, ":", average_loss)
average_loss = 0
# note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log = "%s %s," % (log, close_word)
print (log)
final_embeddings = normalized_embeddings.eval()
num_points = 400
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])
def plot(embeddings, labels):
assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
pylab.figure(figsize=(15,15)) # in inches
for i, label in enumerate(labels):
x, y = embeddings[i,:]
pylab.scatter(x, y)
pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
ha='right', va='bottom')
pylab.show()
words = [reverse_dictionary[i] for i in range(1, num_points+1)]
plot(two_d_embeddings, words)
from pyspark import SparkContext
from pyspark.mllib.feature import Word2Vec
#sc = SparkContext(appName='Word2Vec')
inp = sc.textFile("url.txt").map(lambda row: row.split(" "))
word2vec = Word2Vec()
model = word2vec.fit(inp) #Results in exception...
print(model.getVectors)
print(model.getVectors)
model.call
model.findSynonyms
model.load
model.save
model.transform
model.getVectors
sc
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.mllib.feature import Word2Vec
USAGE = ("bin/spark-submit --driver-memory 4g "
"examples/src/main/python/mllib/word2vec.py text8_lines")
if __name__ == "__main__":
if len(sys.argv) < 2:
print(USAGE)
sys.exit("Argument for file not provided")
file_path = sys.argv[1]
file_path="url.txt"
# sc = SparkContext(appName='Word2Vec')
inp = sc.textFile(file_path).map(lambda row: row.split(" "))
word2vec = Word2Vec()
model = word2vec.fit(inp)
synonyms = model.findSynonyms('1', 5)
for word, cosine_distance in synonyms:
print("{}: {}".format(word, cosine_distance))
sc.stop()
from pyspark.mllib.feature import HashingTF, IDF
# Load documents (one per line).
documents = sc.textFile("url.txt").map(lambda line: line.split(" "))
hashingTF = HashingTF()
tf = hashingTF.transform(documents)
# While applying HashingTF only needs a single pass to the data, applying IDF needs two passes:
# First to compute the IDF vector and second to scale the term frequencies by IDF.
tf.cache()
idf = IDF().fit(tf)
tfidf = idf.transform(tf)
# spark.mllib's IDF implementation provides an option for ignoring terms
# which occur in less than a minimum number of documents.
# In such cases, the IDF for these terms is set to 0.
# This feature can be used by passing the minDocFreq value to the IDF constructor.
idfIgnore = IDF(minDocFreq=2).fit(tf)
tfidfIgnore = idfIgnore.transform(tf)
from pyspark.mllib.feature import Word2Vec
inp = sc.textFile("data/mllib/sample_lda_data.txt").map(lambda row: row.split(" "))
word2vec = Word2Vec()
model = word2vec.fit(inp)
synonyms = model.findSynonyms('1', 5)
for word, cosine_distance in synonyms:
print("{}: {}".format(word, cosine_distance)) | 5_word2vec.ipynb | recepkabatas/Spark | apache-2.0 |
The above algorithm is known as "trial by division".
Keep track of all primes discovered so far, and test divide them, in increasing order, into a candidate number, until:
(A) either one of the primes divides it evenly, in which case we move on to the next odd number,
or
(B) we know our candidate is the next prime, in which case we yield it and append it to the growing list.
If we get past the square root of the candidate, we conclude that no larger factor will work, as we would already have encountered it as the smaller member of the factor pair.
Passing this square-root milestone triggers plan B. Then we advance to the next candidate, ad infinitum.
Python pauses at each yield statement however, handing control back to the calling sequence, in this case a "list comprehension" containing a next() function for advancing to the next yield.
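For reference, here is a minimal sketch of the trial-by-division generator described above (the notebook's own version is not shown in this excerpt, so the function name is illustrative):

```python
def primes_by_trial_division():
    """Yield primes one at a time by trial division, as described above."""
    yield 2
    found = [2]                      # all primes discovered so far
    candidate = 3
    while True:
        for p in found:
            if p * p > candidate:    # past the square root: plan B, candidate is the next prime
                found.append(candidate)
                yield candidate
                break
            if candidate % p == 0:   # plan A: a known prime divides evenly, move on
                break
        candidate += 2               # advance to the next odd number

gen = primes_by_trial_division()
print([next(gen) for _ in range(10)])   # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```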
Coprimes, Totatives, and the Totient of a Number
From here, we jump to the idea of numbers being coprime to one another. A synonym for coprime is "stranger." Given two ordinary positive integers, they're strangers if they have no prime factors in common. For that to be true, they'd have no shared factors at all (not counting 1).
Guido van Rossum, the inventor of Python, gives us a pretty little implementation of what's known as Euclid's Method, an algorithm that's thousands of years old. It'll find the largest factor any two numbers have in common (gcd = "greatest common divisor").
Here it is: | def gcd(a, b):
while b:
a, b = b, a % b
return a
print(gcd(81, 18))
print(gcd(12, 44))
print(gcd(117, 17)) # strangers | Silicon Forest Math Series | RSA.ipynb | 4dsolutions/Python5 | mit |
How does Euclid's Method work? That's a great question and one your teacher should be able to explain. First see if you might figure it out for yourself...
Here's one explanation:
If a smaller number divides a larger one without remainder then we're done, and that will always happen when that smaller number is 1 if not before.
If there is a remainder, what then? Lets work through an example.
81 % 18 returns a remainder of 9 in the first cycle. 18 didn't go into 81 evenly but if another smaller number goes into both 9, the remainder, and 18, then we have our answer.
9 itself does the trick and we're done. | print(81 % 18) # 18 goes into
print(18 % 9) # so the new b becomes the answer | Silicon Forest Math Series | RSA.ipynb | 4dsolutions/Python5 | mit |
Suppose we had asked for gcd(18, 81) instead? 18 is the remainder (no 81s go into it) whereas b was 81, so the while loop simply flips the two numbers around to give the example above.
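A quick check of that flip, using the gcd defined above:

```python
print(gcd(18, 81))   # the first pass swaps the arguments (18 % 81 is 18), then it proceeds as before: 9
```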
The gcd function now gives us the means to compute totients and totatives of a number. The totatives of N are the strangers less than N, whereas the totient is the number of such strangers. | def totatives(N):
# list comprehension!
return [x for x in range(1,N) if gcd(x,N)==1] # strangers only
def T(N):
"""
Returns the number of numbers between (1, N) that
have no factors in common with N: called the
'totient of N' (sometimes phi is used in the docs)
"""
return len(totatives(N)) # how many strangers did we find?
print("Totient of 100:", T(100))
print("Totient of 1000:", T(1000)) | Silicon Forest Math Series | RSA.ipynb | 4dsolutions/Python5 | mit |
Where to go next is in the direction of Euler's Theorem, a generalization of Fermat's Little Theorem. The built-in pow(m, n, N) function will raise m to the n modulo N in an efficient manner. | def powers(N):
totient = T(N)
print("Totient of {}:".format(N), totient)
for t in totatives(N):
values = [pow(t, n, N) for n in range(totient + 1)]
cycle = values[:values.index(1, 1)] # first 1 after initial 1
print("{:>2}".format(len(cycle)), cycle)
powers(17) | Silicon Forest Math Series | RSA.ipynb | 4dsolutions/Python5 | mit |
Above we see repeating cycles of numbers, with the length of the cycles all dividing 16, the totient of the prime number 17.
pow(14, 2, 17) is 9, pow(14, 3, 17) is 7, and so on, coming back around the 14 at pow(14, 17, 17) where 17 is 1 modulo 16.
Numbers raised to any kth power modulo N, where k is 1 modulo the totient of N, end up staying the same number. For example, pow(m, (n * T(N)) + 1, N) == m for any n. | from random import randint
def check(N):
totient = T(N)
for t in totatives(N):
n = randint(1, 10)
print(t, pow(t, (n * totient) + 1, N))
check(17) | Silicon Forest Math Series | RSA.ipynb | 4dsolutions/Python5 | mit |
In public key cryptography, RSA in particular, a gigantic composite N is formed from two primes p and q.
N's totient will then be (p - 1) * (q - 1). For example if N = 17 * 23 (both primes) then T(N) = 16 * 22. | p = 17
q = 23
T(p*q) == (p-1)*(q-1) | Silicon Forest Math Series | RSA.ipynb | 4dsolutions/Python5 | mit |
From this totient, we'll be able to find pairs (e, d) such that (e * d) modulo T(N) == 1.
We may find d, given e and T(N), by means of the Extended Euclidean Algorithm (xgcd below).
Raising some numeric message m to the eth power modulo N will encrypt the message, giving c.
Raising the encrypted message c to the dth power will cycle it back around to its starting value, thereby decrypting it.
c = pow(m, e, N)
m = pow(c, d, N)
where (e * d) % T(N) == 1.
For example: | p = 37975227936943673922808872755445627854565536638199
q = 40094690950920881030683735292761468389214899724061
RSA_100 = p * q
totient = (p - 1) * (q - 1)
# https://en.wikibooks.org/wiki/
# Algorithm_Implementation/Mathematics/
# Extended_Euclidean_algorithm
def xgcd(b, n):
x0, x1, y0, y1 = 1, 0, 0, 1
while n != 0:
q, b, n = b // n, n, b % n
x0, x1 = x1, x0 - q * x1
y0, y1 = y1, y0 - q * y1
return b, x0, y0
# x = mulinv(b) mod n, (x * b) % n == 1
def mulinv(b, n):
g, x, _ = xgcd(b, n)
if g == 1:
return x % n
e = 3
d = mulinv(e, totient)
print((e*d) % totient)
import binascii
m = int(binascii.hexlify(b"I'm a secret"), 16)
print(m) # decimal encoding of byte string
c = pow(m, e, RSA_100) # raise to eth power
print(c)
m = pow(c, d, RSA_100) # raise to dth power
print(m)
binascii.unhexlify(hex(m)[2:]) # m is back where we started. | Silicon Forest Math Series | RSA.ipynb | 4dsolutions/Python5 | mit |
The previous solution is valid, but it has the drawback of passing an index as a parameter, an index that must always be set to 0 when the method is called. One solution would be to "wrap" the call to this method inside another method with a simpler specification, but that is a bit "too easy". The solution below is clearly more elegant, even though it has the drawback of building a new string at each recursive call. | def rechercheRecursiveBis(chaine,carac):
'''
    :input: chaine (string)
    :input: carac (string, a single character)
    :output: present (boolean)
    :pre-conditions: carac must be a single character; the string may be empty.
    :post-conditions: the boolean is set to True if the string contains the character and to False otherwise (including the case of the empty string)
>>> rechercheRecursiveBis("Bonjour", "j")
True
>>> rechercheRecursiveBis("Bonjour", "r")
True
>>> rechercheRecursiveBis("", "j")
False
>>> rechercheRecursiveBis("Bonjour", "a")
False
'''
if len(chaine)==0:
a=False
elif chaine[0]==carac:
a=True
else:
a=rechercheRecursiveBis(chaine[1:],carac)
return a
print(rechercheRecursiveBis("bonjour",'a'))
print(rechercheRecursiveBis("bonjour",'j'))
print(rechercheRecursiveBis("bonjour",'r'))
print(rechercheRecursiveBis("","r"))
| 2015-12-03 - TD16 - Récursivité et tableaux.ipynb | ameliecordier/iutdoua-info_algo2015 | cc0-1.0 |
Exercise 2. Write a recursive method that computes the sum of the elements of a list. You will also write the contract. | def sommeRec(l):
'''
    :input l: a list of numbers (integers or floats)
    :output somme: the sum of the elements of the list
    :pre-conditions: the list may be empty
    :post-condition: somme contains the sum of the elements of the list, and is therefore of the same type as the elements.
>>> sommeRec([1, 2, 3])
6
>>> sommeRec([])
0
>>> sommeRec([6, 42.2, 34])
82.2
'''
somme=0
if len(l)>1:
somme=l[0]+sommeRec(l[1:])
elif len(l)==1:
somme+=l[0]
return somme
print(sommeRec([1, 2, 3]))
print(sommeRec([]))
print(sommeRec([6, 42.2, 34]))
%doctest sommeRec | 2015-12-03 - TD16 - Récursivité et tableaux.ipynb | ameliecordier/iutdoua-info_algo2015 | cc0-1.0 |
Once again, the solution above is correct, but it is far from "simple" and easy to read for anyone other than the person who wrote the algorithm. Below we therefore propose a simpler rewrite. | def sommeRecBis(tab):
'''
    :input l: a list of numbers (integers or floats)
    :output somme: the sum of the elements of the list
    :pre-conditions: the list may be empty
    :post-condition: somme contains the sum of the elements of the list, and is therefore of the same type as the elements.
>>> sommeRecBis([1, 2, 3])
6
>>> sommeRecBis([])
0
>>> sommeRecBis([6, 42.2, 34])
82.2
'''
if len(tab) == 0:
somme = 0
else:
somme = tab[0]+sommeRecBis(tab[1:])
return somme
print(sommeRecBis([1, 2, 3]))
print(sommeRecBis([]))
print(sommeRecBis([6, 42.2, 34]))
%doctest sommeRecBis | 2015-12-03 - TD16 - Récursivité et tableaux.ipynb | ameliecordier/iutdoua-info_algo2015 | cc0-1.0 |
Exercise 3. Write an algorithm that searches for a number in a sorted array. Propose a recursive solution and a non-recursive solution. | def rechercheTab(tab,a):
'''
    :input tab: an array of numbers (integers or floats), sorted
    :input a: the number to search for
    :output i: the index of the array cell in which the number is found.
    :pre-conditions: the array is sorted in increasing order of value.
    :post-condition: the index of the first occurrence of the number that is found is returned.
    If the number is not present in the array, -1 is returned.
    :Note: this kind of search is called "binary search" ("recherche par dichotomie").
>>> rechercheTab([0,1,2,3,4],1)
1
'''
i=-1
b=len(tab)//2
if tab[b]==a:
i=b
elif b == 0:
i = -1
    elif tab[b]>a: # If the middle value of the array is greater than the value we are looking for, search the left part of the array
i=rechercheTab(tab[:b],a)
    else: # Otherwise, search the right part and handle the index offset.
i=rechercheTab(tab[b:],a)
if i != -1:
i = i+b
return i
print(rechercheTab([0,1,2,3,4],1))
print(rechercheTab([0,1,2,3,4],5))
print(rechercheTab([0,1,2,3,4],4))
print(rechercheTab([0,1.3,2.7,3.4],0))
| 2015-12-03 - TD16 - Récursivité et tableaux.ipynb | ameliecordier/iutdoua-info_algo2015 | cc0-1.0 |
From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship:
- Survived: Outcome of survival (0 = No; 1 = Yes)
- Pclass: Socio-economic class (1 = Upper class; 2 = Middle class; 3 = Lower class)
- Name: Name of passenger
- Sex: Sex of the passenger
- Age: Age of the passenger (Some entries contain NaN)
- SibSp: Number of siblings and spouses of the passenger aboard
- Parch: Number of parents and children of the passenger aboard
- Ticket: Ticket number of the passenger
- Fare: Fare paid by the passenger
- Cabin Cabin number of the passenger (Some entries contain NaN)
- Embarked: Port of embarkation of the passenger (C = Cherbourg; Q = Queenstown; S = Southampton)
Since we're interested in the outcome of survival for each passenger or crew member, we can remove the Survived feature from this dataset and store it as its own separate variable outcomes. | # Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(data.head()) | Titanic_Survival_Exploration/Titanic_Survival_Exploration-V1.ipynb | pushpajnc/models | mit |
The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcomes[i].
To measure the performance of our predictions, we need a metric to score our predictions against the true outcomes of survival. Since we are interested in how accurate our predictions are, we will calculate the proportion of passengers where our prediction of their survival is correct. | def accuracy_score(truth, pred):
""" Returns accuracy score for input truth and predictions. """
# Ensure that the number of predictions matches number of outcomes
if len(truth) == len(pred):
# Calculate and return the accuracy as a percent
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
else:
return "Number of predictions does not match number of outcomes!"
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))
print accuracy_score(outcomes[:5], predictions) | Titanic_Survival_Exploration/Titanic_Survival_Exploration-V1.ipynb | pushpajnc/models | mit |
Making Predictions
If we were asked to make a prediction about any passenger aboard the RMS Titanic whom we knew nothing about, then the best prediction we could make would be that they did not survive. This is because we can assume that a majority of the passengers (more than 50%) did not survive the ship sinking.
The predictions_0 function below will always predict that a passenger did not survive. | def predictions_0(data):
""" Model with no features. Always predicts a passenger did not survive. """
predictions = []
for _, passenger in data.iterrows():
# Predict the survival of 'passenger'
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_0(data)
print accuracy_score(outcomes, predictions) | Titanic_Survival_Exploration/Titanic_Survival_Exploration-V1.ipynb | pushpajnc/models | mit |
Using the RMS Titanic data, the prediction that none of the passengers survived would be 61.62% accurate.
Let's take a look at whether the feature Sex has any indication of survival rates among passengers using the survival_stats function. This function is defined in the titanic_visualizations.py. The first two parameters passed to the function are the RMS Titanic data and passenger survival outcomes, respectively. The third parameter indicates which feature we want to plot survival statistics across. | survival_stats(data, outcomes, 'Sex') | Titanic_Survival_Exploration/Titanic_Survival_Exploration-V1.ipynb | pushpajnc/models | mit |
Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction: If a passenger was female, then we will predict that they survived. Otherwise, we will predict the passenger did not survive. | def predictions_1(data):
""" Model with one feature:
- Predict a passenger survived if they are female. """
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
if(passenger['Sex'] == 'female'):
predictions.append(1)
else:
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_1(data)
print accuracy_score(outcomes, predictions) | Titanic_Survival_Exploration/Titanic_Survival_Exploration-V1.ipynb | pushpajnc/models | mit |
Therefore, the prediction that all female passengers survived and the remaining passengers did not survive, would be 78.68% accurate.
Using just the Sex feature for each passenger, we are able to increase the accuracy of our predictions by a significant margin. Now, let's consider using an additional feature to see if we can further improve our predictions. For example, consider all of the male passengers aboard the RMS Titanic: Can we find a subset of those passengers that had a higher rate of survival? Let's start by looking at the Age of each male, by again using the survival_stats function. This time, we'll use a fourth parameter to filter out the data so that only passengers with the Sex 'male' will be included. | survival_stats(data, outcomes, 'Age', ["Sex == 'male'"]) | Titanic_Survival_Exploration/Titanic_Survival_Exploration-V1.ipynb | pushpajnc/models | mit |
Examining the survival statistics, the majority of males younger then 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction: If a passenger was female, then we will predict they survive. If a passenger was male and younger than 10, then we will also predict they survive. Otherwise, we will predict they do not survive. | def predictions_2(data):
""" Model with two features:
- Predict a passenger survived if they are female.
- Predict a passenger survived if they are male and younger than 10. """
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
if passenger['Sex'] == 'female':
predictions.append(1)
elif passenger['Age'] < 10:
predictions.append(1)
else:
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_2(data) | Titanic_Survival_Exploration/Titanic_Survival_Exploration-V1.ipynb | pushpajnc/models | mit |
Prediction: all female passengers and all male passengers younger than 10 survived | print accuracy_score(outcomes, predictions) | Titanic_Survival_Exploration/Titanic_Survival_Exploration-V1.ipynb | pushpajnc/models | mit |
Thus, the accuracy increases to 79.35% with the prediction above.
Adding the feature Age as a condition in conjunction with Sex improves the accuracy by a small margin more than with simply using the feature Sex alone. | survival_stats(data, outcomes, 'Sex')
survival_stats(data, outcomes, 'Pclass')
survival_stats(data, outcomes, 'Pclass',["Sex == 'female'"])
survival_stats(data, outcomes, 'SibSp', ["Sex == 'female'", "Pclass == 3"])
survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Age < 18"])
survival_stats(data, outcomes, 'Pclass', ["Sex == 'male'", "Age < 15"])
survival_stats(data, outcomes, 'Age',["Sex == 'female'"])
survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Pclass == 1"] )
survival_stats(data, outcomes, 'Sex', ["Age < 10", "Pclass == 1"] )
survival_stats(data, outcomes, 'SibSp', ["Sex == 'male'"])
def predictions_3(data):
""" Model with multiple features. Makes a prediction with an accuracy of at least 80%. """
predictions = []
for _, passenger in data.iterrows():
if ( 'Master' in passenger['Name'] and np.isnan(passenger['Age'])) :
predictions.append(1)
continue
if ( passenger['Sex'] == 'male' and passenger['Age'] > 20
and passenger['Age'] < 41 and passenger['Pclass'] == 1) :
predictions.append(1)
continue
# Remove the 'pass' statement below
# and write your prediction conditions here
if passenger['Sex'] == 'female':
if(passenger['Pclass'] < 3):
predictions.append(1)
elif passenger['SibSp'] < 1:
predictions.append(1)
else:
predictions.append(0)
elif (passenger['Age'] < 10):
predictions.append(1)
else:
predictions.append(0)
# Return our predictions
print len(predictions)
return pd.Series(predictions)
# Make the predictions
predictions = predictions_3(data)
print accuracy_score(outcomes, predictions) | Titanic_Survival_Exploration/Titanic_Survival_Exploration-V1.ipynb | pushpajnc/models | mit |
Exercise 02.1
Split the training set in two sets with 70% and 30% of the data, respectively.
Split the database into two parts of 70% and 30%. |
# Insert code here
random_sample = np.random.rand(n_obs)
X_train, X_test = X[random_sample<0.7], X[random_sample>=0.7]
Y_train, Y_test = Y[random_sample<0.7], Y[random_sample>=0.7]
print(Y_train.shape, Y_test.shape) | exercises/02-Churn model-solution.ipynb | MonicaGutierrez/PracticalMachineLearningClass | mit |
Exercise 02.2
Train a logistic regression using the 70% set
Train a logistic regression using the 70% partition. |
# Insert code here
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(X_train, Y_train) | exercises/02-Churn model-solution.ipynb | MonicaGutierrez/PracticalMachineLearningClass | mit |
Exercise 02.3
a) Create a confusion matrix using the prediction on the 30% set.
b) Estimate the accuracy of the model in the 30% set
a) Estimate the confusion matrix on the 30% set.
b) Compute the predictive power using the 30% set. | # Insert code here
y_pred = clf.predict(X_test)
from sklearn.metrics import confusion_matrix
confusion_matrix(Y_test, y_pred)
(Y_test == y_pred).mean() | exercises/02-Churn model-solution.ipynb | MonicaGutierrez/PracticalMachineLearningClass | mit |
Below I'm running images through the VGG network in batches.
Exercise: Below, build the VGG network. Also get the codes from the first fully connected layer (make sure you get the ReLUd values). | # Set the batch size higher if you can fit it in your GPU memory
batch_size = 30
codes_list = []
labels = []
batch = []
codes = None
with tf.Session() as sess:
# TODO: Build the vgg network here
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
for each in classes:
print("Starting {} images".format(each))
class_path = data_dir + each
files = os.listdir(class_path)
for ii, file in enumerate(files, 1):
# Add images to the current batch
# utils.load_image crops the input images for us, from the center
img = utils.load_image(os.path.join(class_path, file))
batch.append(img.reshape((1, 224, 224, 3)))
labels.append(each)
# Running the batch through the network to get the codes
if ii % batch_size == 0 or ii == len(files):
# Image batch to pass to VGG network
images = np.concatenate(batch)
# TODO: Get the values from the relu6 layer of the VGG network
feed_dict = {input_: images}
codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)
# Here I'm building an array of the codes
if codes is None:
codes = codes_batch
else:
codes = np.concatenate((codes, codes_batch))
# Reset to start building the next batch
batch = []
print('{} images processed'.format(ii))
# write codes to file
with open('codes', 'w') as f:
codes.tofile(f)
# write labels to file
import csv
with open('labels', 'w') as f:
writer = csv.writer(f, delimiter='\n')
writer.writerow(labels) | transfer-learning/Transfer_Learning.ipynb | JJINDAHOUSE/deep-learning | mit |
Data prep
As usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels!
Exercise: From scikit-learn, use LabelBinarizer to create one-hot encoded vectors from the labels. | from sklearn.preprocessing import LabelBinarizer
# Your one-hot encoded labels array here
lb = LabelBinarizer()
lb.fit(labels)
labels_vecs = lb.transform(labels) | transfer-learning/Transfer_Learning.ipynb | JJINDAHOUSE/deep-learning | mit |
Training
Here, we'll train the network.
Exercise: So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help. Use the get_batches function I wrote before to get your batches like for x, y in get_batches(train_x, train_y). Or write your own! | epochs = 10
iteration = 0
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for x, y in get_batches(train_x, train_y):
feed = {
inputs_: x,
labels_: y
}
loss, _ = sess.run([cost, optimizer], feed_dict = feed)
print("Epoch: {}/{}".format(e+1, epochs),
"Iteration:{}".format(iteration),
"Training loss: {:.5f}".format(loss))
iteration += 1
if iteration % 5 == 0:
feed = {inputs_: val_x,
labels_: val_y}
val_acc = sess.run(accuracy, feed_dict=feed)
print("Epoch: {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Validation Acc: {:.4f}".format(val_acc))
# TODO: Your training code here
saver.save(sess, "checkpoints/flowers.ckpt") | transfer-learning/Transfer_Learning.ipynb | JJINDAHOUSE/deep-learning | mit |
Getting insights
Retrieve your project by its name.
Build your first model, on the central table, from the default schema. | prj = PredicSis.project('Outbound Mail Campaign') | 23.how_to_build_a_first_model_SDK/Build your first model.ipynb | jeanbaptistepriez/predicsis-ai-faq-tuto | gpl-3.0 |
Build a model from the default schema | mdl = prj.default_schema().fit('My first model')
mdl.auc() | 23.how_to_build_a_first_model_SDK/Build your first model.ipynb | jeanbaptistepriez/predicsis-ai-faq-tuto | gpl-3.0 |
In the cell above, I have loaded two datasets. The first dataset "reviews" is a list of 25,000 movie reviews that people wrote about various movies. The second dataset is a list of whether or not each review is a “positive” review or “negative” review. | reviews[0]
labels[0] | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
I want you to pretend that you’re a neural network for a moment. Consider a few examples from the two datasets below. Do you see any correlation between these two datasets? | print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998) | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
Well, let’s consider several different granularities. At the paragraph level, no two paragraphs are the same, so there can be no “correlation” per-say. You have to see two things occur at the same time more than once in order for there to be considered “correlation”. What about at the character level? I’m guessing the letter “b” is used just as much in positive reviews as it is in negative reviews. How about word level? Ah, I think there's some correlation between the words in these reviews and whether or not the review is positive or negative. | from collections import Counter
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
if(cnt > 10):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
pos_neg_ratios[term] = pos_neg_ratio
for word,ratio in pos_neg_ratios.most_common():
if(ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30] | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
Wow, there’s really something to this theory! As we can see, there are clearly terms in movie reviews that have correlation with our output labels. So, if we think there might be strong correlation between the words present in a particular review and the sentiment of that review, what should our network take as input and then predict? Let me put it a different way: If we think that there is correlation between the “vocabulary” of a particular review and the sentiment of that review, what should be the input and output to our neural network? The input should be the “vocabulary of the review” and the output should be whether or not the review is positive or negative!
Now that we have some idea that this task is possible (and where we want the network to find correlation), let’s try to train a neural network to predict sentiment based on the vocabulary of a movie review.
Transforming Text to Numbers
The next challenge is to transform our datasets into something that the neural network can read.
As I'm sure you've learned, neural networks are made up of layers of interconnected "neurons". The first layer is where our input data "goes in" to the network. Any particular "input neuron" can take exactly two kinds of inputs: binary inputs and "real valued" inputs. Previously, you've been training networks on raw, continuous data, i.e. real valued inputs. However, now we're modeling whether different input terms "exist" or "do not exist" in a movie review. When we model something that either "exists" or "does not exist", or when something is either "true" or "false", we want to use "binary" inputs to our neural network. This use of binary values is called "one-hot encoding". Let me show you what I mean.
Example Predictions | from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png') | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
The Input
Let's say our entire movie review corpus has 10,000 words. Given a single movie review ("This was a horrible, terrible movie"), we're going to put a "1" in the input of our neural network for every word that exists in the review, and a 0 everywhere else. So, given our 10,000 words, a movie review with 6 words would have 6 neurons with a "1" and 9,994 neurons with a "0". The picture above is a miniaturized version of this, displaying how we input a "1" for the words "horrible" and "terrible" while inputting a "0" for the word "excellent" because it was not present in the review.
The Output
In the same way, we want our network to either predict that the input is “positive” or “negative”. Now, our networks can’t write “positive” or “negative”, so we’re going to instead have another single neuron that represents “positive” when it is a “1” and “negative” when it is a “0”. In this way, our network can give us a number that we will interpret as “positive” or “negative”.
Big Picture
What we’re actually doing here is creating a “derivative dataset” from our movie reviews. Neural networks, after all, can’t read text. So, what we’re doing is identifying the “source of correlation” in our two datasets and creating a derivative dataset made up of numbers that preserve the patterns that we care about. In our input dataset, that pattern is the existence or non-existence of a particular word. In our output dataset, that pattern is whether a statement is positive or negative. Now we’ve converted our patterns into something our network can understand! Our network is going to look for correlation between the 1s and 0s in our input and the 1s and 0s in our output, and if it can do so it has learned to predict the sentiment of movie reviews. Now that our data is ready for the network, let’s start building the network.
Creating the Input Data
As we just learned above, in order for our neural network to predict on a movie review, we have to be able to create an input layer of 1s and 0s that correlates with the words present in a review. Let's start by creating a function that can take a review and generate this layer of 1s and 0s.
In order to create this function, we first must decide how many input neurons we need. The answer is quite simple. Since we want our network's input to be able to represent the presence or absence of any word in the vocabulary, we need one node per vocabulary term. So, our input layer size is the size of our vocabulary. Let's calculate that. | vocab = set(total_counts.keys())
vocab_size = len(vocab)
print(vocab_size) | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
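As a rough sketch of that function (the full version appears later inside the SentimentNetwork class), we can build a word-to-index mapping and flip on the matching positions of a zero vector; word2index and update_input_layer here are just illustrative names:

import numpy as np

word2index = {word: i for i, word in enumerate(vocab)}

def update_input_layer(review):
    # one row, with one column per vocabulary word
    layer_0 = np.zeros((1, vocab_size))
    for word in review.split(" "):
        layer_0[0][word2index[word]] = 1
    return layer_0

update_input_layer(reviews[0])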
Creating the Target Data
And now we want to do the same thing for our target predictions | def get_target_for_label(label):
if(label == 'POSITIVE'):
return 1
else:
return 0
get_target_for_label(labels[0])
get_target_for_label(labels[1]) | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
Making our Network Train and Run Faster
Even though this network is very trainable on a laptop, we can really get a lot more performance out of it, and doing so is all about understanding how the neural network is interacting with our data (again, "modeling the problem"). Let's take a moment to consider how layer_1 is generated. First, we're going to create a smaller layer_0 so that we can easily picture all the values in our notebook. | layer_0 = np.zeros(10)
layer_0 | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
Now, let's set a few of the inputs to 1s, and create a sample weight matrix | layer_0[4] = 1
layer_0[9] = 1
layer_0
weights_0_1 = np.random.randn(10,5) | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
So, given these pieces, layer_1 is created in the following way.... | layer_1 = layer_0.dot(weights_0_1)
layer_1 | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
layer_1 is generated by performing a vector->matrix multiplication; however, most of our input neurons are turned off! Thus, there's actually a lot of computation being wasted. Consider the network below. | Image(filename='sentiment_network_sparse.png')
First Inefficiency: "0" neurons waste computation
If you recall from previous lessons, each edge from one neuron to another represents a single value in our weights_0_1 matrix. When we forward propagate, we take our input neuron's value, multiply it by each weight attached to that neuron, and then sum all the resulting values in the next layer. So, in this case, if only "excellent" was turned on, then all of the multiplications coming out of "horrible" and "terrible" are wasted computation! All of the weights coming out of "horrible" and "terrible" are being multiplied by 0, thus having no effect on our values in layer_1. | Image(filename='sentiment_network_sparse_2.png')
Second Inefficiency: "1" neurons don't need to multiply!
When we're forward propagating, we multiply our input neuron's value by the weights attached to it. However, in this case, when the neuron is turned on, it's always turned on to exactly 1. So, there's no need for multiplication, what if we skipped this step?
The Solution: Create layer_1 by adding the vectors for each word.
Instead of generating a huge layer_0 vector and then performing a full vector->matrix multiplication across our huge weights_0_1 matrix, we can simply sum the rows of weights_0_1 that correspond to the words in our review. The resulting value of layer_1 will be exactly the same as if we had performed a full matrix multiplication at a fraction of the computational cost. This is called a "lookup table" or an "embedding layer". | #inefficient thing we did before
layer_1 = layer_0.dot(weights_0_1)
layer_1
# new, less expensive lookup table version
layer_1 = weights_0_1[4] + weights_0_1[9]
layer_1 | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
See how they generate exactly the same value? Let's update our new neural network to do this. | import time
import sys
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
np.random.seed(1)
self.pre_process_data(reviews)
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
def pre_process_data(self,reviews):
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
self.layer_1 = np.zeros((1,hidden_nodes))
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
self.layer_0[0][self.word2index[word]] = 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def train(self, training_reviews_raw, training_labels):
training_reviews = list()
for review in training_reviews_raw:
indices = set()
for word in review.split(" "):
if(word in self.word2index.keys()):
indices.add(self.word2index[word])
training_reviews.append(list(indices))
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
# Hidden layer
# layer_1 = self.layer_0.dot(self.weights_0_1)
self.layer_1 *= 0
for index in review:
self.layer_1 += self.weights_0_1[index]
# Output layer
layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# Update the weights
self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
for index in review:
self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate # update input-to-hidden weights with gradient descent step
if(np.abs(layer_2_error) < 0.5):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
# Hidden layer
self.layer_1 *= 0
unique_indices = set()
for word in review.lower().split(" "):
if word in self.word2index.keys():
unique_indices.add(self.word2index[word])
for index in unique_indices:
self.layer_1 += self.weights_0_1[index]
# Output layer
layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))
if(layer_2[0] > 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],learning_rate=0.01)
# train the network
mlp.train(reviews[:-1000],labels[:-1000]) | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
And voilà! Our network learns 10x faster than before while making exactly the same predictions! | # evaluate our model before training (just to show how horrible it is)
mlp.test(reviews[-1000:],labels[-1000:]) | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
Our network even tests over twice as fast as well!
Making Learning Faster & Easier by Reducing Noise
So at first this might seem like the same thing we did in the previous section. However, while the previous section was about looking for computational waste and trimming it out, this section is about looking for noise in our data and trimming it out. When we reduce the "noise" in our data, the neural network can identify correlation much faster and with greater accuracy. While our technique will be simple, many recently developed state-of-the-art techniques (most notably attention and batch normalization) are all about reducing the amount of noise that your network has to filter through. The more obvious you can make the correlation to your neural network, the better.
Our network is looking for correlation between movie review vocabularies and output positive/negative labels. In order to do this, our network has to come to understand over 70,000 different words in our vocabulary! That's a ton of knowledge that the network has to learn!
This begs the question: are all the words in the vocabulary actually relevant to sentiment? A few pages ago, we counted how often words occurred in positive reviews relative to negative reviews and created a ratio. We could then sort words by this ratio and see the words with the most positive and negative affinity. If you remember, the output looked like this: | # words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
output_notebook()
hist, edges = np.histogram(list(map(lambda x:x[1],pos_neg_ratios.most_common())), density=True, bins=100)
p = figure(tools="pan,wheel_zoom,reset,save",
toolbar_location="above",
title="Word Positive/Negative Affinity Distribution")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p) | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
In this graph, "0" means that a word has no affinity for either positive or negative. As you can see, the vast majority of our words don't have that much direct affinity! So, our network is having to learn about lots of terms that are likely irrelevant to the final prediction. If we remove some of the most irrelevant words, our network will have fewer words that it has to learn about, allowing it to focus more on the words that matter.
Furthermore, check out this graph of simple word frequency | frequency_frequency = Counter()
for word, cnt in total_counts.most_common():
frequency_frequency[cnt] += 1
hist, edges = np.histogram(list(map(lambda x:x[1],frequency_frequency.most_common())), density=True, bins=100)
p = figure(tools="pan,wheel_zoom,reset,save",
toolbar_location="above",
title="The frequency distribution of the words in our corpus")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p) | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
As you can see, the vast majority of words in our corpus only happen once or twice. Unfortunately, this isn't enough for any of those words to be correlated with anything. Correlation requires seeing two things occur at the same time on multiple occasions so that you can identify a pattern. We should eliminate these very low frequency terms as well.
In the next network, we eliminate both low frequency words (via a min_count parameter) and words with low positive/negative affinity (via a polarity_cutoff parameter). | import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
def __init__(self, reviews,labels,min_count = 10,polarity_cutoff = 0.1,hidden_nodes = 10, learning_rate = 0.1):
np.random.seed(1)
self.pre_process_data(reviews, polarity_cutoff, min_count)
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
def pre_process_data(self,reviews, polarity_cutoff,min_count):
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
if(cnt >= 50):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
pos_neg_ratios[term] = pos_neg_ratio
for word,ratio in pos_neg_ratios.most_common():
if(ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio + 0.01)))
review_vocab = set()
for review in reviews:
for word in review.split(" "):
if(total_counts[word] > min_count):
if(word in pos_neg_ratios.keys()):
if((pos_neg_ratios[word] >= polarity_cutoff) or (pos_neg_ratios[word] <= -polarity_cutoff)):
review_vocab.add(word)
else:
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
self.layer_1 = np.zeros((1,hidden_nodes))
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
self.layer_0[0][self.word2index[word]] = 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def train(self, training_reviews_raw, training_labels):
training_reviews = list()
for review in training_reviews_raw:
indices = set()
for word in review.split(" "):
if(word in self.word2index.keys()):
indices.add(self.word2index[word])
training_reviews.append(list(indices))
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
# Hidden layer
# layer_1 = self.layer_0.dot(self.weights_0_1)
self.layer_1 *= 0
for index in review:
self.layer_1 += self.weights_0_1[index]
# Output layer
layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# Update the weights
self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
for index in review:
self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate # update input-to-hidden weights with gradient descent step
if(layer_2 >= 0.5 and label == 'POSITIVE'):
correct_so_far += 1
if(layer_2 < 0.5 and label == 'NEGATIVE'):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
# Hidden layer
self.layer_1 *= 0
unique_indices = set()
for word in review.lower().split(" "):
if word in self.word2index.keys():
unique_indices.add(self.word2index[word])
for index in unique_indices:
self.layer_1 += self.weights_0_1[index]
# Output layer
layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))
if(layer_2[0] >= 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:]) | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
So, using these techniques, we are able to achieve a slightly higher testing score while training 2x faster than before. Furthermore, if we really crank up these cutoffs, we can get some pretty extreme speed with minimal loss in quality (if, for example, your business use case requires running very fast). | mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:]) | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
What's Going On in the Weights? | mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
import matplotlib.colors as colors
words_to_visualize = list()
for word, ratio in pos_neg_ratios.most_common(500):
if(word in mlp.word2index.keys()):
words_to_visualize.append(word)
for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:
if(word in mlp.word2index.keys()):
words_to_visualize.append(word)
colors_list = list()
vectors_list = list()
for word in words_to_visualize:
if word in pos_neg_ratios.keys():
vectors_list.append(mlp.weights_0_1[mlp.word2index[word]])
if(pos_neg_ratios[word] > 0):
colors_list.append("#"+colors.rgb2hex([0,min(255,pos_neg_ratios[word] * 1),0])[3:])
else:
colors_list.append("#000000")
# colors_list.append("#"+colors.rgb2hex([0,0,min(255,pos_neg_ratios[word] * 1)])[3:])
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
words_top_ted_tsne = tsne.fit_transform(vectors_list)
p = figure(tools="pan,wheel_zoom,reset,save",
toolbar_location="above",
title="vector T-SNE for most polarized words")
source = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0],
x2=words_top_ted_tsne[:,1],
names=words_to_visualize))
p.scatter(x="x1", y="x2", size=8, source=source,color=colors_list)
word_labels = LabelSet(x="x1", y="x2", text="names", y_offset=6,
text_font_size="8pt", text_color="#555555",
source=source, text_align='center')
# p.add_layout(word_labels)
show(p)
# green indicates positive words, black indicates negative words | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network-checkpoint.ipynb | xpharry/Udacity-DLFoudation | mit |
Question 0:
That plot looks pretty nice but isn't publication-ready. Luckily, matplotlib
has a wide array of plot customizations.
Skim through the first part of the tutorial at
https://www.labri.fr/perso/nrougier/teaching/matplotlib
to create the plot below. There is a lot of extra information there which we suggest
you read on your own time. For now, just look for what you need to make the plot.
Specifically, you'll have to change the x and y limits, add a title, and add
a legend. | plt.plot(xs, ys, label='cosine')
plt.plot(xs, np.sin(xs), label='sine')
plt.xlim(0, 2 * np.pi)
plt.ylim(-1.1, 1.1)
plt.title('Graphs of sin(x) and cos(x)')
plt.legend(loc='lower left', frameon=False)
plt.savefig('q1.png') | sp17/labs/lab03/lab03_solution.ipynb | DS-100/sp17-materials | gpl-3.0 |
seaborn
Now, we'll learn how to use the seaborn Python library. seaborn
is built on top of matplotlib and provides many helpful functions
for statistical plotting that matplotlib and pandas don't have.
Generally speaking, we'll use seaborn for more complex statistical plots,
pandas for simple plots (eg. line / scatter plots), and
matplotlib for plot customization.
Nearly all seaborn functions are designed to operate on pandas
dataframes. Most of these functions assume that the dataframe is in
a specific format called long-form, where each column of the dataframe
is a particular feature and each row of the dataframe a single datapoint.
For example, this dataframe is long-form:
country year avgtemp
1 Sweden 1994 6
2 Denmark 1994 6
3 Norway 1994 3
4 Sweden 1995 5
5 Denmark 1995 8
6 Norway 1995 11
7 Sweden 1996 7
8 Denmark 1996 8
9 Norway 1996 7
But this dataframe of the same data is not:
country avgtemp.1994 avgtemp.1995 avgtemp.1996
1 Sweden 6 5 7
2 Denmark 6 8 8
3 Norway 3 11 7
Note that the bike_trips dataframe is long-form.
For more about long-form data, see https://stanford.edu/~ejdemyr/r-tutorials/wide-and-long.
For now, just remember that we typically prefer long-form data and it makes plotting using
seaborn easy as well.
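As a quick aside (not part of the original lab), pandas can reshape a wide-form frame like the second table above into long-form with melt; the column names below simply mirror that example:

import pandas as pd

wide = pd.DataFrame({
    'country': ['Sweden', 'Denmark', 'Norway'],
    'avgtemp.1994': [6, 6, 3],
    'avgtemp.1995': [5, 8, 11],
    'avgtemp.1996': [7, 8, 7],
})

# melt turns the per-year columns into (country, year, avgtemp) rows
long_form = pd.melt(wide, id_vars='country',
                    var_name='year', value_name='avgtemp')
# keep just the numeric year (e.g. 'avgtemp.1994' -> 1994)
long_form['year'] = long_form['year'].str[-4:].astype(int)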
Question 2:
Use seaborn's barplot function to make a bar chart showing the average
number of registered riders on each day of the week over the
entire bike_trips dataset.
Here's a link to the seaborn API: http://seaborn.pydata.org/api.html
See if you can figure it out by reading the docs and talking with your partner.
Once you have the plot, discuss it with your partner. What trends do you
notice? What do you suppose causes these trends?
Notice that barplot draws error bars for each category. It uses bootstrapping
to make those. | sns.barplot(x='weekday', y='registered', data=bike_trips) | sp17/labs/lab03/lab03_solution.ipynb | DS-100/sp17-materials | gpl-3.0 |
Question 3: Now for a fancier plot that seaborn makes really easy to produce.
Use the distplot function to plot a histogram of all the total rider counts in the
bike_trips dataset. | sns.distplot(bike_trips['cnt']) | sp17/labs/lab03/lab03_solution.ipynb | DS-100/sp17-materials | gpl-3.0 |
Notice that seaborn will fit a curve to the histogram of the data. Fancy!
Question 4: Discuss this plot with your partner. What shape does the distribution
have? What does that imply about the rider counts?
Question 5:
Use seaborn to make side-by-side boxplots of the number of casual riders (just
checked out a bike for that day) and registered riders (have a bikeshare membership).
The boxplot function will plot all the columns of the dataframe you pass in.
Once you make the plot, you'll notice that there are many outliers that make
the plot hard to see. To mitigate this, change the y-scale to be logarithmic.
That's a plot customization so you'll use matplotlib. The boxplot function returns
a matplotlib Axes object which represents a single plot and
has a set_yscale function.
The result should look like: | ax = sns.boxplot(data=bike_trips[['casual', 'registered']])
ax.set_yscale('log')
plt.savefig('q5.png') | sp17/labs/lab03/lab03_solution.ipynb | DS-100/sp17-materials | gpl-3.0 |
Question 6: Discuss with your partner what the plot tells you about the
distribution of casual vs. the distribution of registered riders.
Question 7: Let's take a closer look at the number of registered vs. casual riders.
Use the lmplot function to make a scatterplot. Put the number of casual
riders on the x-axis and the number of registered riders on the y-axis.
Each point should correspond to a single row in your bike_trips dataframe. | sns.lmplot('casual', 'registered', bike_trips) | sp17/labs/lab03/lab03_solution.ipynb | DS-100/sp17-materials | gpl-3.0 |
Question 8: What do you notice about that plot? Discuss with
your partner. Notice that seaborn automatically fits a line of best
fit to the plot. Does that line seem to be relevant?
You should note that lmplot allows you to pass in fit_reg=False to avoid plotting lines of best fit when you feel they are unnecessary or misleading.
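For instance (illustrative only):

sns.lmplot('casual', 'registered', bike_trips, fit_reg=False)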
Question 9: There seem to be two main groups in the scatterplot. Let's
see if we can separate them out.
Use lmplot to make the scatterplot again. This time, use the hue parameter
to color points for weekday trips differently from weekend trips. You should
get something that looks like: | sns.lmplot('casual', 'registered', bike_trips, hue='workingday',
scatter_kws={'s': 6})
plt.savefig('q9.png')
# Note that the legend for workingday isn't super helpful. 0 in this case
# means "not a working day" and 1 means "working day". Try fixing the legend
# to be more descriptive. | sp17/labs/lab03/lab03_solution.ipynb | DS-100/sp17-materials | gpl-3.0 |
Question 10: Discuss the plot with your partner. Was splitting the data
by working day informative? One of the best-fit lines looks valid but the other
doesn't. Why do you suppose that is?
Question 11 (bonus): Eventually, you'll want to be able to pose a
question yourself and answer it using a visualization. Here's a question
you can think about:
How do the number of casual and registered riders change throughout the day,
on average?
See if you can make a plot to answer this. | riders_by_hour = (bike_trips.groupby('hr')
.agg({'casual': 'mean', 'registered': 'mean'}))
riders_by_hour.plot.line() | sp17/labs/lab03/lab03_solution.ipynb | DS-100/sp17-materials | gpl-3.0 |
Want to learn more?
We recommend checking out the seaborn tutorials on your own time. http://seaborn.pydata.org/tutorial.html
The matplotlib tutorial we linked in Question 1 is also a great refresher on common matplotlib functions: https://www.labri.fr/perso/nrougier/teaching/matplotlib/
Here's a great blog post about the differences between Python's visualization libraries:
https://dansaber.wordpress.com/2016/10/02/a-dramatic-tour-through-pythons-data-visualization-landscape-including-ggplot-and-altair/
Submission
Change i_definitely_finished to True and run the cells below to submit the lab. You may resubmit as many times you want. We will be grading you on effort/completion. | i_definitely_finished = True
_ = ok.grade('qcompleted')
_ = ok.backup()
_ = ok.submit() | sp17/labs/lab03/lab03_solution.ipynb | DS-100/sp17-materials | gpl-3.0 |
9. Adaptive learning rate
exponential_decay
consine_decay
linear_cosine_decay
consine_decay_restarts
polynomial decay
piecewise_constant_decay | def create_estimator(params, run_config):
wide_columns, deep_columns = create_feature_columns()
def _update_optimizer(initial_learning_rate, decay_steps):
# learning_rate = tf.train.exponential_decay(
# initial_learning_rate,
# global_step=tf.train.get_global_step(),
# decay_steps=decay_steps,
# decay_rate=0.9
# )
learning_rate = tf.train.cosine_decay_restarts(
initial_learning_rate,
tf.train.get_global_step(),
first_decay_steps=50,
t_mul=2.0,
m_mul=1.0,
alpha=0.0,
)
tf.summary.scalar('learning_rate', learning_rate)
return tf.train.AdamOptimizer(learning_rate=learning_rate)
estimator = tf.estimator.DNNLinearCombinedClassifier(
n_classes=len(TARGET_LABELS),
label_vocabulary=TARGET_LABELS,
weight_column=WEIGHT_COLUMN_NAME,
dnn_feature_columns=deep_columns,
dnn_optimizer=lambda: _update_optimizer(params.learning_rate, params.max_steps),
dnn_hidden_units=params.hidden_units,
dnn_dropout=params.dropout,
batch_norm=True,
linear_feature_columns=wide_columns,
linear_optimizer='Ftrl',
config=run_config
)
return estimator
params.learning_rate = 0.1
params.max_steps = 1000
run_config = tf.estimator.RunConfig(
tf_random_seed=RANDOM_SEED,
save_checkpoints_steps=200,
model_dir=model_dir,
)
if COLAB:
from tensorboardcolab import *
TensorBoardColab(graph_path=model_dir)
estimator = create_estimator(params, run_config)
run_experiment(estimator, params, run_config) | 00_Miscellaneous/tfx/01_tf_estimator_deepdive.ipynb | GoogleCloudPlatform/tf-estimator-tutorials | apache-2.0 |
Coordinate transformations
Coordinates in astronomy often come in equatorial coordinates, specified by right ascension (RA) and declination (DEC). | import astropy.coordinates as coord
c1 = coord.SkyCoord(ra=150*u.degree, dec=-17*u.degree)
c2 = coord.SkyCoord(ra='21:15:32.141', dec=-17*u.degree, unit=(u.hourangle,u.degree)) | day3/Astropy-Demo.ipynb | timothydmorton/usrp-sciprog | mit |
If we wanted this coordinate on the celestial sphere to another system (of the celestial sphere), which is tied to our Galaxy, we can do this: | c1.transform_to(coord.Galactic) | day3/Astropy-Demo.ipynb | timothydmorton/usrp-sciprog | mit |
Note: It may take 4-5 minutes to see the results of different batches.
MobileNetV2
These flower photos are much larger than the handwriting recognition images in MNIST. They have about 10 times as many pixels per axis and there are three color channels, making the information here over 200 times larger!
How do our current techniques stand up? Copy your best model architecture over from the <a href="2_mnist_models.ipynb">MNIST models lab</a> and see how well it does after training for 5 epochs of 50 steps.
TODO 2.a Copy over the most accurate model from 2_mnist_models.ipynb or build a new CNN Keras model. | eval_path = "gs://cloud-ml-data/img/flower_photos/eval_set.csv"
nclasses = len(CLASS_NAMES)
hidden_layer_1_neurons = 400
hidden_layer_2_neurons = 100
dropout_rate = 0.25
num_filters_1 = 64
kernel_size_1 = 3
pooling_size_1 = 2
num_filters_2 = 32
kernel_size_2 = 3
pooling_size_2 = 2
layers = [
Conv2D(num_filters_1, kernel_size=kernel_size_1,
activation='relu',
input_shape=(IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS)),
MaxPooling2D(pooling_size_1),
Conv2D(num_filters_2, kernel_size=kernel_size_2,
activation='relu'),
MaxPooling2D(pooling_size_2),
Flatten(),
Dense(hidden_layer_1_neurons, activation='relu'),
Dense(hidden_layer_2_neurons, activation='relu'),
Dropout(dropout_rate),
Dense(nclasses),
Softmax()
]
old_model = Sequential(layers)
old_model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
train_ds = load_dataset(train_path, BATCH_SIZE)
eval_ds = load_dataset(eval_path, BATCH_SIZE, training=False)
old_model.fit_generator(
train_ds,
epochs=5,
steps_per_epoch=5,
validation_data=eval_ds,
validation_steps=VALIDATION_STEPS
) | courses/machine_learning/deepdive2/image_classification/solutions/3_tf_hub_transfer_learning.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
If your model is like mine, it learns a little bit, slightly better than random, but ugh, it's too slow! With a batch size of 32, 5 epochs of 5 steps is only getting through about a quarter of our images. Not to mention, this is a much larger problem than MNIST, so wouldn't we need a larger model? But how big would we need to make it?
Enter Transfer Learning. Why not take advantage of someone else's hard work? We can take the layers of a model that's been trained on a similar problem to ours and splice them into our own model.
Tensorflow Hub is a database of models, many of which can be used for Transfer Learning. We'll use a model called MobileNet which is an architecture optimized for image classification on mobile devices, which can be done with TensorFlow Lite. Let's compare how a model trained on ImageNet data compares to one built from scratch.
The tensorflow_hub Python package has a function to include a Hub model as a layer in Keras. We'll set the weights of this model as un-trainable. Even though this is a compressed version of full scale image classification models, it still has over four hundred thousand parameters! Training all of these would not only add to our computation, but would also make the model prone to over-fitting. We'll add some L2 regularization and Dropout to prevent that from happening to our trainable weights.
TODO 2.b: Add a Hub Keras Layer at the top of the model using the handle provided. | module_selection = "mobilenet_v2_100_224"
module_handle = "https://tfhub.dev/google/imagenet/{}/feature_vector/4" \
.format(module_selection)
transfer_model = tf.keras.Sequential([
hub.KerasLayer(module_handle, trainable=False),
tf.keras.layers.Dropout(rate=0.2),
tf.keras.layers.Dense(
nclasses,
activation='softmax',
kernel_regularizer=tf.keras.regularizers.l2(0.0001))
])
transfer_model.build((None,)+(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
transfer_model.summary() | courses/machine_learning/deepdive2/image_classification/solutions/3_tf_hub_transfer_learning.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
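As a sketch of the next step (not shown in the original cells), the transfer model can be compiled and fit the same way as the model built from scratch above; the optimizer, loss, and step counts below simply mirror that earlier call and should be treated as placeholders:

transfer_model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'])

# assumes the train_ds and eval_ds datasets created with load_dataset above
transfer_model.fit(
    train_ds,
    epochs=5,
    steps_per_epoch=5,
    validation_data=eval_ds,
    validation_steps=VALIDATION_STEPS)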
Data Preparation
For the first iteration, we'll only use data after 2009. This is when most modern statistics began to be kept (though not all of them did). | model_data = matches[matches['season'] >= 2010] | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
To keep the model simple, we don't treat draws as a separate outcome; instead, we mark them as victories for the away team. | for idx, row in model_data.iterrows():
if row['winner'] == 'draw':
model_data.at[idx,'winner'] = 'away' | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
We want to split the data into test and train in a stratified manner, i.e. we don't want to favour a certain season, or a part of the season. So we'll take a portion (25%) of games from each round. | # How many games do we get per round?
round_counts = {}
curr_round = 1
matches_in_round = 0
for idx,row in model_data.iterrows():
if curr_round != row['round']:
if matches_in_round not in round_counts:
round_counts[matches_in_round] = 1
else:
round_counts[matches_in_round] += 1
curr_round = row['round']
matches_in_round = 1
continue
else:
matches_in_round += 1
round_counts
# Taking a minimum 25% of each round
from math import ceil
test_sample_size = {}
for num_games in round_counts:
test_sample_size[num_games] = ceil(num_games/4)
rounds_in_season = get_season_rounds(model_data)
teams_in_season = get_season_teams(model_data) | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Create test and training data | # test set
from copy import deepcopy
test_data = pd.DataFrame()
for season, max_round in rounds_in_season.items():
for rnd in range(1, max_round):
round_matches = model_data[(model_data['season']==season) & (model_data['round']==rnd)]
num_test = test_sample_size[len(round_matches)]
round_test_set = round_matches.sample(num_test)
test_data = test_data.append(round_test_set)
# training set
training_data = model_data.drop(test_data.index) | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Capture all of the 'diff' columns in the model, too | diff_cols = [col for col in model_data.columns if col[0:4] == 'diff'] | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Define features | features = [col
for col
in ['h_career_' + col for col in player_cols_to_agg] + \
['h_season_' + col for col in player_cols_to_agg] + \
['a_career_' + col for col in player_cols_to_agg] + \
['a_season_' + col for col in player_cols_to_agg] + \
['h_' + col for col in ladder_cols] + \
['h_' + col + '_form' for col in ladder_cols] + \
['a_' + col for col in ladder_cols] + \
['a_' + col + '_form' for col in ladder_cols] + \
['h_career_' + col for col in misc_columns] + \
['h_season_' + col for col in misc_columns] + \
['a_career_' + col for col in misc_columns] + \
['a_season_' + col for col in misc_columns] + \
diff_cols
]
# REMOVE PERCENTAGE FOR NOW
features.remove('h_percentage')
features.remove('a_percentage')
features.remove('diff_percentage')
target = 'winner' | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Set up test and train datasets | X_train = training_data[features]
y_train = training_data[target]
X_test = test_data[features]
y_test = test_data[target] | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Fill the NaN values | X_train.fillna(0,inplace=True)
y_train.fillna(0,inplace=True)
X_test.fillna(0,inplace=True)
y_test.fillna(0,inplace=True) | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Modelling
Model 1: Logistic regression | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
import numpy as np
log_reg = LogisticRegression()
param_grid = {
'tol': [.0001, .001, .01],
'C': [.1, 1, 10],
'max_iter': [50,100,200]
}
grid_log_reg = GridSearchCV(log_reg, param_grid, cv=5)
grid_log_reg.fit(X_train, y_train)
grid_log_reg.score(X_train,y_train)
grid_log_reg.score(X_test,y_test)
# Confirm that it's not just picking the home team
print(sum(grid_log_reg.predict(X_test)=='away'))
print(sum(grid_log_reg.predict(X_test)=='home')) | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Model 2: using less features | diff_cols = [col for col in model_data.columns if col[0:4] == 'diff']
features = diff_cols
# REMOVE PERCENTAGE FOR NOW
diff_cols.remove('diff_percentage')
target = 'winner'
X_train_2 = training_data[diff_cols]
y_train_2 = training_data[target]
X_test_2 = test_data[diff_cols]
y_test_2 = test_data[target]
#X_train_2 = X_train_2[features]
#y_train_2 = y_train_2[features]
#X_test_2 = X_test_2[features]
#y_test_2 = y_test_2[features]
X_train_2.fillna(0,inplace=True)
y_train_2.fillna(0,inplace=True)
X_test_2.fillna(0,inplace=True)
y_test_2.fillna(0,inplace=True)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
import numpy as np
log_reg_2 = LogisticRegression()
param_grid = {
'tol': [.0001, .001, .01],
'C': [.1, 1, 10],
'max_iter': [50,100,200]
}
grid_log_reg_2 = GridSearchCV(log_reg_2, param_grid, cv=5)
grid_log_reg_2.fit(X_train_2, y_train_2)
grid_log_reg_2.score(X_train_2,y_train_2)
grid_log_reg_2.score(X_test_2,y_test_2)
training_data[(training_data['round']==1) & (training_data['season']==2018)] | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Training model on all of the data
Generating predictions
Now that we have a model, we need to ingest data for that model to make a prediction on.
Start by reading in the fixture. | fixture_path = '/Users/t_raver9/Desktop/projects/aflengine/tipengine/fixture2020.csv'
fixture = pd.read_csv(fixture_path)
fixture[fixture['round']==2] | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
We'll then prepare the data for the round we're interested in. We'll do this by:
- getting the team-level data, such as ladder position and form
- getting the player-level data and aggregating it up to the team level
To get the player-level data, we also need to choose who is playing for each team. | next_round_matches = get_upcoming_matches(matches,fixture,round_num=2)
next_round_matches | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Get the IDs for the players we'll be using | import cv2
import pytesseract
custom_config = r'--oem 3 --psm 6'
import pathlib
names_dir = '/Users/t_raver9/Desktop/projects/aflengine/analysis/machine_learning/src/OCR/images'
# Initialise the dictionary
player_names_dict = {}
for team in matches['hteam'].unique():
player_names_dict[team] = []
# Fill out the dictionary
for path in pathlib.Path(names_dir).iterdir():
print(path)
if path.name.split('.')[0] in player_names_dict:
path_str = str(path)
image_obj = cv2.imread(path_str)
image_string = pytesseract.image_to_string(image_obj, config=custom_config)
names = get_player_names(image_string)
player_names_dict[path.name.split('.')[0]].extend(names) | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Try including Bachar Houli
Now we can collect the data for each player and aggregate it to the team level, as we would with the training data. | from copy import deepcopy
players_in_rnd = []
for _, v in player_names_dict.items():
players_in_rnd.extend(v)
player_data = get_player_data(players_in_rnd)
players_in_rnd
aggregate = player_data[player_cols].groupby('team').apply(lambda x: x.mean(skipna=False))
# Factor in any missing players
num_players_per_team = player_data[player_cols].groupby('team').count()['Supercoach']
for team in num_players_per_team.index:
aggregate.loc[team] = aggregate.loc[team] * (22/num_players_per_team[team])
aggs_h = deepcopy(aggregate)
aggs_a = deepcopy(aggregate)
aggs_h.columns = aggregate.columns.map(lambda x: 'h_' + str(x))
aggs_a.columns = aggregate.columns.map(lambda x: 'a_' + str(x))
combined = next_round_matches.merge(aggs_h, left_on='hteam', right_on='team')
combined = combined.merge(aggs_a, left_on='ateam', right_on='team')
combined = get_diff_cols(combined)
pd.set_option('max_columns',500) | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
We can now use this to make predictions. | X = combined[features]
X['diff_wins_form']
grid_log_reg.decision_function(X)
grid_log_reg.predict_proba(X)
grid_log_reg.predict(X)
Z = combined[diff_cols]
grid_log_reg_2.predict_proba(Z)
grid_log_reg_2.predict(Z)
combined[['ateam','hteam']]
combined[['h_percentage_form','a_percentage_form']]
combined[['h_career_games_played','a_career_games_played']]
combined[['h_wins_form','a_wins_form']]
model_coef = grid_log_reg.best_estimator_.coef_
X['diff_season_Supercoach'] | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Glue these together and sort | coef = []
for i in model_coef:
for j in i:
coef.append(abs(j))
zipped = list(zip(features,coef))
zipped.sort(key = lambda x: x[1],reverse=True)
zipped | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Training model on all data | features = [col
for col
in ['h_career_' + col for col in player_cols_to_agg] + \
['h_season_' + col for col in player_cols_to_agg] + \
['a_career_' + col for col in player_cols_to_agg] + \
['a_season_' + col for col in player_cols_to_agg] + \
['h_' + col for col in ladder_cols] + \
['h_' + col + '_form' for col in ladder_cols] + \
['a_' + col for col in ladder_cols] + \
['a_' + col + '_form' for col in ladder_cols] + \
['h_career_' + col for col in misc_columns] + \
['h_season_' + col for col in misc_columns] + \
['a_career_' + col for col in misc_columns] + \
['a_season_' + col for col in misc_columns] + \
diff_cols
]
# REMOVE PERCENTAGE FOR NOW
features.remove('h_percentage')
features.remove('a_percentage')
features.remove('diff_percentage')
target = 'winner'
X = model_data[features]
y = model_data[target]
X.fillna(0,inplace=True)
y.fillna(0,inplace=True)
grid_log_reg_2.predict_proba(Z)
combined[['ateam','hteam']] | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Visualisation | import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
category_names = ['Home','Away']
results = {
'Collingwood v Richmond': [50.7,49.3],
'Geelong v Hawthorn': [80.4,19.5],
'Brisbane Lions v Fremantle': [57.3,42.7],
'Carlton v Melbourne': [62.4,37.6],
'Gold Coast v West Coast': [9.9,90.1],
'Port Adelaide v Adelaide': [58.0,42.0],
'GWS v North Melbourne': [62.6,37.4],
'Sydney v Essendon': [75.2,24.8],
'St Kilda v Footscray': [61.2,38.8]
}
def survey(results, category_names):
"""
Parameters
----------
results : dict
A mapping from question labels to a list of answers per category.
It is assumed all lists contain the same number of entries and that
it matches the length of *category_names*.
category_names : list of str
The category labels.
"""
labels = list(results.keys())
data = np.array(list(results.values()))
data_cum = data.cumsum(axis=1)
category_colors = plt.get_cmap('RdYlGn')(
np.linspace(0.15, 0.85, data.shape[1]))
fig, ax = plt.subplots(figsize=(18, 10))
fig.suptitle('Win Probabilities', fontsize=20)
ax.invert_yaxis()
ax.xaxis.set_visible(False)
ax.set_xlim(0, np.sum(data, axis=1).max())
for i, (colname, color) in enumerate(zip(category_names, category_colors)):
widths = data[:, i]
starts = data_cum[:, i] - widths
ax.barh(labels, widths, left=starts, height=0.5,
label=colname, color=color)
xcenters = starts + widths / 2
r, g, b, _ = color
text_color = 'white' if r * g * b < 0.5 else 'darkgrey'
for y, (x, c) in enumerate(zip(xcenters, widths)):
ax.text(x, y, str(int(c)), ha='center', va='center',
color=text_color,fontsize=15)
ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),
loc='lower left', fontsize=15)
return fig, ax
survey(results, category_names)
plt.show() | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |
Metadata and functions | from typing import Dict
import numpy as np
def get_season_rounds(matches: pd.DataFrame) -> Dict:
"""
Return a dictionary with seasons as keys and number of games
in season as values
"""
seasons = matches['season'].unique()
rounds_in_season = dict.fromkeys(seasons,0)
for season in seasons:
rounds_in_season[season] = max(matches[matches['season']==season]['round'])
return rounds_in_season
# What teams participated in each season?
def get_season_teams(matches: pd.DataFrame) -> Dict:
"""
Return a dictionary with seasons as keys and a list of teams who played
in that season as values
"""
seasons = matches['season'].unique()
teams_in_season = {}
for season in seasons:
teams = list(matches[matches['season']==season]['hteam'].unique())
teams.extend(list(matches[matches['season']==season]['ateam'].unique()))
teams = np.unique(teams)
teams_in_season[season] = list(teams)
return teams_in_season
player_cols_to_agg = [
'AFLfantasy',
'Supercoach',
'behinds',
'bounces',
'brownlow',
'clangers',
'clearances',
'contested_marks',
'contested_poss',
'disposals',
'frees_against',
'frees_for',
'goal_assists',
'goals',
'handballs',
'hitouts',
'inside50',
'kicks',
'marks',
'marks_in_50',
'one_percenters',
'rebound50',
'tackles',
'tog',
'uncontested_poss',
'centre_clearances',
'disposal_efficiency',
'effective_disposals',
'intercepts',
'metres_gained',
'stoppage_clearances',
'score_involvements',
'tackles_in_50',
'turnovers'
]
match_cols = [
'odds',
'line'
]
ladder_columns = [
'wins',
'losses',
'draws',
'prem_points',
'played',
'points_for',
'points_against',
'percentage',
'position'
]
misc_columns = [
'games_played'
]
diff_cols = [
]
def get_upcoming_matches(matches, fixture, round_num=None):
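    # Build one row per upcoming fixture by carrying each team's ladder and form
    # columns forward from its most recent completed match, with that match's
    # result applied via update_ladder below.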
if round_num == None: # Get the latest populated round
round_num = matches['round'].iloc[-1] + 1
fixture['round'] = fixture['round'].astype(str)
next_round = fixture[fixture['round']==str(round_num)]
# Get list of home and away
matches.sort_values(by=['season','round'],ascending=False,inplace=True)
teams = list(next_round['hometeam'])
teams = list(zip(teams,list(next_round['awayteam']))) # (home, away)
# Initialise upcoming round
df = pd.DataFrame()
output = pd.DataFrame(columns = h_ladder_cols + h_ladder_form_cols + a_ladder_cols + a_ladder_form_cols)
# For each team, find the data that is relevant to them
for team in teams:
h_last_match = matches[(matches['hteam'] == team[0]) | (matches['ateam'] == team[0])].iloc[0]
a_last_match = matches[(matches['hteam'] == team[1]) | (matches['ateam'] == team[1])].iloc[0]
# Home team conditions, and use the 'game_cols' to update the ladder and ladder form for that team
if team[0] == h_last_match['hteam']: # Home team was home team last game
h_last_match_rel_cols = h_last_match[h_ladder_cols + h_ladder_form_cols + game_cols]
h_last_match_rel_cols = update_ladder(h_last_match_rel_cols,'home')
elif team[0] == h_last_match['ateam']: # Home team was away team last game
h_last_match_rel_cols = h_last_match[a_ladder_cols + a_ladder_form_cols + game_cols]
h_last_match_rel_cols = update_ladder(h_last_match_rel_cols,'away')
# Away team conditions
if team[1] == a_last_match['hteam']: # Away team was home team last game
a_last_match_rel_cols = a_last_match[h_ladder_cols + h_ladder_form_cols + game_cols]
a_last_match_rel_cols = update_ladder(a_last_match_rel_cols,'home')
elif team[1] == a_last_match['ateam']: # Away team was away team last game
a_last_match_rel_cols = a_last_match[a_ladder_cols + a_ladder_form_cols + game_cols]
a_last_match_rel_cols = update_ladder(a_last_match_rel_cols,'away')
h_last_match_rel_cols['hteam'] = team[0]
a_last_match_rel_cols['ateam'] = team[1]
# Make sure the columns are the right format
h_col_final = []
for col in h_last_match_rel_cols.index:
if col[0] == 'h':
h_col_final.append(col)
else:
col = 'h' + col[1:]
h_col_final.append(col)
a_col_final = []
for col in a_last_match_rel_cols.index:
if col[0] == 'a':
a_col_final.append(col)
else:
col = 'a' + col[1:]
a_col_final.append(col)
h_last_match_rel_cols.index = h_col_final
a_last_match_rel_cols.index = a_col_final
# Add all of these to the output.
joined = pd.concat([h_last_match_rel_cols,a_last_match_rel_cols]).to_frame().T
joined.drop('hscore',axis=1,inplace=True)
joined.drop('ascore',axis=1,inplace=True)
        output = pd.concat([output, joined])
matches.sort_values(by=['season','round'],ascending=True,inplace=True)
return output
def update_ladder(last_match_rel_cols, last_game_h_a):
    """
    Roll the result of a team's last match into its ladder and form columns.
    last_game_h_a says whether the team was 'home' or 'away' in that match.
    """
    if last_game_h_a == 'home':
# Update wins, losses, draws and prem points
if last_match_rel_cols['hscore'] > last_match_rel_cols['ascore']:
last_match_rel_cols['h_wins'] = last_match_rel_cols['h_wins'] + 1
last_match_rel_cols['h_wins_form'] = last_match_rel_cols['h_wins_form'] + 1
last_match_rel_cols['h_prem_points'] = last_match_rel_cols['h_prem_points'] + 4
last_match_rel_cols['h_prem_points_form'] = last_match_rel_cols['h_prem_points_form'] + 4
elif last_match_rel_cols['hscore'] < last_match_rel_cols['ascore']:
last_match_rel_cols['h_losses'] = last_match_rel_cols['h_losses'] + 1
last_match_rel_cols['h_losses_form'] = last_match_rel_cols['h_losses_form'] + 1
else:
last_match_rel_cols['h_draws'] = last_match_rel_cols['h_draws'] + 1
last_match_rel_cols['h_prem_points'] = last_match_rel_cols['h_prem_points'] + 2
last_match_rel_cols['h_prem_points_form'] = last_match_rel_cols['h_prem_points_form'] + 2
# Update points for and against
last_match_rel_cols['h_points_for'] = last_match_rel_cols['h_points_for'] + last_match_rel_cols['hscore']
last_match_rel_cols['h_points_against'] = last_match_rel_cols['h_points_against'] + last_match_rel_cols['ascore']
last_match_rel_cols['h_points_for_form'] = last_match_rel_cols['h_points_for_form'] + last_match_rel_cols['hscore']
last_match_rel_cols['h_points_against_form'] = last_match_rel_cols['h_points_against_form'] + last_match_rel_cols['ascore']
# Update percentage
last_match_rel_cols['h_percentage'] = (last_match_rel_cols['h_points_for'] / last_match_rel_cols['h_points_against']) * 100
last_match_rel_cols['h_percentage_form'] = (last_match_rel_cols['h_points_for_form'] / last_match_rel_cols['h_points_against_form']) * 100
if last_game_h_a == 'away':
# Update wins, losses, draws and prem points
if last_match_rel_cols['hscore'] > last_match_rel_cols['ascore']:
last_match_rel_cols['a_losses'] = last_match_rel_cols['a_losses'] + 1
last_match_rel_cols['a_losses_form'] = last_match_rel_cols['a_losses_form'] + 1
elif last_match_rel_cols['hscore'] < last_match_rel_cols['ascore']:
last_match_rel_cols['a_wins'] = last_match_rel_cols['a_wins'] + 1
last_match_rel_cols['a_wins_form'] = last_match_rel_cols['a_wins_form'] + 1
last_match_rel_cols['a_prem_points'] = last_match_rel_cols['a_prem_points'] + 4
last_match_rel_cols['a_prem_points_form'] = last_match_rel_cols['a_prem_points_form'] + 4
else:
last_match_rel_cols['a_draws'] = last_match_rel_cols['a_draws'] + 1
last_match_rel_cols['a_prem_points'] = last_match_rel_cols['a_prem_points'] + 2
last_match_rel_cols['a_prem_points_form'] = last_match_rel_cols['a_prem_points_form'] + 2
# Update points for and against
last_match_rel_cols['a_points_for'] = last_match_rel_cols['a_points_for'] + last_match_rel_cols['ascore']
last_match_rel_cols['a_points_against'] = last_match_rel_cols['a_points_against'] + last_match_rel_cols['hscore']
last_match_rel_cols['a_points_for_form'] = last_match_rel_cols['a_points_for_form'] + last_match_rel_cols['ascore']
last_match_rel_cols['a_points_against_form'] = last_match_rel_cols['a_points_against_form'] + last_match_rel_cols['hscore']
# Update percentage
last_match_rel_cols['a_percentage'] = (last_match_rel_cols['a_points_for'] / last_match_rel_cols['a_points_against']) * 100
last_match_rel_cols['a_percentage_form'] = (last_match_rel_cols['a_points_for_form'] / last_match_rel_cols['a_points_against_form']) * 100
return last_match_rel_cols
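# Hedged usage sketch (not part of the original notebook): a minimal home-team
# ladder row containing only the columns update_ladder touches, with invented
# values, to show how a win is rolled into the running ladder and form figures.
_demo_row = pd.Series({
    'h_wins': 5, 'h_wins_form': 3, 'h_losses': 2, 'h_losses_form': 1,
    'h_draws': 0, 'h_prem_points': 20, 'h_prem_points_form': 12,
    'h_points_for': 600, 'h_points_against': 500,
    'h_points_for_form': 300, 'h_points_against_form': 250,
    'h_percentage': 120.0, 'h_percentage_form': 120.0,
    'hscore': 95, 'ascore': 70,
})
_updated = update_ladder(_demo_row, 'home')
# h_wins -> 6, h_prem_points -> 24, h_percentage recalculated to ~121.9
print(_updated[['h_wins', 'h_prem_points', 'h_percentage']])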
# (column, initial value) pairs; a list keeps the derived column order stable
ladder_columns = [
    ('wins', 0),
    ('losses', 0),
    ('draws', 0),
    ('prem_points', 0),
    ('played', 0),
    ('points_for', 0),
    ('points_against', 0),
    ('percentage', 100),
    ('position', 1)
]
ladder_cols = [i for i,j in ladder_columns]
h_ladder_cols = ['h_' + i for i,j in ladder_columns]
a_ladder_cols = ['a_' + i for i,j in ladder_columns]
h_ladder_form_cols = ['h_' + i + '_form' for i,j in ladder_columns]
a_ladder_form_cols = ['a_' + i + '_form' for i,j in ladder_columns]
h_ladder_form_cols_mapping = dict(zip(ladder_cols,h_ladder_form_cols))
a_ladder_form_cols_mapping = dict(zip(ladder_cols,a_ladder_form_cols))
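# Hedged illustration (not part of the original notebook): what the derived
# names look like, following the h_/a_ prefix and _form suffix conventions
# used throughout the match DataFrame.
print(h_ladder_cols[:3])                   # ['h_wins', 'h_losses', 'h_draws']
print(a_ladder_form_cols[:3])              # ['a_wins_form', 'a_losses_form', 'a_draws_form']
print(h_ladder_form_cols_mapping['wins'])  # 'h_wins_form'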
game_cols = [
'hscore',
'ascore'
]
def update_last_game(df):
    """
    Fold each player's most recent game into their stored career_ and season_
    aggregate columns. `cols_to_update`, the list of aggregate columns to
    refresh, is assumed to be defined in an earlier cell.
    """
    for idx, row in df.iterrows():
        for col in cols_to_update:
            single_game_col = col[7:] # The non-aggregate column, e.g. 'Supercoach' instead of 'career_Supercoach'
            if col[0:7] == 'career_':
                # Add the latest game's value to the games-played-weighted career figure,
                # then divide back by the career games-played count
                df.at[idx,col] = (df.at[idx,single_game_col] + (df.at[idx,col] * (df.at[idx,'career_games_played']))) / df.at[idx,'career_games_played']
            elif col[0:7] == 'season_':
                # Same update, but against the season-to-date figures
                df.at[idx,col] = (df.at[idx,single_game_col] + (df.at[idx,col] * (df.at[idx,'season_games_played']))) / df.at[idx,'season_games_played']
            else:
                raise Exception('Unrecognised aggregate column prefix: ' + col)
    return df
def get_player_names(image_string):
"""
    Return the names of players listed in a team-sheet string (text extracted
    from a team image), with spaces in each name replaced by underscores
"""
names = []
name = ''
i = 0
    while i < len(image_string):
if image_string[i] == ']':
name = ''
i += 2 # Skip the first space
else:
i += 1
continue
name = ''
        while (image_string[i] != ',') and (image_string[i] != '\n'):
name += image_string[i]
i += 1
if i == len(image_string):
break
name = name.replace(' ','_')
names.append(name)
i += 1
return names
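# Hedged usage sketch (not part of the original notebook). Assumption: the
# team-sheet text looks like "[number] Player Name, extra" with one player per
# line; the names below are invented.
_demo_image_string = "[3] Tom Jones, MID\n[22] Sam Smith\n"
# ['Tom_Jones', 'Sam_Smith']
print(get_player_names(_demo_image_string))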
def get_player_data(player_ids):
    """
    Return each listed player's most recent game row (the row whose
    next_matchid is still unset) from the `players` DataFrame loaded earlier.
    """
    last_games = pd.DataFrame(columns=players.columns)
for player in player_ids:
last_game_row = players[(players['playerid']==player) & (players['next_matchid'].isna())]
        last_games = pd.concat([last_games, last_game_row])
return last_games
player_cols = ['AFLfantasy',
'Supercoach',
'behinds',
'bounces',
'brownlow',
'clangers',
'clearances',
'contested_marks',
'contested_poss',
'disposals',
'frees_against',
'frees_for',
'goal_assists',
'goals',
'handballs',
'hitouts',
'inside50',
'kicks',
'marks',
'marks_in_50',
'one_percenters',
'rebound50',
'tackles',
'tog',
'uncontested_poss',
'centre_clearances',
'disposal_efficiency',
'effective_disposals',
'intercepts',
'metres_gained',
'stoppage_clearances',
'score_involvements',
'tackles_in_50',
'turnovers',
'matchid',
'next_matchid',
'team',
'career_AFLfantasy',
'career_Supercoach',
'career_behinds',
'career_bounces',
'career_brownlow',
'career_clangers',
'career_clearances',
'career_contested_marks',
'career_contested_poss',
'career_disposals',
'career_frees_against',
'career_frees_for',
'career_goal_assists',
'career_goals',
'career_handballs',
'career_hitouts',
'career_inside50',
'career_kicks',
'career_marks',
'career_marks_in_50',
'career_one_percenters',
'career_rebound50',
'career_tackles',
'career_tog',
'career_uncontested_poss',
'career_centre_clearances',
'career_disposal_efficiency',
'career_effective_disposals',
'career_intercepts',
'career_metres_gained',
'career_stoppage_clearances',
'career_score_involvements',
'career_tackles_in_50',
'career_turnovers',
'season_AFLfantasy',
'season_Supercoach',
'season_behinds',
'season_bounces',
'season_brownlow',
'season_clangers',
'season_clearances',
'season_contested_marks',
'season_contested_poss',
'season_disposals',
'season_frees_against',
'season_frees_for',
'season_goal_assists',
'season_goals',
'season_handballs',
'season_hitouts',
'season_inside50',
'season_kicks',
'season_marks',
'season_marks_in_50',
'season_one_percenters',
'season_rebound50',
'season_tackles',
'season_tog',
'season_uncontested_poss',
'season_centre_clearances',
'season_disposal_efficiency',
'season_effective_disposals',
'season_intercepts',
'season_metres_gained',
'season_stoppage_clearances',
'season_score_involvements',
'season_tackles_in_50',
'season_turnovers',
'career_games_played',
'season_games_played']
def get_diff_cols(matches: pd.DataFrame) -> pd.DataFrame:
"""
Function to take the columns and separate between home and away teams. Each
metric will have a "diff" column which tells the difference between home
and away for this metric. i.e. there's a diff_percentage column which tells
the difference between home and away for the percentage
"""
print('Creating differential columns')
for col in matches.columns:
if col[0:2] == 'h_':
try:
h_col = col
a_col = 'a_' + col[2:]
diff_col = 'diff_' + col[2:]
matches[diff_col] = matches[h_col] - matches[a_col]
except TypeError:
pass
return matches
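# Hedged usage sketch (not part of the original notebook): column names follow
# the h_/a_ convention used above; the values are invented.
_demo_diff = pd.DataFrame({
    'h_percentage': [120.0, 95.0], 'a_percentage': [101.0, 130.0],
    'h_wins': [5, 2], 'a_wins': [3, 6],
})
_demo_diff = get_diff_cols(_demo_diff)
# diff_percentage -> [19.0, -35.0], diff_wins -> [2, -4]
print(_demo_diff[['diff_percentage', 'diff_wins']])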
from typing import Type
import pandas as pd
class TeamLadder:
def __init__(self, team: str):
self.team = team
for column, init_val in ladder_columns:
setattr(self, column, init_val)
def add_prev_round_team_ladder(self, prev_round_team_ladder):
for col,val in prev_round_team_ladder.items():
self.__dict__[col] = val
def update_home_team(self, match):
self.played += 1
if match.hscore > match.ascore:
self.wins += 1
self.prem_points += 4
elif match.hscore == match.ascore:
self.draws += 1
self.prem_points += 2
else:
self.losses += 1
self.points_for += match.hscore
self.points_against += match.ascore
self.percentage = 100 * (self.points_for / self.points_against)
def update_away_team(self, match):
self.played += 1
if match.hscore < match.ascore:
self.wins += 1
self.prem_points += 4
elif match.hscore == match.ascore:
self.draws += 1
self.prem_points += 2
else:
self.losses += 1
self.points_for += match.ascore
self.points_against += match.hscore
self.percentage = 100 * (self.points_for / self.points_against)
def update_ladder(self, match):
"""
Update the ladder for the team based on the outcome of the game. There
will be two possibilites - the team can be the home or the away team
in the provided match.
"""
if self.team == match.teams['home']:
self.update_home_team(match)
else:
self.update_away_team(match)
class Ladder:
"""
Each round object holds the ladder details for that round for each team
"""
def __init__(self, teams_in_season):
self.teams_in_season = teams_in_season
self.team_ladders = {}
def add_team_ladder(self, team_ladder):
        self.team_ladders[team_ladder.team] = team_ladder
class Team:
"""
Holds team-level data for a particular match
"""
def __init__(self, generic_team_columns, home_or_away: str):
self.home_or_away = home_or_away
for column in generic_team_columns:
setattr(self, column, None)
    def add_data(self, data: pd.DataFrame):
        # home_cols_mapped / away_cols_mapped (raw-to-generic column name
        # mappings) are assumed to be defined in an earlier cell
if self.home_or_away == 'home':
for home_col, generic_col in home_cols_mapped.items():
self.__dict__[generic_col] = data[home_col]
if self.home_or_away == 'away':
for away_col, generic_col in away_cols_mapped.items():
self.__dict__[generic_col] = data[away_col]
class Match:
"""
Holds data about a match, as well as an object for each team
"""
def __init__(self, match_columns):
self.teams = {
'home': None,
'away': None
}
for column in match_columns:
setattr(self, column, None)
def add_data(self, data: pd.DataFrame):
for column in self.__dict__.keys():
try:
self.__dict__[column] = data[column]
except KeyError:
continue
def add_home_team(self, team):
self.teams['home'] = team
def add_away_team(self, team):
self.teams['away'] = team
class Round:
"""
Contains match and ladder data for each round
"""
def __init__(self, round_num: int):
self.round_num = round_num
self.matches = []
self.bye_teams = []
self.ladder = None
def add_match(self, match):
self.matches.append(match)
def add_ladder(self, ladder):
self.ladder = ladder
class Season:
"""
Contains the rounds for a season, and which teams competed
"""
def __init__(self, year: int, teams):
self.year = year
self.teams = teams
self.rounds = {}
def add_round(self, round_obj: Type[Round]):
self.rounds[round_obj.round_num] = round_obj
class History:
"""
Holds all season objects
"""
def __init__(self):
self.seasons = {}
def add_season(self, season):
self.seasons[season.year] = season
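# Hedged construction sketch (not part of the original notebook): wires one
# invented match into the class hierarchy above. The home/away entries are
# stored as plain team names here so TeamLadder.update_ladder can match on
# them by equality.
_history = History()
_season = Season(year=2021, teams=['Richmond', 'Carlton'])
_round = Round(round_num=1)
_match = Match(match_columns=['matchid', 'season', 'round', 'hscore', 'ascore'])
_match.add_data(pd.Series({'matchid': 1, 'season': 2021, 'round': 1,
                           'hscore': 95, 'ascore': 70}))
_match.add_home_team('Richmond')
_match.add_away_team('Carlton')
_round.add_match(_match)
_richmond = TeamLadder('Richmond')
_richmond.update_ladder(_match)  # Richmond was the home team, so its wins/points update
_ladder = Ladder(teams_in_season=['Richmond', 'Carlton'])
_ladder.add_team_ladder(_richmond)
_round.add_ladder(_ladder)
_season.add_round(_round)
_history.add_season(_season)
print(_richmond.wins, _richmond.prem_points, round(_richmond.percentage, 1))  # 1 4 135.7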
from typing import Dict
def get_season_num_games(matches: pd.DataFrame) -> Dict:
"""
    Return a dictionary with seasons as keys and the number of games each
    team plays in that season as values
"""
seasons = matches['season'].unique()
rounds_in_season = dict.fromkeys(seasons,0)
for season in seasons:
rounds_in_season[season] = max(matches[matches['season']==season]['h_played']) + 1
return rounds_in_season | analysis/machine_learning/model.ipynb | criffy/aflengine | gpl-3.0 |