Dataset schema (column name, dtype, observed value range):

hexsha (stringlengths): 40 to 40
size (int64): 6 to 14.9M
ext (stringclasses): 1 value
lang (stringclasses): 1 value
max_stars_repo_path (stringlengths): 6 to 260
max_stars_repo_name (stringlengths): 6 to 119
max_stars_repo_head_hexsha (stringlengths): 40 to 41
max_stars_repo_licenses (sequence)
max_stars_count (int64): 1 to 191k
max_stars_repo_stars_event_min_datetime (stringlengths): 24 to 24
max_stars_repo_stars_event_max_datetime (stringlengths): 24 to 24
max_issues_repo_path (stringlengths): 6 to 260
max_issues_repo_name (stringlengths): 6 to 119
max_issues_repo_head_hexsha (stringlengths): 40 to 41
max_issues_repo_licenses (sequence)
max_issues_count (int64): 1 to 67k
max_issues_repo_issues_event_min_datetime (stringlengths): 24 to 24
max_issues_repo_issues_event_max_datetime (stringlengths): 24 to 24
max_forks_repo_path (stringlengths): 6 to 260
max_forks_repo_name (stringlengths): 6 to 119
max_forks_repo_head_hexsha (stringlengths): 40 to 41
max_forks_repo_licenses (sequence)
max_forks_count (int64): 1 to 105k
max_forks_repo_forks_event_min_datetime (stringlengths): 24 to 24
max_forks_repo_forks_event_max_datetime (stringlengths): 24 to 24
avg_line_length (float64): 2 to 1.04M
max_line_length (int64): 2 to 11.2M
alphanum_fraction (float64): 0 to 1
cells (sequence)
cell_types (sequence)
cell_type_groups (sequence)
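The columns above are easiest to work with once the records are loaded into a dataframe. Below is a minimal sketch of doing so, assuming the records listed further down are available as a JSON-lines export; the file name "notebooks.jsonl" is a placeholder, since the actual file name and storage format of this dump are not given here.

```python
import pandas as pd

# Hypothetical export of the records listed below, one JSON object per line.
df = pd.read_json("notebooks.jsonl", lines=True)

# Sanity checks implied by the schema: a single extension/language class,
# and alphanum_fraction bounded by [0, 1].
assert (df["ext"] == "ipynb").all()
assert (df["lang"] == "Jupyter Notebook").all()
assert df["alphanum_fraction"].between(0, 1).all()

# Example filter: notebooks from repositories with at least 10 stars.
# Star/issue/fork counts may be null, hence the coercion and fillna.
stars = pd.to_numeric(df["max_stars_count"], errors="coerce").fillna(0)
popular = df[stars >= 10]
print(popular[["max_stars_repo_name", "max_stars_repo_path", "size"]])
```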
Record 1
hexsha: d0b866452d2f7245ca8f7120a58c3676933d1fdd
size: 181,708
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: analysis/fbgan/killoran_wgan_mpradragonn.ipynb
max_stars_repo_name: johli/genesis
max_stars_repo_head_hexsha: 5424c1888d4330e505ad87412e7f1cc5dd828888
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 12
max_stars_repo_stars_event_min_datetime: 2020-02-02T14:29:15.000Z
max_stars_repo_stars_event_max_datetime: 2021-09-12T08:05:43.000Z
max_issues_repo_path: analysis/fbgan/killoran_wgan_mpradragonn.ipynb
max_issues_repo_name: johli/genesis
max_issues_repo_head_hexsha: 5424c1888d4330e505ad87412e7f1cc5dd828888
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2022-01-04T08:04:00.000Z
max_issues_repo_issues_event_max_datetime: 2022-01-10T08:49:04.000Z
max_forks_repo_path: analysis/fbgan/killoran_wgan_mpradragonn.ipynb
max_forks_repo_name: johli/genesis
max_forks_repo_head_hexsha: 5424c1888d4330e505ad87412e7f1cc5dd828888
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 3
max_forks_repo_forks_event_min_datetime: 2020-03-10T22:24:05.000Z
max_forks_repo_forks_event_max_datetime: 2021-05-05T13:23:01.000Z
avg_line_length: 413.91344
max_line_length: 153,496
alphanum_fraction: 0.932045
cells / cell_types / cell_type_groups:
[ [ [ "import torch\nfrom torch import optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\nfrom torch.autograd import Variable\n\nfrom sklearn.preprocessing import OneHotEncoder\nimport os, math, glob, argparse\nfrom utils.torch_utils import *\nfrom utils.utils import *\nfrom mpradragonn_predictor_pytorch import *\nimport matplotlib.pyplot as plt\nimport utils.language_helpers\n#plt.switch_backend('agg')\nimport numpy as np\nfrom models import *\n\nfrom wgan_gp_mpradragonn_analyzer_quantile_cutoff import *\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device('cuda:0' if use_cuda else 'cpu')\n\nfrom torch.distributions import Normal as torch_normal\n\nclass IdentityEncoder :\n \n def __init__(self, seq_len, channel_map) :\n self.seq_len = seq_len\n self.n_channels = len(channel_map)\n self.encode_map = channel_map\n self.decode_map = {\n nt: ix for ix, nt in self.encode_map.items()\n }\n \n def encode(self, seq) :\n encoding = np.zeros((self.seq_len, self.n_channels))\n \n for i in range(len(seq)) :\n if seq[i] in self.encode_map :\n channel_ix = self.encode_map[seq[i]]\n encoding[i, channel_ix] = 1.\n\n return encoding\n \n def encode_inplace(self, seq, encoding) :\n for i in range(len(seq)) :\n if seq[i] in self.encode_map :\n channel_ix = self.encode_map[seq[i]]\n encoding[i, channel_ix] = 1.\n \n def encode_inplace_sparse(self, seq, encoding_mat, row_index) :\n raise NotImplementError()\n \n def decode(self, encoding) :\n seq = ''\n \n for pos in range(0, encoding.shape[0]) :\n argmax_nt = np.argmax(encoding[pos, :])\n max_nt = np.max(encoding[pos, :])\n seq += self.decode_map[argmax_nt]\n\n return seq\n \n def decode_sparse(self, encoding_mat, row_index) :\n raise NotImplementError()\n\n", "/home/ubuntu/anaconda3/envs/pytorch_p36_fbgan/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. 
In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ], [ "\nclass ActivationMaximizer(nn.Module) :\n \n def __init__(self, generator_dir, batch_size=1, seq_len=145, latent_size=128, sequence_template=None):\n super(ActivationMaximizer, self).__init__()\n self.generator = Generator_lang(4, seq_len, batch_size, 512)\n self.predictor = DragoNNClassifier(batch_size=batch_size).cnn\n \n self.load_generator(generator_dir)\n \n self.use_cuda = torch.cuda.is_available()\n \n self.x_mask = None\n self.x_template = None\n if sequence_template is not None :\n onehot_mask = np.zeros((seq_len, 4))\n onehot_template = np.zeros((seq_len, 4))\n\n for j in range(len(sequence_template)) :\n if sequence_template[j] == 'N' :\n onehot_mask[j, :] = 1.\n elif sequence_template[j] == 'A' :\n onehot_template[j, 0] = 1.\n elif sequence_template[j] == 'C' :\n onehot_template[j, 1] = 1.\n elif sequence_template[j] == 'G' :\n onehot_template[j, 2] = 1.\n elif sequence_template[j] == 'T' :\n onehot_template[j, 3] = 1.\n \n self.x_mask = Variable(torch.FloatTensor(onehot_mask).unsqueeze(0))\n self.x_template = Variable(torch.FloatTensor(onehot_template).unsqueeze(0))\n if self.use_cuda :\n self.x_mask = self.x_mask.to(device)\n self.x_template = self.x_template.to(device)\n \n self.predictor.eval()\n \n if self.use_cuda :\n self.generator.cuda()\n self.predictor.cuda()\n self.cuda()\n\n def load_generator(self, directory, iteration=None) :\n list_generator = glob.glob(directory + \"G*.pth\")\n generator_file = max(list_generator, key=os.path.getctime)\n self.generator.load_state_dict(torch.load(generator_file))\n \n def forward(self, z) :\n x = self.generator.forward(z)\n \n if self.x_mask is not None :\n x = x * self.x_mask + self.x_template\n \n return self.predictor.forward(x.unsqueeze(2).transpose(1, 3))\n \n def get_pattern(self, z) :\n x = self.generator.forward(z)\n \n if self.x_mask is not None :\n x = x * self.x_mask + self.x_template\n \n return x\n ", "_____no_output_____" ], [ "#Sequence length\nseq_len = 145\nbatch_size = 64\n\n#Sequence decoder\nacgt_encoder = IdentityEncoder(seq_len, {'A':0, 'C':1, 'G':2, 'T':3})\n\n#Sequence template\nsequence_template = 'N' * 145\n\n#Activation maximization model (pytorch)\nact_maximizer = ActivationMaximizer(batch_size=batch_size, seq_len=seq_len, generator_dir='./checkpoint/' + 'mpradragonn_sample' + '/', sequence_template=sequence_template)\n", "[*] Checkpoint 10 found!\n" ], [ "\n#Function for optimizing n sequences for a target predictor\ndef optimize_sequences(act_maximizer, n_seqs, batch_size=1, latent_size=128, n_iters=100, eps1=0., eps2=0.1, noise_std=1e-6, use_adam=True, run_name='default', store_intermediate_n_seqs=None, store_every_iter=100) :\n\n z = Variable(torch.randn(batch_size, latent_size, device=\"cuda\"), requires_grad=True)\n\n norm_var = torch_normal(0, 1)\n\n optimizer = None\n if use_adam :\n optimizer = optim.Adam([z], lr=eps2)\n else :\n optimizer = optim.SGD([z], lr=1)\n\n z.register_hook(lambda grad, batch_size=batch_size, latent_size=latent_size, noise_std=noise_std: grad + noise_std * torch.randn(batch_size, latent_size, device=\"cuda\"))\n\n seqs = []\n fitness_histo = []\n \n n_batches = n_seqs // batch_size\n \n for batch_i in range(n_batches) :\n \n if batch_i % 4 == 0 :\n print(\"Optimizing sequence batch \" + str(batch_i))\n \n #Re-initialize latent GAN seed\n z.data = torch.randn(batch_size, latent_size, device=\"cuda\")\n \n fitness_scores_batch 
= [act_maximizer(z)[:, 0].data.cpu().numpy().reshape(-1, 1)]\n\n for curr_iter in range(n_iters) :\n\n fitness_score = act_maximizer(z)[:, 0]\n \n fitness_loss = -torch.sum(fitness_score)\n z_prior = -torch.sum(norm_var.log_prob(z))\n\n loss = None\n if use_adam :\n loss = fitness_loss\n else :\n loss = eps1 * z_prior + eps2 * fitness_loss\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n fitness_scores_batch.append(fitness_score.data.cpu().numpy().reshape(-1, 1))\n \n if store_intermediate_n_seqs is not None and batch_i * batch_size < store_intermediate_n_seqs and curr_iter % store_every_iter == 0 :\n onehot_batch = act_maximizer.get_pattern(z).data.cpu().numpy()\n seq_batch = [\n acgt_encoder.decode(onehot_batch[k]) for k in range(onehot_batch.shape[0])\n ]\n with open(run_name + \"_curr_iter_\" + str(curr_iter) + \".txt\", \"a+\") as f :\n for i in range(len(seq_batch)) :\n seq = seq_batch[i]\n\n f.write(seq + \"\\n\")\n \n onehot_batch = act_maximizer.get_pattern(z).data.cpu().numpy()\n seq_batch = [\n acgt_encoder.decode(onehot_batch[k]) for k in range(onehot_batch.shape[0])\n ]\n\n seqs.extend(seq_batch)\n fitness_histo.append(np.concatenate(fitness_scores_batch, axis=1))\n \n fitness_histo = np.concatenate(fitness_histo, axis=0)\n \n return seqs, fitness_histo\n", "_____no_output_____" ], [ "\nn_seqs = 4096#960\nn_iters = 1000\n\nrun_name = 'killoran_mpradragonn_' + str(n_seqs) + \"_sequences\" + \"_\" + str(n_iters) + \"_iters_sample_wgan\"\n\nseqs, fitness_scores = optimize_sequences(\n act_maximizer,\n n_seqs,\n batch_size=64,\n latent_size=128,\n n_iters=n_iters,\n eps1=0.,\n eps2=0.1,\n noise_std=1e-6,\n use_adam=True,\n run_name=\"samples/killoran_mpradragonn/\" + run_name,\n store_intermediate_n_seqs=None,#960,\n store_every_iter=100\n)\n", "Optimizing sequence batch 0\nOptimizing sequence batch 4\nOptimizing sequence batch 8\nOptimizing sequence batch 12\nOptimizing sequence batch 16\nOptimizing sequence batch 20\nOptimizing sequence batch 24\nOptimizing sequence batch 28\nOptimizing sequence batch 32\nOptimizing sequence batch 36\nOptimizing sequence batch 40\nOptimizing sequence batch 44\nOptimizing sequence batch 48\nOptimizing sequence batch 52\nOptimizing sequence batch 56\nOptimizing sequence batch 60\n" ], [ "#Plot fitness statistics of optimization runs\n\n#Plot k trajectories\nplot_n_traj = 100\n\nf = plt.figure(figsize=(8, 6))\n\nfor i in range(min(plot_n_traj, n_seqs)) :\n plt.plot(fitness_scores[i, :], linewidth=2, alpha=0.75)\n\nplt.xlabel(\"Training iteration\", fontsize=14)\nplt.ylabel(\"Fitness score\", fontsize=14)\n\nplt.xticks(fontsize=14)\nplt.yticks(fontsize=14)\n\nplt.xlim(0, n_iters)\nplt.ylim(-3, 3)\n\nplt.tight_layout()\nplt.show()\n\n#Plot mean trajectory\n\nf = plt.figure(figsize=(8, 6))\n\nplt.plot(np.mean(fitness_scores, axis=0), linewidth=2, alpha=0.75)\n\nplt.xlabel(\"Training iteration\", fontsize=14)\nplt.ylabel(\"Fitness score\", fontsize=14)\n\nplt.xticks(fontsize=14)\nplt.yticks(fontsize=14)\n\nplt.xlim(0, n_iters)\nplt.ylim(-3, 3)\n\nplt.tight_layout()\nplt.show()\n", "_____no_output_____" ], [ "#Save sequences to file\n\nwith open(run_name + \".txt\", \"wt\") as f :\n for i in range(len(seqs)) :\n seq = seqs[i]\n\n f.write(seq + \"\\n\")\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
Record 2
hexsha: d0b866bf452e042623f3b6802297b8949ab5f4ea
size: 453,303
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: 9-19-2019 - Lecture Notebook.ipynb
max_stars_repo_name: sju-chem264-2019/9-19-2019-lecture-deannapatti
max_stars_repo_head_hexsha: 056bea5e8a9e409082e82c185daa88565de2c01e
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: 9-19-2019 - Lecture Notebook.ipynb
max_issues_repo_name: sju-chem264-2019/9-19-2019-lecture-deannapatti
max_issues_repo_head_hexsha: 056bea5e8a9e409082e82c185daa88565de2c01e
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: 9-19-2019 - Lecture Notebook.ipynb
max_forks_repo_name: sju-chem264-2019/9-19-2019-lecture-deannapatti
max_forks_repo_head_hexsha: 056bea5e8a9e409082e82c185daa88565de2c01e
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 492.720652
max_line_length: 59,152
alphanum_fraction: 0.945707
cells / cell_types / cell_type_groups:
[ [ [ "# Plotting and Functions", "_____no_output_____" ], [ "This notebook will work trough how to plot data and how to define functions. Throughout the lecture we will take a few moments to plot different functions and see how they depend on their parameters", "_____no_output_____" ], [ "## Plotting in Python: Matplot ", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy as sp", "_____no_output_____" ] ], [ [ "Pyplot is a powerful plotting library that can be used to make publication quaility plots. It is also useful for quikly plotting the results of a calcualtion. \n\nThis is a quick demonstration of its use\n\nNote: when you call al library `import matplotlib.pyplot as plt` the way that use it is to do the following `plt.function()` where `function()` is whatever you are trying to call from the library", "_____no_output_____" ] ], [ [ "# Define x and y values for some function\nx = [i for i in range(20)]\ny1 = [i**2 for i in x]\ny2 = [i**3 for i in x]", "_____no_output_____" ] ], [ [ "The methods used above to make the lists is considered very *pythonic*. It works the same as a loop, but outputs all the results into a list. The left-hand most argument is what the list elements will be and the right hand side is the the way the loop will work.", "_____no_output_____" ], [ "When you use pyplot to make a plot, you can add more than one data set to the figure until you render the plot. Once you render the plot it resets", "_____no_output_____" ] ], [ [ "plt.plot(x,y1)\nplt.plot(x,y2)\nplt.xlabel('X', fontsize=24)\nplt.ylabel('Y', fontsize=24)\nplt.legend(['Quadratic', 'Cubic'], loc=0)\nplt.show()", "_____no_output_____" ] ], [ [ "We can call also use numpy fucntions to make our plots. Numpy is a very powerful math library", "_____no_output_____" ] ], [ [ "# linspace will make a list of values from initial to final with however many increments you want\n# this example goes from 0-2.5 with 20 increments\nx=numpy.linspace(0,1.0,20)\nprint(x)", "_____no_output_____" ], [ "exp_func=np.exp(-2*np.pi*x)\nprint(exp_func)", "_____no_output_____" ], [ "plt.plot(x,exp_func, color=\"black\")\nplt.xlabel('x', fontsize=24)\nplt.ylabel(\"y(x)\", fontsize=24)\nplt.show()", "_____no_output_____" ] ], [ [ "All aspects of the plot can be changed. 
The best way to figure out what you want to do is to go to the Matplotlib gallery and choose an image that looks like what you are trying to do.\n\nhttps://matplotlib.org/gallery/index.html", "_____no_output_____" ], [ "### Example: Scatter plot with histograms", "_____no_output_____" ] ], [ [ "import numpy as np\n\n#Fixing random state for reproducibility\nnp.random.seed(19680801)\n\n# the random data\nx = np.random.randn(1000)\ny = np.random.randn(1000)\n\n# definitions for the axes\nleft, width = 0.1, 0.65\nbottom, height = 0.1, 0.65\nspacing = 0.005\n\n\nrect_scatter = [left, bottom, width, height]\nrect_histx = [left, bottom + height + spacing, width, 0.2]\nrect_histy = [left + width + spacing, bottom, 0.2, height]\n\n# start with a rectangular Figure\nplt.figure(figsize=(8, 8))\n\nax_scatter = plt.axes(rect_scatter)\nax_scatter.tick_params(direction='in', top=True, right=True)\nax_histx = plt.axes(rect_histx)\nax_histx.tick_params(direction='in', labelbottom=False)\nax_histy = plt.axes(rect_histy)\nax_histy.tick_params(direction='in', labelleft=False)\n\n# the scatter plot:\nax_scatter.scatter(x, y)\n\n# now determine nice limits by hand:\nbinwidth = 0.25\nlim = np.ceil(np.abs([x, y]).max() / binwidth) * binwidth\nax_scatter.set_xlim((-lim, lim))\nax_scatter.set_ylim((-lim, lim))\n\nbins = np.arange(-lim, lim + binwidth, binwidth)\nax_histx.hist(x, bins=bins)\nax_histy.hist(y, bins=bins, orientation='horizontal')\n\nax_histx.set_xlim(ax_scatter.get_xlim())\nax_histy.set_ylim(ax_scatter.get_ylim())\n\nplt.show()", "_____no_output_____" ] ], [ [ "I don't have to be an expert in making that kind of plot. I just have to understand and guess enough to figure out. I also google things I don't know\n\nhttps://www.google.com/search?client=firefox-b-1-d&q=pyplot+histogram+change+color\n\nhttps://stackoverflow.com/questions/42172440/python-matplotlib-histogram-color?rq=1\n\nhttps://matplotlib.org/examples/color/named_colors.html\n\nThen I can make small changes to have the plot look how I want it to look\n\nNotice below I changed \n\n`ax_scatter.scatter(x, y, color=\"purple\")`, \n\n`ax_histx.hist(x, bins=bins, color = \"skyblue\")`, \n\n`ax_histy.hist(y, bins=bins, orientation='horizontal', color=\"salmon\")`", "_____no_output_____" ] ], [ [ "#Fixing random state for reproducibility\nnp.random.seed(19680801)\n\n# the random data\nx = np.random.randn(1000)\ny = np.random.randn(1000)\n\n# definitions for the axes\nleft, width = 0.1, 0.65\nbottom, height = 0.1, 0.65\nspacing = 0.005\n\n\nrect_scatter = [left, bottom, width, height]\nrect_histx = [left, bottom + height + spacing, width, 0.2]\nrect_histy = [left + width + spacing, bottom, 0.2, height]\n\n# start with a rectangular Figure\nplt.figure(figsize=(8, 8))\n\nax_scatter = plt.axes(rect_scatter)\nax_scatter.tick_params(direction='in', top=True, right=True)\nax_histx = plt.axes(rect_histx)\nax_histx.tick_params(direction='in', labelbottom=False)\nax_histy = plt.axes(rect_histy)\nax_histy.tick_params(direction='in', labelleft=False)\n\n# the scatter plot:\nax_scatter.scatter(x, y, color=\"purple\")\n\n# now determine nice limits by hand:\nbinwidth = 0.25\nlim = np.ceil(np.abs([x, y]).max() / binwidth) * binwidth\nax_scatter.set_xlim((-lim, lim))\nax_scatter.set_ylim((-lim, lim))\n\nbins = np.arange(-lim, lim + binwidth, binwidth)\nax_histx.hist(x, bins=bins, color = \"skyblue\")\nax_histy.hist(y, bins=bins, orientation='horizontal', 
color=\"salmon\")\n\nax_histx.set_xlim(ax_scatter.get_xlim())\nax_histy.set_ylim(ax_scatter.get_ylim())\n\n\n\nplt.show()", "_____no_output_____" ] ], [ [ "Notice how I changed the colors on the plot based off of what I found on the stack exchange. The way to solve issues in the course and computational work is to google them.", "_____no_output_____" ], [ "## Plotting Exersice 1", "_____no_output_____" ], [ "Find a plot from the gallery that you like. Then make some sort of noticable change to it.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\n\n\npoints = np.ones(5) # Draw 5 points for each line\nmarker_style = dict(color='tab:blue', linestyle=':', marker='o',\n markersize=15, markerfacecoloralt='tab:red')\n\nfig, ax = plt.subplots()\n\n# Plot all fill styles.\nfor y, fill_style in enumerate(Line2D.fillStyles):\n ax.text(-0.5, y, repr(fill_style),\n horizontalalignment='center', verticalalignment='center')\n ax.plot(y * points, fillstyle=fill_style, **marker_style)\n\nax.set_axis_off()\nax.set_title('fill style')\n\nplt.show()", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\n\n\npoints = np.ones(5) # Draw 5 points for each line\nmarker_style = dict(color='tab:green', linestyle=':', marker='o',\n markersize=15, markerfacecoloralt='tab:purple')\n\nfig, ax = plt.subplots()\n\n# Plot all fill styles.\nfor y, fill_style in enumerate(Line2D.fillStyles):\n ax.text(-0.5, y, repr(fill_style),\n horizontalalignment='center', verticalalignment='center')\n ax.plot(y * points, fillstyle=fill_style, **marker_style)\n\nax.set_axis_off()\nax.set_title('fill style')\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Plotting Exersice 2", "_____no_output_____" ], [ "Plot a the following functions on the same plot from $ -2\\pi $ to $2\\pi$\n\n$$ \\sin(2\\pi x+\\pi)$$\n$$ \\cos(2\\pi x+\\pi)$$\n$$\\sin(2\\pi x+\\pi)+\\cos(2\\pi x+\\pi)$$", "_____no_output_____" ], [ "This might be useful:\nhttps://docs.scipy.org/doc/numpy/reference/generated/numpy.sin.html\nhttps://docs.scipy.org/doc/numpy/reference/generated/numpy.cos.html#numpy.cos", "_____no_output_____" ] ], [ [ "np.sin(np.pi/2.)\n", "_____no_output_____" ], [ "np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. 
)\n", "_____no_output_____" ], [ "import matplotlib.pylab as plt\nx = np.linspace(-np.pi, np.pi, 201)\nplt.plot(x, np.sin(x))\nplt.xlabel('Angle [rad]')\nplt.ylabel('sin(x)')\nplt.axis('tight')\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pylab as plt\nx = np.linspace(-2*np.pi, 2*np.pi, 201)\nplt.plot(x, np.sin(x))\nplt.xlabel('Angle [rad]')\nplt.ylabel('sin(x)')\nplt.axis('tight')\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pylab as plt\nx = np.linspace(-2*np.pi, 2*np.pi, 201)\nplt.plot(x, np.sin(2*np.pi*x+np.pi))\nplt.plot(x, np.cos(2*np.pi*x+np.pi))\nplt.xlabel('Angle [rad]')\nplt.ylabel('sin(x)')\nplt.axis('tight')\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pylab as plt\nx = np.linspace(-2*np.pi, 2*np.pi, 201)\nplt.plot(x, np.cos(2*np.pi*x+np.pi))\nplt.xlabel('Angle [rad]')\nplt.ylabel('cos(x)')\nplt.axis('tight')\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pylab as plt\nx = np.linspace(-2*np.pi, 2*np.pi, 201)\nplt.plot(x, np.sin(2*np.pi*x+np.pi), color=\"blue\")\nplt.plot(x, np.cos(2*np.pi*x+np.pi), color=\"red\")\nplt.plot(x, np.sin(2*np.pi*x+np.pi)+np.cos(2*np.pi*x+np.pi), color=\"green\")\n\nplt.xlabel('x')\nplt.ylabel('y(x)')\nplt.axis('tight')\n\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pylab as plt\nx = np.linspace(-2*np.pi, 2*np.pi, 201)\nplt.plot(x, np.sin(2*np.pi*x+np.pi), color=\"black\")\nplt.plot(x, np.cos(2*np.pi*x+np.pi), color=\"red\")\nplt.plot(x, np.sin(2*np.pi*x+np.pi)+np.cos(2*np.pi*x+np.pi), color=\"gray\")\n\nplt.xlabel('x')\nplt.ylabel('y(x)')\nplt.axis('tight')\n\nplt.show()", "_____no_output_____" ] ], [ [ "# Lecture plots", "_____no_output_____" ], [ "Periodically during lecture we will take a pause to plot some of the interesting functions that we use in class.", "_____no_output_____" ], [ "## Classical wavefunctions\n\nThe following plot shows the the spacial component of the standard wavefunction with a wavelength of $\\lambda=\\text{1.45 m}$ and a relative amplitude of $A=1$ when the time, $t=0$ and the phase $\\phi=1.0$.", "_____no_output_____" ] ], [ [ "x=np.linspace(0,3.0,100)\nsinx=np.sin(2*np.pi*x+0+1)\nplt.plot(x,sinx, color=\"black\")\nplt.xlabel('x', fontsize=24)\nplt.ylabel(\"y(x)\", fontsize=24)\nplt.show()", "_____no_output_____" ] ], [ [ "Make a new figure where you plot the same wave function at three time points in the future. 
Assume the frequency is $\\nu=.1 \\text{ ms / s} $ Use a different color for each plot", "_____no_output_____" ], [ "## Orthogonality", "_____no_output_____" ], [ "Graphically show that the the following two functions are orthogonal on the interval $-3\\pi$ to $3\\pi$\n$$ \\sin(x) \\text{ and } \\cos(3x)$$\n\nPlot both functions together, then plot the product of both functions and explain why it is orthogonal", "_____no_output_____" ] ], [ [ "import matplotlib.pylab as plt\nx = np.linspace(-3*np.pi, 3*np.pi, 201)\nplt.plot(x, np.sin(x))\nplt.xlabel('Angle [rad]')\nplt.ylabel('sin(x)')\nplt.axis('tight')\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pylab as plt\nx = np.linspace(-3*np.pi, 3*np.pi, 201)\nplt.plot(x, np.sin(x))\nplt.plot(x, np.cos(3*x))\nplt.xlabel('Angle [rad]')\nplt.ylabel('sin(x)')\nplt.axis('tight')\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pylab as plt\nx = np.linspace(-3*np.pi, 3*np.pi, 201)\nprod=np.sin(x)*np.cos(3*x)\nplt.plot(x, np.sin(x))\nplt.plot(x, np.cos(3*x))\nplt.xlabel('Angle [rad]')\nplt.ylabel('sin(x)')\nplt.axis('tight')\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pylab as plt\nx = np.linspace(-3*np.pi, 3*np.pi, 201)\nprod=np.sin(x)*np.cos(3*x)\nplt.plot(x, prod, color=\"blue\")\nplt.xlabel('Angle [rad]')\nplt.ylabel('sin(x)')\nplt.axis('tight')\nplt.show()", "_____no_output_____" ], [ "prod=np.sin(x)*np.cos(3*x)\n\n\n", "_____no_output_____" ], [ "prod=np.sin(x)*np.cos(3*x)\nx = np.linspace(-3*np.pi, 3*np.pi, 201)\nexp_func=prod\nnp.trapz(exp_func,x)", "_____no_output_____" ] ], [ [ "Use the numpy trapezoid rule integrator to show the the two functions are orthogonal\n`np.trapz(y,x)`\n\nhttps://docs.scipy.org/doc/numpy/reference/generated/numpy.trapz.html", "_____no_output_____" ] ], [ [ "# Example code\nx=numpy.linspace(0,1.0,20)\nexp_func=np.exp(-2*np.pi*x)\nnp.trapz(exp_func,x)", "_____no_output_____" ], [ "# Your code here", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
Record 3
hexsha: d0b86962dc3eba638163b53542b95c823381f86b
size: 148,834
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: notebooks/20200818_Akiyama_AFM.ipynb
max_stars_repo_name: dineshpinto/qudiamond-analysis
max_stars_repo_head_hexsha: 6d3669f609b94ef0dcbd6201a85a5152dceabb17
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2021-05-18T18:46:57.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-27T14:14:37.000Z
max_issues_repo_path: notebooks/20200818_Akiyama_AFM.ipynb
max_issues_repo_name: dineshpinto/qudiamond-analysis
max_issues_repo_head_hexsha: 6d3669f609b94ef0dcbd6201a85a5152dceabb17
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: notebooks/20200818_Akiyama_AFM.ipynb
max_forks_repo_name: dineshpinto/qudiamond-analysis
max_forks_repo_head_hexsha: 6d3669f609b94ef0dcbd6201a85a5152dceabb17
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 224.14759
max_line_length: 46,072
alphanum_fraction: 0.880861
cells / cell_types / cell_type_groups:
[ [ [ "import os\nimport sys\nsys.path.append('../')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pprint import pprint\nfrom scipy.optimize import curve_fit\n\nimport src.io as sio\nimport src.preprocessing as spp\nimport src.fitting as sft", "_____no_output_____" ], [ "AFM_FOLDER = sio.get_folderpath(\"20200818_Akiyama_AFM\")\nAFM_FOLDER1 = sio.get_folderpath(\"20200721_Akiyama_AFM\")\nAFM_FOLDER2 = sio.get_folderpath(\"20200824_Akiyama_AFM\")\nAFM_FOLDER3 = sio.get_folderpath(\"20200826_TFSC_Preamp_AFM/11613_Tip_5/Akiyama_Tip_Stage\")\nAFM_FOLDER4 = sio.get_folderpath(\"20200826_TFSC_Preamp_AFM/11613_Tip_5/Custom_Tip_Stage\")\nAFM_FOLDER5 = sio.get_folderpath(\"20200828_Tip_Approach1\")\nAFM_FOLDER6 = sio.get_folderpath(\"20200901_Tip_Approach_2/Actual_tip_approach\")", "_____no_output_____" ] ], [ [ "# Approach", "_____no_output_____" ] ], [ [ "params, data = sio.read_dat(AFM_FOLDER6 + \"HistoryData001.dat\")\namplitude = data[\"Amplitude (m)\"].values\nfig, ax = plt.subplots()\nax.plot(amplitude*1e9)\nax.set_ylabel(\"Amplitude (nm)\")\nax.set_xlabel(\"Time (a.u.)\")\n#plt.savefig(\"snap.jpg\", dpi=600)", "_____no_output_____" ] ], [ [ "## 20200721_Akiyama_AFM", "_____no_output_____" ] ], [ [ "params, data = sio.read_dat(AFM_FOLDER1 + \"frq-sweep002.dat\")\nfreq_shift = data[\"Frequency Shift (Hz)\"].values\namplitude = data[\"Amplitude (m)\"].values\nphase = data[\"Phase (deg)\"].values\namp_freq_sweep = sft.fit_fano(freq_shift, amplitude, linear_offset=True)\nphase_freq_sweep = sft.fit_fano(freq_shift, phase)", "_____no_output_____" ], [ "%matplotlib inline\n\nfig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)\nax1.plot(freq_shift, amplitude*1e12)\n#ax1.plot(freq_shift, amp_freq_sweep.best_fit)\nax1.set_ylabel(\"Amplitude (pm)\")\n\nax2.plot(freq_shift, phase)\n#ax2.plot(freq_shift, phase_freq_sweep.best_fit)\nax2.set_ylabel(data.columns[3])\nax2.set_xlabel(data.columns[0])\n\n#plt.savefig(\"second.jpg\", dpi=600)", "_____no_output_____" ] ], [ [ "Quality factor can be calculated as $ Q = \\frac{f_R}{\\Delta f} $", "_____no_output_____" ] ], [ [ "print(f'Q-factor= {params[\"f_res (Hz)\"] / amp_freq_sweep.params[\"fwhm\"].value}')", "_____no_output_____" ] ], [ [ "## 20200818_Akiyama_AFM", "_____no_output_____" ] ], [ [ "params, data = sio.read_dat(AFM_FOLDER + \"frq-sweep001.dat\")\n#pprint(params, sort_dicts=False)\nfreq_shift = data[\"Frequency Shift (Hz)\"]\namplitude = data[\"Amplitude (m)\"]\nphase = data[\"Phase (deg)\"]\nfano = sft.fit_fano(freq_shift, amplitude)\nlorentzian = sft.fit_fano(freq_shift, phase)\nparams", "_____no_output_____" ] ], [ [ "## Equations for calculating Q factor\n\n$$ Q = \\frac{f_R}{\\Delta f} $$\n\n$$ Q = \\frac{A(\\omega_0)}{A_{in}} $$", "_____no_output_____" ] ], [ [ "f_res = 44379.7064\nsigma = 62.2841355\nprint(f_res/sigma)\n\nA_drive = 50e-3\nA_res = 28.3e-6 * 1 / 500e-6\nprint(A_res/A_drive)\n\n# Calibration\nA_drive = 50e-3\nosc_amp = 50e-9\n\nprint(osc_amp/A_drive)", "712.5362830154398\n1.132\n1e-06\n" ] ], [ [ "## Plot frequency sweep curves", "_____no_output_____" ] ], [ [ "fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)\nax1.plot(freq_shift, amplitude)\nax1.plot(freq_shift, fano.best_fit)\nax1.set_ylabel(data.columns[2])\n\nax2.plot(freq_shift, phase)\nax2.plot(freq_shift, lorentzian.best_fit)\nax2.set_ylabel(data.columns[3])\nax2.set_xlabel(data.columns[1])", "_____no_output_____" ] ], [ [ "## Extract fit values", "_____no_output_____" ] ], [ [ "print(\"{} = {:.1f} +- 
{:.1f}\".format(fano.params[\"sigma\"].name, fano.params[\"sigma\"].value, fano.params[\"sigma\"].stderr))\nprint(\"{} = {:.2e} +- {:.0e}\".format(fano.params[\"amplitude\"].name, fano.params[\"amplitude\"].value, fano.params[\"amplitude\"].stderr))", "_____no_output_____" ] ], [ [ "# 20200824_Akiyama_AFM\n\n## Automatically read files from disk\n\nReads all files stored in **AFM_FOLDER2 = \"20200824_Akiyama_AFM/\"** and plots the amplitude and phase data.\n\nOptionally, the data can be fit to Fano resonances by setting the variable\n```python\nfit = True\n```\nThe Q-factor is calculated as:\n\n$$ Q = \\frac{f_R}{\\Delta f} = \\frac{f_R}{2 \\sigma} $$\n\nErrors are calculated as (this also gives an estimate of the SNR):\n$$ \\frac{\\Delta Q}{Q} = \\sqrt{ \\left( \\frac{\\Delta (\\Delta f)}{\\Delta f} \\right)^2 + \\left( \\frac{\\Delta (\\sigma)}{\\sigma} \\right)^2 } $$\n\nAnother estimate of the SNR, is the Chi square or weighted sum of squared deviations (lower is better):\n$$ \\chi^2 = \\sum_{i} {\\frac{(O_i - C_i)^2}{\\sigma_i^2}} $$", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nfit = False # Setting to True will take slightly longer due to the fitting protocols\n\nfiles = []\nfor file in os.listdir(AFM_FOLDER2):\n if file.endswith(\".dat\"):\n files.append(file) \n\nfig, ax = plt.subplots(nrows=len(files), ncols=2)\n\nfor idx, file in enumerate(files):\n params, data = sio.read_dat(AFM_FOLDER2 + file)\n freq_shift = data[\"Frequency Shift (Hz)\"]\n amplitude = data[\"Amplitude (m)\"]\n phase = data[\"Phase (deg)\"]\n\n ax[idx, 0].plot(freq_shift, amplitude)\n ax[idx, 0].set_ylabel(data.columns[2])\n ax[idx, 0].set_title(file)\n\n ax[idx, 1].plot(freq_shift, phase)\n ax[idx, 1].set_ylabel(data.columns[3])\n ax[idx, 1].set_title(file)\n \n if fit:\n fano1 = sft.fit_fano(freq_shift, amplitude)\n q_factor = (params[\"Center Frequency (Hz)\"] + fano1.params[\"center\"].value) / (2 * fano1.params[\"sigma\"].value)\n q_factor_err = q_factor * np.sqrt((fano1.params[\"center\"].stderr/fano1.params[\"center\"].value)**2 + (fano1.params[\"sigma\"].stderr/fano1.params[\"sigma\"].value)**2)\n ax[idx, 0].plot(freq_shift, fano1.best_fit, label=\"Q={:.0f}$\\pm{:.0f}$\".format(q_factor, q_factor_err))\n ax[idx, 0].legend()\n fano2 = sft.fit_fano(freq_shift, phase, linear_offset=True)\n ax[idx, 1].plot(freq_shift, fano2.best_fit)\n print(\"chi-square ({}) = {:.2e}\".format(file, fano1.chisqr))\n\nfig.tight_layout()\nfig.text(0.5, 0.02, data.columns[1], ha='center', va='center')", "_____no_output_____" ] ], [ [ "## 20200826_TFSC_Preamp_AFM \n### 11613_Tip_5", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nfit = False # Setting to True will take slightly longer due to the fitting protocols\n\nfiles = []\nfor file in os.listdir(AFM_FOLDER4):\n if file.endswith(\".dat\"):\n files.append(file) \n\nfig, ax = plt.subplots(nrows=len(files), ncols=2)\n\nfor idx, file in enumerate(files):\n params, data = sio.read_dat(AFM_FOLDER4 + file)\n freq_shift = data[\"Frequency Shift (Hz)\"]\n amplitude = data[\"Amplitude (m)\"]\n phase = data[\"Phase (deg)\"]\n\n ax[idx, 0].plot(freq_shift, amplitude)\n ax[idx, 0].set_ylabel(data.columns[2])\n ax[idx, 0].set_title(file)\n\n ax[idx, 1].plot(freq_shift, phase)\n ax[idx, 1].set_ylabel(data.columns[3])\n ax[idx, 1].set_title(file)\n \n if fit:\n fano1 = sft.fit_fano(freq_shift, amplitude)\n q_factor = (params[\"Center Frequency (Hz)\"] + fano1.params[\"center\"].value) / (fano1.params[\"sigma\"].value)\n q_factor_err = q_factor * 
np.sqrt((fano1.params[\"center\"].stderr/fano1.params[\"center\"].value)**2 + (fano1.params[\"sigma\"].stderr/fano1.params[\"sigma\"].value)**2)\n ax[idx, 0].plot(freq_shift, fano1.best_fit, label=\"Q={:.0f}$\\pm{:.0f}$\".format(q_factor, q_factor_err))\n ax[idx, 0].legend()\n fano2 = sft.fit_fano(freq_shift, phase, linear_offset=True)\n ax[idx, 1].plot(freq_shift, fano2.best_fit)\n print(\"chi-square ({}) = {:.2e}\".format(file, fano1.chisqr))\n\nfig.tight_layout()\nfig.text(0.5, 0.02, data.columns[1], ha='center', va='center')", "_____no_output_____" ], [ "omega_0 = 1\nomega = np.linspace(0, 2, 1000)\nQ = 1\n\nratio = omega / omega_0\n\nphi = np.arctan(-ratio / (Q * (1 - ratio**2)))\n\nfid, ax = plt.subplots()\nax.plot(ratio, phi)", "_____no_output_____" ] ], [ [ "# Calibration from Thermal Noise density\n\nFrom Atomic Force Microscopy, Second Edition by Bert Voigtländer\n\nSection 11.6.5 Experimental Determination of the Sensitivity and Spring Constant in AFM Without Tip-Sample Contact\n\nEq. 11.28 and 11.26", "_____no_output_____" ] ], [ [ "%matplotlib widget\nfile = \"SignalAnalyzer_Spectrum001\"\nparams, data = sio.read_dat(AFM_FOLDER4 + file)\n\ncalibration_params = sft.find_afm_calibration_parameters(data, frequency_range=[40000, 48000], Q=1000, f_0_guess=44000)\nfig, ax = plt.subplots()\nax.plot(calibration_params[\"Frequency (Hz)\"], calibration_params[\"PSD squared (V**2/Hz)\"])\nax.plot(calibration_params[\"Frequency (Hz)\"], calibration_params[\"PSD squared fit (V**2/Hz)\"])\nprint(\"Calibration (m/V) =\", calibration_params[\"Calibration (m/V)\"])", "_____no_output_____" ], [ "%matplotlib inline\n\nfit = False # Setting to True will take slightly longer due to the fitting protocols\n\nfiles = []\nfor file in os.listdir(\"../../Data/\" + AFM_FOLDER4):\n if file.endswith(\".dat\"):\n files.append(file) \n\n\nfiles = [\"frq-sweep002.dat\"]\n \nfig, ax = plt.subplots(nrows=len(files), ncols=2)\n\nfor idx, file in enumerate(files):\n params, data = sio.read_dat(AFM_FOLDER4 + file)\n freq_shift = data[\"Frequency Shift (Hz)\"]\n amplitude = data[\"Amplitude (m)\"]\n phase = data[\"Phase (deg)\"]\n \n if len(files) == 1:\n ax[0].plot(freq_shift, amplitude)\n ax[0].set_ylabel(data.columns[2])\n\n ax[1].plot(freq_shift, phase)\n ax[1].set_ylabel(data.columns[3])\n else:\n ax[idx, 0].plot(freq_shift, amplitude)\n ax[idx, 0].set_ylabel(data.columns[2])\n ax[idx, 0].set_title(file)\n\n ax[idx, 1].plot(freq_shift, phase)\n ax[idx, 1].set_ylabel(data.columns[3])\n ax[idx, 1].set_title(file)\n\n if fit:\n fano1 = sft.fit_fano(freq_shift, amplitude)\n #q_factor = (params[\"Center Frequency (Hz)\"] + fano1.params[\"center\"].value) / (fano1.params[\"sigma\"].value)\n #q_factor_err = q_factor * np.sqrt((fano1.params[\"center\"].stderr/fano1.params[\"center\"].value)**2 + (fano1.params[\"sigma\"].stderr/fano1.params[\"sigma\"].value)**2)\n ax[idx, 0].plot(freq_shift, fano1.best_fit)\n ax[idx, 0].legend()\n fano2 = sft.fit_fano(freq_shift, phase, linear_offset=True)\n ax[idx, 1].plot(freq_shift, fano2.best_fit)\n print(\"chi-square ({}) = {:.2e}\".format(file, fano1.chisqr))\n\nfig.tight_layout()\nfig.text(0.5, 0.02, data.columns[1], ha='center', va='center')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
Record 4
hexsha: d0b869bba927fc9b85999ec8e0000048898953de
size: 80,551
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: cerca-1-imatge.ipynb
max_stars_repo_name: gdsa-upc/2018-Equip2
max_stars_repo_head_hexsha: 9892c059126d314ff21d29fd88c7d1cd67c03eb8
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: cerca-1-imatge.ipynb
max_issues_repo_name: gdsa-upc/2018-Equip2
max_issues_repo_head_hexsha: 9892c059126d314ff21d29fd88c7d1cd67c03eb8
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: cerca-1-imatge.ipynb
max_forks_repo_name: gdsa-upc/2018-Equip2
max_forks_repo_head_hexsha: 9892c059126d314ff21d29fd88c7d1cd67c03eb8
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2022-02-15T18:59:18.000Z
max_forks_repo_forks_event_max_datetime: 2022-02-15T18:59:18.000Z
avg_line_length: 404.778894
max_line_length: 76,072
alphanum_fraction: 0.942037
cells / cell_types / cell_type_groups:
[ [ [ "import sklearn\nimport pickle\nfrom PIL import Image, ImageOps\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "#cargar de un pickle\n\nwith open('/home/jupyter/Pickles/Descriptores/train.pickle', 'rb') as train:\n train_list = pickle.load(train)\n\n\nwith open('/home/jupyter/Pickles/Descriptores/validation.pickle', 'rb') as validation:\n validation_list = pickle.load(validation)", "_____no_output_____" ], [ "with open('/home/jupyter/Pickles/Imagenes/images_train.pickle', 'rb') as trains:\n images_train_list = pickle.load(trains)\n\n\nwith open('/home/jupyter/Pickles/Imagenes/images_validation.pickle', 'rb') as validations:\n images_validation_list = pickle.load(validations)", "_____no_output_____" ], [ "#cerca con todas las imagenes\nv_val = np.reshape(validation_list[70], (1,4096))\nv_train = np.reshape(train_list, (1194,4096))", "_____no_output_____" ], [ "v_train = sklearn.preprocessing.normalize(train_list, norm='l2', axis=1, copy=True, return_norm=False)\nv_val = sklearn.preprocessing.normalize(v_val, norm='l2', axis=1, copy=True, return_norm=False)", "_____no_output_____" ], [ "train_list_t = v_train.transpose()", "_____no_output_____" ], [ "res= np.matmul(v_val, train_list_t)", "_____no_output_____" ], [ "ranks = np.argsort(res, axis=1)[:,::-1]\nx_train_img = []\nx_val_img = []", "_____no_output_____" ], [ "x_val_img.append(np.array(images_validation_list[70]))", "_____no_output_____" ], [ "j = 0\nfor j in range (1194):\n x_train_img.append(np.array(images_train_list[j]))", "_____no_output_____" ], [ "h,w = (224, 224)\nnew_image= Image.new('RGB', (h*5,w*1))\n\n# Visualize ranks of the 10 queries\noffset = 10 # it will show results from query #'offset' to #offset+10\nfor q in range(1):\n ranks_q = ranks[q*(offset+1),:]\n for i in range(5):\n new_image.paste(Image.fromarray(x_train_img[ranks_q[i]]), (h*(1+i),w*q))\n # visualize query\n ima_q = Image.fromarray(x_val_img[0])\n ima_q = ImageOps.expand(ima_q, border=15, fill='orange')\n\n new_image.paste(ima_q, (0,w*q))", "_____no_output_____" ], [ "plt.imshow(new_image)\nplt.axis('off')\nplt.show()", "_____no_output_____" ], [ "new_image.save('no_funciona.jpg')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
Record 5
hexsha: d0b88082b8862a8ffd8a3e8459579c79eede5b3d
size: 30,102
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: cat_vs_dog_0_0_2.ipynb
max_stars_repo_name: yarusx/cat_vs_dogo
max_stars_repo_head_hexsha: ab008d092e74953a3c00f540af5e90185b09a155
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-02-23T10:01:57.000Z
max_stars_repo_stars_event_max_datetime: 2021-02-23T10:01:57.000Z
max_issues_repo_path: cat_vs_dog_0_0_2.ipynb
max_issues_repo_name: yarusx/cat_vs_dogo
max_issues_repo_head_hexsha: ab008d092e74953a3c00f540af5e90185b09a155
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: cat_vs_dog_0_0_2.ipynb
max_forks_repo_name: yarusx/cat_vs_dogo
max_forks_repo_head_hexsha: ab008d092e74953a3c00f540af5e90185b09a155
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 38.992228
max_line_length: 231
alphanum_fraction: 0.453458
cells / cell_types / cell_type_groups:
[ [ [ "<a href=\"https://colab.research.google.com/github/yarusx/cat-vs-dogo/blob/main/cat_vs_dog_0_0_2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport tensorflow as tf\n\nfrom tensorflow.keras.preprocessing import image_dataset_from_directory", "_____no_output_____" ], [ "_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'\npath_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)\nPATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')\n\ntrain_dir = os.path.join(PATH, 'train')\nvalidation_dir = os.path.join(PATH, 'validation')\n\nBATCH_SIZE = 32\nIMG_SIZE = (160, 160)\n\ntrain_dataset = image_dataset_from_directory(train_dir,\n shuffle=True,\n batch_size=BATCH_SIZE,\n image_size=IMG_SIZE)", "Downloading data from https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip\n68608000/68606236 [==============================] - 1s 0us/step\nFound 2000 files belonging to 2 classes.\n" ], [ "validation_dataset = image_dataset_from_directory(validation_dir,\n shuffle=True,\n batch_size=BATCH_SIZE,\n image_size=IMG_SIZE)", "Found 1000 files belonging to 2 classes.\n" ], [ "class_names = train_dataset.class_names\n\n# plt.figure(figsize=(10, 10))\n# for images, labels in train_dataset.take(1):\n# for i in range(9):\n# ax = plt.subplot(3, 3, i + 1)\n# plt.imshow(images[i].numpy().astype(\"uint8\"))\n# plt.title(class_names[labels[i]])\n# plt.axis(\"off\")", "_____no_output_____" ], [ "val_batches = tf.data.experimental.cardinality(validation_dataset)\ntest_dataset = validation_dataset.take(val_batches // 5)\nvalidation_dataset = validation_dataset.skip(val_batches // 5)", "_____no_output_____" ], [ "AUTOTUNE = tf.data.experimental.AUTOTUNE\n\ntrain_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)\nvalidation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)\ntest_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)", "_____no_output_____" ], [ "data_augmentation = tf.keras.Sequential([\n tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),\n tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),\n])", "_____no_output_____" ], [ "# for image, _ in train_dataset.take(1):\n# plt.figure(figsize=(10, 10))\n# first_image = image[0]\n# for i in range(9):\n# ax = plt.subplot(3, 3, i + 1)\n# augmented_image = data_augmentation(tf.expand_dims(first_image, 0))\n# plt.imshow(augmented_image[0] / 255)\n# plt.axis('off')", "_____no_output_____" ], [ "rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1)", "_____no_output_____" ], [ "# Create the base model from the pre-trained model MobileNet V2\nIMG_SHAPE = IMG_SIZE + (3,)\nbase_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,\n include_top=False,\n weights='imagenet')", "Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_160_no_top.h5\n9412608/9406464 [==============================] - 0s 0us/step\n" ], [ "image_batch, label_batch = next(iter(train_dataset))\nfeature_batch = base_model(image_batch)\nprint(feature_batch.shape)", "(32, 5, 5, 1280)\n" ], [ "base_model.trainable = False", "_____no_output_____" ], [ "global_average_layer = tf.keras.layers.GlobalAveragePooling2D()\nfeature_batch_average = 
global_average_layer(feature_batch)\nprint(feature_batch_average.shape)", "(32, 1280)\n" ], [ "prediction_layer = tf.keras.layers.Dense(1)\nprediction_batch = prediction_layer(feature_batch_average)\nprint(prediction_batch.shape)", "(32, 1)\n" ], [ "inputs = tf.keras.Input(shape=(160, 160, 3))\nx = data_augmentation(inputs)\nx = rescale(x)\nx = base_model(x, training=False)\nx = global_average_layer(x)\nx = tf.keras.layers.Dropout(0.4)(x)\noutputs = prediction_layer(x)\nmodel = tf.keras.Model(inputs, outputs)", "_____no_output_____" ], [ "base_learning_rate = 0.0001\nmodel.compile(optimizer=tf.keras.optimizers.Adam(lr=base_learning_rate),\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=['accuracy'])", "_____no_output_____" ], [ "loss0, accuracy0 = model.evaluate(validation_dataset)\nprint(\"initial loss: {:.2f}\".format(loss0))\nprint(\"initial accuracy: {:.2f}\".format(accuracy0))", "26/26 [==============================] - 1s 45ms/step - loss: 0.6649 - accuracy: 0.5408\ninitial loss: 0.66\ninitial accuracy: 0.54\n" ], [ "initial_epochs = 1\nhistory = model.fit(train_dataset,\n epochs=initial_epochs,\n validation_data=validation_dataset)\nval_acc = history.history['val_accuracy']", "63/63 [==============================] - 5s 77ms/step - loss: 0.6902 - accuracy: 0.6105 - val_loss: 0.4842 - val_accuracy: 0.6856\n" ], [ "while np.mean(val_acc)*100 < 98.5:\n initial_epochs = 3\n history = model.fit(train_dataset,\n epochs=initial_epochs,\n validation_data=validation_dataset)\n \n val_acc = history.history['val_accuracy']\n \ntry:\n !mkdir -p saved_model\nexcept:\n pass\n\nmodel.save('saved_model/dvc/')\n\n!zip -r dvc.zip saved_model/dvc/\n\nfrom google.colab import files\nfiles.download(\"dvc.zip\")", "Epoch 1/3\n63/63 [==============================] - 5s 75ms/step - loss: 0.1765 - accuracy: 0.9230 - val_loss: 0.0850 - val_accuracy: 0.9752\nEpoch 2/3\n63/63 [==============================] - 5s 74ms/step - loss: 0.1732 - accuracy: 0.9235 - val_loss: 0.0831 - val_accuracy: 0.9765\nEpoch 3/3\n63/63 [==============================] - 5s 73ms/step - loss: 0.1832 - accuracy: 0.9210 - val_loss: 0.0858 - val_accuracy: 0.9715\nEpoch 1/3\n63/63 [==============================] - 5s 77ms/step - loss: 0.1874 - accuracy: 0.9150 - val_loss: 0.0790 - val_accuracy: 0.9790\nEpoch 2/3\n63/63 [==============================] - 5s 75ms/step - loss: 0.1710 - accuracy: 0.9310 - val_loss: 0.0793 - val_accuracy: 0.9777\nEpoch 3/3\n63/63 [==============================] - 5s 73ms/step - loss: 0.1723 - accuracy: 0.9260 - val_loss: 0.0767 - val_accuracy: 0.9765\nEpoch 1/3\n63/63 [==============================] - 5s 73ms/step - loss: 0.1757 - accuracy: 0.9245 - val_loss: 0.0798 - val_accuracy: 0.9765\nEpoch 2/3\n63/63 [==============================] - 5s 73ms/step - loss: 0.1678 - accuracy: 0.9280 - val_loss: 0.0710 - val_accuracy: 0.9790\nEpoch 3/3\n63/63 [==============================] - 5s 72ms/step - loss: 0.1597 - accuracy: 0.9325 - val_loss: 0.0757 - val_accuracy: 0.9777\nEpoch 1/3\n63/63 [==============================] - 5s 72ms/step - loss: 0.1569 - accuracy: 0.9320 - val_loss: 0.0688 - val_accuracy: 0.9814\nEpoch 2/3\n63/63 [==============================] - 5s 72ms/step - loss: 0.1576 - accuracy: 0.9350 - val_loss: 0.0723 - val_accuracy: 0.9802\nEpoch 3/3\n63/63 [==============================] - 5s 73ms/step - loss: 0.1667 - accuracy: 0.9315 - val_loss: 0.0694 - val_accuracy: 0.9802\nEpoch 1/3\n63/63 [==============================] - 5s 73ms/step - loss: 0.1567 - accuracy: 
0.9305 - val_loss: 0.0655 - val_accuracy: 0.9827\nEpoch 2/3\n63/63 [==============================] - 5s 73ms/step - loss: 0.1526 - accuracy: 0.9330 - val_loss: 0.0641 - val_accuracy: 0.9839\nEpoch 3/3\n63/63 [==============================] - 5s 73ms/step - loss: 0.1564 - accuracy: 0.9335 - val_loss: 0.0676 - val_accuracy: 0.9802\nEpoch 1/3\n63/63 [==============================] - 5s 74ms/step - loss: 0.1537 - accuracy: 0.9300 - val_loss: 0.0669 - val_accuracy: 0.9802\nEpoch 2/3\n63/63 [==============================] - 5s 78ms/step - loss: 0.1556 - accuracy: 0.9285 - val_loss: 0.0694 - val_accuracy: 0.9802\nEpoch 3/3\n63/63 [==============================] - 5s 74ms/step - loss: 0.1600 - accuracy: 0.9300 - val_loss: 0.0691 - val_accuracy: 0.9802\nEpoch 1/3\n63/63 [==============================] - 5s 74ms/step - loss: 0.1506 - accuracy: 0.9335 - val_loss: 0.0622 - val_accuracy: 0.9827\nEpoch 2/3\n63/63 [==============================] - 5s 74ms/step - loss: 0.1511 - accuracy: 0.9305 - val_loss: 0.0622 - val_accuracy: 0.9827\nEpoch 3/3\n63/63 [==============================] - 5s 78ms/step - loss: 0.1444 - accuracy: 0.9420 - val_loss: 0.0586 - val_accuracy: 0.9827\nEpoch 1/3\n63/63 [==============================] - 5s 83ms/step - loss: 0.1524 - accuracy: 0.9385 - val_loss: 0.0641 - val_accuracy: 0.9814\nEpoch 2/3\n63/63 [==============================] - 5s 77ms/step - loss: 0.1547 - accuracy: 0.9335 - val_loss: 0.0584 - val_accuracy: 0.9839\nEpoch 3/3\n63/63 [==============================] - 5s 79ms/step - loss: 0.1479 - accuracy: 0.9405 - val_loss: 0.0653 - val_accuracy: 0.9802\nEpoch 1/3\n63/63 [==============================] - 5s 76ms/step - loss: 0.1475 - accuracy: 0.9415 - val_loss: 0.0600 - val_accuracy: 0.9814\nEpoch 2/3\n63/63 [==============================] - 5s 78ms/step - loss: 0.1344 - accuracy: 0.9510 - val_loss: 0.0563 - val_accuracy: 0.9864\nEpoch 3/3\n63/63 [==============================] - 5s 79ms/step - loss: 0.1363 - accuracy: 0.9450 - val_loss: 0.0627 - val_accuracy: 0.9814\nEpoch 1/3\n63/63 [==============================] - 5s 79ms/step - loss: 0.1341 - accuracy: 0.9450 - val_loss: 0.0570 - val_accuracy: 0.9839\nEpoch 2/3\n63/63 [==============================] - 5s 76ms/step - loss: 0.1428 - accuracy: 0.9345 - val_loss: 0.0603 - val_accuracy: 0.9802\nEpoch 3/3\n63/63 [==============================] - 5s 78ms/step - loss: 0.1259 - accuracy: 0.9470 - val_loss: 0.0565 - val_accuracy: 0.9827\nEpoch 1/3\n63/63 [==============================] - 5s 79ms/step - loss: 0.1578 - accuracy: 0.9295 - val_loss: 0.0596 - val_accuracy: 0.9814\nEpoch 2/3\n63/63 [==============================] - 5s 78ms/step - loss: 0.1392 - accuracy: 0.9370 - val_loss: 0.0549 - val_accuracy: 0.9827\nEpoch 3/3\n63/63 [==============================] - 5s 77ms/step - loss: 0.1464 - accuracy: 0.9365 - val_loss: 0.0579 - val_accuracy: 0.9839\nEpoch 1/3\n63/63 [==============================] - 5s 76ms/step - loss: 0.1479 - accuracy: 0.9385 - val_loss: 0.0597 - val_accuracy: 0.9814\nEpoch 2/3\n63/63 [==============================] - 5s 77ms/step - loss: 0.1339 - accuracy: 0.9395 - val_loss: 0.0529 - val_accuracy: 0.9839\nEpoch 3/3\n63/63 [==============================] - 5s 78ms/step - loss: 0.1478 - accuracy: 0.9400 - val_loss: 0.0601 - val_accuracy: 0.9790\nEpoch 1/3\n63/63 [==============================] - 5s 80ms/step - loss: 0.1355 - accuracy: 0.9450 - val_loss: 0.0556 - val_accuracy: 0.9814\nEpoch 2/3\n63/63 [==============================] - 5s 79ms/step - loss: 0.1335 - accuracy: 
0.9425 - val_loss: 0.0576 - val_accuracy: 0.9814\nEpoch 3/3\n63/63 [==============================] - 5s 75ms/step - loss: 0.1315 - accuracy: 0.9435 - val_loss: 0.0591 - val_accuracy: 0.9827\nEpoch 1/3\n63/63 [==============================] - 5s 77ms/step - loss: 0.1465 - accuracy: 0.9415 - val_loss: 0.0561 - val_accuracy: 0.9839\nEpoch 2/3\n63/63 [==============================] - 5s 78ms/step - loss: 0.1251 - accuracy: 0.9520 - val_loss: 0.0584 - val_accuracy: 0.9790\nEpoch 3/3\n63/63 [==============================] - 5s 76ms/step - loss: 0.1364 - accuracy: 0.9460 - val_loss: 0.0501 - val_accuracy: 0.9851\nEpoch 1/3\n63/63 [==============================] - 5s 74ms/step - loss: 0.1365 - accuracy: 0.9360 - val_loss: 0.0514 - val_accuracy: 0.9839\nEpoch 2/3\n63/63 [==============================] - 5s 76ms/step - loss: 0.1302 - accuracy: 0.9435 - val_loss: 0.0538 - val_accuracy: 0.9839\nEpoch 3/3\n63/63 [==============================] - 5s 75ms/step - loss: 0.1406 - accuracy: 0.9385 - val_loss: 0.0551 - val_accuracy: 0.9839\nEpoch 1/3\n63/63 [==============================] - 5s 73ms/step - loss: 0.1400 - accuracy: 0.9360 - val_loss: 0.0551 - val_accuracy: 0.9827\nEpoch 2/3\n63/63 [==============================] - 5s 79ms/step - loss: 0.1442 - accuracy: 0.9380 - val_loss: 0.0517 - val_accuracy: 0.9839\nEpoch 3/3\n63/63 [==============================] - 5s 76ms/step - loss: 0.1339 - accuracy: 0.9465 - val_loss: 0.0550 - val_accuracy: 0.9839\nEpoch 1/3\n63/63 [==============================] - 5s 79ms/step - loss: 0.1499 - accuracy: 0.9345 - val_loss: 0.0568 - val_accuracy: 0.9814\nEpoch 2/3\n63/63 [==============================] - 5s 76ms/step - loss: 0.1401 - accuracy: 0.9435 - val_loss: 0.0527 - val_accuracy: 0.9839\nEpoch 3/3\n63/63 [==============================] - 5s 81ms/step - loss: 0.1285 - accuracy: 0.9410 - val_loss: 0.0575 - val_accuracy: 0.9827\nEpoch 1/3\n63/63 [==============================] - 5s 78ms/step - loss: 0.1457 - accuracy: 0.9355 - val_loss: 0.0564 - val_accuracy: 0.9839\nEpoch 2/3\n63/63 [==============================] - 5s 76ms/step - loss: 0.1210 - accuracy: 0.9520 - val_loss: 0.0491 - val_accuracy: 0.9876\nEpoch 3/3\n63/63 [==============================] - 5s 79ms/step - loss: 0.1300 - accuracy: 0.9450 - val_loss: 0.0513 - val_accuracy: 0.9851\nINFO:tensorflow:Assets written to: saved_model/dvc/assets\nupdating: saved_model/dvc/ (stored 0%)\nupdating: saved_model/dvc/assets/ (stored 0%)\nupdating: saved_model/dvc/variables/ (stored 0%)\nupdating: saved_model/dvc/variables/variables.index (deflated 75%)\nupdating: saved_model/dvc/variables/variables.data-00000-of-00001 (deflated 8%)\nupdating: saved_model/dvc/saved_model.pb (deflated 92%)\n" ], [ "from google.colab import drive\ndrive.mount('/content/drive')\n!unzip -q /content/drive/MyDrive/dvc.zip\ndvc = tf.keras.models.load_model('/content/saved_model/dvc')", "Mounted at /content/drive\nreplace saved_model/dvc/variables/variables.index? 
[y]es, [n]o, [A]ll, [N]one, [r]ename: A\n" ], [ "try:\n !mkdir -p saved_model\nexcept:\n pass\n\nmodel.save('saved_model/dvc/')\n\n!zip -r dvc.zip saved_model/dvc/\n\nfrom google.colab import files\nfiles.download(\"dvc.zip\")", "_____no_output_____" ], [ "from keras.preprocessing.image import load_img, img_to_array\n\n# load and prepare the image\ndef load_image(filename):\n\t# load the image\n\timg = load_img(filename, target_size=(160, 160))\n\t# convert to array\n\timg = img_to_array(img)\n\t# reshape into a single sample with 3 channels\n\timg = img.reshape(1, 160, 160, 3)\n\treturn img\n\nimg = load_image('/content/drive/MyDrive/dayana_1.JPG')", "_____no_output_____" ], [ "categories = [\"Cat\", \"Dog\"]", "_____no_output_____" ], [ "prediction = dvc_model.predict(img)\nprediction = tf.nn.sigmoid(prediction)\nprint(prediction)\nplt.figure()\nplt.imshow(img[0]/255)\nplt.title(categories[int(np.round_(prediction))])", "_____no_output_____" ], [ "loss0, accuracy0 = model.evaluate(validation_dataset)", "26/26 [==============================] - 1s 40ms/step - loss: 0.0848 - accuracy: 0.9765\n" ], [ "# #Retrieve a batch of images from the test set\n# image_batch, label_batch = test_dataset.as_numpy_iterator().next()\n# predictions = model.predict_on_batch(image_batch).flatten()\n\n# # Apply a sigmoid since our model returns logits\n# predictions = tf.nn.sigmoid(predictions)\n# predictions = tf.where(predictions < 0.5, 0, 1)\n\n# print('Predictions:\\n', predictions.numpy())\n# print('Labels:\\n', label_batch)\n\n# plt.figure(figsize=(10, 10))\n# for i in range(9):\n# ax = plt.subplot(3, 3, i + 1)\n# plt.imshow(image_batch[i].astype(\"uint8\"))\n# plt.title(class_names[predictions[i]])\n# plt.axis(\"off\")", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
Record 6
hexsha: d0b882d31d730c59b7e7538f4e805d5087d57dba
size: 955,421
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: notebooks/0.0_db_RG_climatology.ipynb
max_stars_repo_name: dhruvbalwada/sogos
max_stars_repo_head_hexsha: 4ccff0dc0bb7c3d9388bf1787167ce8d5dd78c59
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-03-15T14:05:18.000Z
max_stars_repo_stars_event_max_datetime: 2021-03-15T14:05:18.000Z
max_issues_repo_path: notebooks/0.0_db_RG_climatology.ipynb
max_issues_repo_name: dhruvbalwada/sogos
max_issues_repo_head_hexsha: 4ccff0dc0bb7c3d9388bf1787167ce8d5dd78c59
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 22
max_issues_repo_issues_event_min_datetime: 2020-09-11T18:44:15.000Z
max_issues_repo_issues_event_max_datetime: 2021-11-05T19:11:30.000Z
max_forks_repo_path: notebooks/0.0_db_RG_climatology.ipynb
max_forks_repo_name: dhruvbalwada/sogos
max_forks_repo_head_hexsha: 4ccff0dc0bb7c3d9388bf1787167ce8d5dd78c59
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2021-07-07T20:12:04.000Z
max_forks_repo_forks_event_max_datetime: 2021-07-07T20:12:04.000Z
avg_line_length: 529.026024
max_line_length: 213,240
alphanum_fraction: 0.937744
cells:
[ [ [ "# some_file.py\nimport sys\n\n# insert at 1, 0 is the script path (or '' in REPL)\nsys.path.insert(1, \"/Users/dhruvbalwada/work_root/sogos/\")", "_____no_output_____" ], [ "import os\nfrom numpy import *\nimport pandas as pd\nimport xarray as xr\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ], [ "from xgcm import Grid\nfrom xgcm.autogenerate import generate_grid_ds", "_____no_output_____" ], [ "import sogos.download_product as dlp\nimport sogos.load_product as ldp\nimport sogos.time_tools as tt\nimport sogos.geo_tools as gt\nimport sogos.download_file as df", "_____no_output_____" ], [ "import gsw\nimport cmocean as cmocean", "_____no_output_____" ] ], [ [ "# Download Latest Data", "_____no_output_____" ] ], [ [ "data_dir = \"/Users/dhruvbalwada/work_root/sogos/data/raw/climatology/\"", "_____no_output_____" ] ], [ [ "FTP ADDRESS: ftp://kakapo.ucsd.edu/pub/gilson/argo_climatology/", "_____no_output_____" ], [ "Data prior to 2017 (till Dec 2016) is in a single file ", "_____no_output_____" ] ], [ [ "# download the big climatology files\nwget.download(\n \"ftp://kakapo.ucsd.edu/pub/gilson/argo_climatology/RG_ArgoClim_Salinity_2017.nc.gz\",\n data_dir,\n)\nwget.download(\n \"ftp://kakapo.ucsd.edu/pub/gilson/argo_climatology/RG_ArgoClim_Temperature_2017.nc.gz\",\n data_dir,\n)", "_____no_output_____" ], [ "from ftplib import FTP", "_____no_output_____" ], [ "ftp_address = \"ftp://kakapo.ucsd.edu/pub/gilson/argo_climatology/RG_ArgoClim_2019\"", "_____no_output_____" ], [ "url_root = \"/pub/gilson/argo_climatology/\"\nftp_root = \"kakapo.ucsd.edu\"", "_____no_output_____" ], [ "ftp = FTP(ftp_root)\nftp.login()\nftp.cwd(url_root)\ncontents = ftp.nlst(\"RG_ArgoClim_2017*\")", "_____no_output_____" ], [ "contents = ftp.nlst(\"RG_ArgoClim_201*\")", "_____no_output_____" ], [ "for i in contents:\n print(\"Downloading\" + i)\n wget.download(\"ftp://kakapo.ucsd.edu/pub/gilson/argo_climatology/\" + i, data_dir)", "DownloadingRG_ArgoClim_201701.nc.gz\nDownloadingRG_ArgoClim_201702.nc.gz\nDownloadingRG_ArgoClim_201703.nc.gz\nDownloadingRG_ArgoClim_201704.nc.gz\nDownloadingRG_ArgoClim_201705.nc.gz\nDownloadingRG_ArgoClim_201706.nc.gz\nDownloadingRG_ArgoClim_201707.nc.gz\nDownloadingRG_ArgoClim_201708.nc.gz\nDownloadingRG_ArgoClim_201709.nc.gz\nDownloadingRG_ArgoClim_201710.nc.gz\nDownloadingRG_ArgoClim_201711.nc.gz\nDownloadingRG_ArgoClim_201712.nc.gz\nDownloadingRG_ArgoClim_201801.nc.gz\nDownloadingRG_ArgoClim_201802.nc.gz\nDownloadingRG_ArgoClim_201803.nc.gz\nDownloadingRG_ArgoClim_201804.nc.gz\nDownloadingRG_ArgoClim_201805.nc.gz\nDownloadingRG_ArgoClim_201806.nc.gz\nDownloadingRG_ArgoClim_201807.nc.gz\nDownloadingRG_ArgoClim_201808.nc.gz\nDownloadingRG_ArgoClim_201809.nc.gz\nDownloadingRG_ArgoClim_201810.nc.gz\nDownloadingRG_ArgoClim_201811.nc.gz\nDownloadingRG_ArgoClim_201812.nc.gz\nDownloadingRG_ArgoClim_201901.nc.gz\nDownloadingRG_ArgoClim_201902.nc.gz\nDownloadingRG_ArgoClim_201903.nc.gz\nDownloadingRG_ArgoClim_201904.nc.gz\nDownloadingRG_ArgoClim_201905.nc.gz\nDownloadingRG_ArgoClim_201906.nc.gz\nDownloadingRG_ArgoClim_201907.nc.gz\nDownloadingRG_ArgoClim_201908.nc.gz\nDownloadingRG_ArgoClim_201909.nc.gz\n" ] ], [ [ "## Load some data ", "_____no_output_____" ] ], [ [ "Tclim = xr.open_dataset(data_dir + \"RG_ArgoClim_Temperature_2017.nc\", decode_times=False)\nSclim = xr.open_dataset(data_dir + \"RG_ArgoClim_Salinity_2017.nc\", decode_times=False)", "_____no_output_____" ], [ "Climextra = xr.open_mfdataset(data_dir+ 
'RG_ArgoClim_201*', decode_times=False)", "/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/ipykernel_launcher.py:1: FutureWarning: In xarray version 0.15 the default behaviour of `open_mfdataset`\nwill change. To retain the existing behavior, pass\ncombine='nested'. To use future default behavior, pass\ncombine='by_coords'. See\nhttp://xarray.pydata.org/en/stable/combining.html#combining-multi\n\n \"\"\"Entry point for launching an IPython kernel.\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/xarray/backends/api.py:931: FutureWarning: The datasets supplied have global dimension coordinates. You may want\nto use the new `combine_by_coords` function (or the\n`combine='by_coords'` option to `open_mfdataset`) to order the datasets\nbefore concatenation. Alternatively, to continue concatenating based\non the order the datasets are supplied in future, please use the new\n`combine_nested` function (or the `combine='nested'` option to\nopen_mfdataset).\n from_openmfds=True,\n" ], [ "RG_clim = xr.merge([Tclim, Sclim, Climextra])", "_____no_output_____" ], [ "# Calendar type was missing, and giving errors in decoding time\nRG_clim.TIME.attrs['calendar'] = '360_day'\nRG_clim = xr.decode_cf(RG_clim)", "_____no_output_____" ], [ "## Add density and other things \nSA = xr.apply_ufunc(gsw.SA_from_SP, RG_clim.ARGO_SALINITY_MEAN+RG_clim.ARGO_SALINITY_ANOMALY, RG_clim.PRESSURE , \n RG_clim.LONGITUDE, RG_clim.LATITUDE,\n dask='parallelized', output_dtypes=[float,]).rename('SA')\nCT = xr.apply_ufunc(gsw.CT_from_t, SA, RG_clim.ARGO_TEMPERATURE_MEAN+RG_clim.ARGO_SALINITY_ANOMALY, RG_clim.PRESSURE, \n dask='parallelized', output_dtypes=[float,]).rename('CT')\nSIGMA0 = xr.apply_ufunc(gsw.sigma0, SA, CT, dask='parallelized', output_dtypes=[float,]).rename('SIGMA0')", "_____no_output_____" ], [ "RG_clim = xr.merge([RG_clim, SIGMA0])", "_____no_output_____" ], [ "T_region = RG_clim.ARGO_TEMPERATURE_ANOMALY.groupby('TIME.season').mean() + RG_clim.ARGO_TEMPERATURE_MEAN\nS_region = RG_clim.ARGO_SALINITY_ANOMALY.groupby('TIME.season').mean() + RG_clim.ARGO_SALINITY_MEAN\nrho_region = RG_clim.SIGMA0.groupby('TIME.season').mean()", "_____no_output_____" ], [ "plt.figure(figsize=(18,3))\nplt.subplot(141)\nT_region.sel(LATITUDE=slice(-65,-45), LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='DJF').plot.contourf(levels=11, vmin=-9, vmax=9);\n\nplt.subplot(142)\nT_region.sel(LATITUDE=slice(-65,-45), LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='MAM').plot.contourf(levels=11, vmin=-9, vmax=9);\n\nplt.subplot(143)\nT_region.sel(LATITUDE=slice(-65,-45), LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='JJA').plot.contourf(levels=11, vmin=-9, vmax=9);\n\nplt.subplot(144)\nT_region.sel(LATITUDE=slice(-65,-45), LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='SON').plot.contourf(levels=11, vmin=-9, vmax=9);", "/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, 
out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n" ], [ "plt.figure(figsize=(18,3))\nplt.subplot(141)\nS_region.sel(LATITUDE=slice(-65,-45), LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='DJF').plot.contourf(levels=11, vmin=33.7, vmax=34.2)\n\nplt.subplot(142)\nS_region.sel(LATITUDE=slice(-65,-45), LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='MAM').plot.contourf(levels=11, vmin=33.7, vmax=34.2)\n\nplt.subplot(143)\nS_region.sel(LATITUDE=slice(-65,-45), LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='JJA').plot.contourf(levels=11, vmin=33.7, vmax=34.2)\n\nplt.subplot(144)\nS_region.sel(LATITUDE=slice(-65,-45), LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='SON').plot.contourf(levels=11, vmin=33.7, vmax=34.2)\nplt.tight_layout()", "/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n" ], [ "plt.figure(figsize=(18,3))\nplt.subplot(141)\nrho_region.sel(LATITUDE=slice(-65,-45), LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='DJF').plot.contourf(levels=11)\n\nplt.subplot(142)\nrho_region.sel(LATITUDE=slice(-65,-45), 
LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='MAM').plot.contourf(levels=11)\n\nplt.subplot(143)\nrho_region.sel(LATITUDE=slice(-65,-45), LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='JJA').plot.contourf(levels=11)\n\nplt.subplot(144)\nrho_region.sel(LATITUDE=slice(-65,-45), LONGITUDE=slice(20,60)).isel(PRESSURE=0).sel(season='SON').plot.contourf(levels=11)", "/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n" ] ], [ [ "### Some Climatological Mean Sections ", "_____no_output_____" ] ], [ [ "dens_section30 = RG_clim.SIGMA0.sel(LONGITUDE=30, method='nearest').sel(LATITUDE=slice(-70,-40)).load()\ndens_section40 = RG_clim.SIGMA0.sel(LONGITUDE=40, method='nearest').sel(LATITUDE=slice(-70,-40)).load()", "_____no_output_____" ], [ "glider = {\"start_month\": 4.99, \"end_month\":7.8, \"start_lat\": -51.5, \"end_lat\": -53, \"max_depth\": 1000}", "_____no_output_____" ], [ "plt.figure(figsize=(15,4))\n\nplt.subplot(121)\nRG_clim.ARGO_TEMPERATURE_MEAN.sel(LONGITUDE=30, method='nearest').sel(LATITUDE=slice(-70,-40)\n ).plot.contourf(vmin=-10, levels=24)\nRG_clim.ARGO_TEMPERATURE_MEAN.sel(LONGITUDE=30, method='nearest').sel(LATITUDE=slice(-70,-40)).plot.contour(linestyles='-.',levels=[1])\ndens_section.groupby('TIME.month').mean().mean('month').plot.contour(colors='k', levels=7)\ndens_section.groupby('TIME.month').mean().mean('month').plot.contour(levels=[27.189, 27.752], colors='C0',\n linestyles='dashed', linewidths=4)\nplt.plot([glider['start_lat'], glider['start_lat']], [4, glider['max_depth']], color='C2', alpha=0.5)\nplt.plot([glider['end_lat'], glider['end_lat']], [4, glider['max_depth']], color='C2', alpha=0.5)\nplt.gca().invert_yaxis()\n\nplt.subplot(122)\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=30, method='nearest').sel(LATITUDE=slice(-70,-40)).plot.contourf(vmin=33.77, vmax=35.21, levels=24, cmap=cmocean.cm.haline)\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=30, method='nearest').sel(LATITUDE=slice(-70,-40)).plot.contour(levels=[34.2], 
linestyles='dashdot')\ndens_section.groupby('TIME.month').mean().mean('month').plot.contour(colors='k', levels=7)\ndens_section.groupby('TIME.month').mean().mean('month').plot.contour(levels=[27.189, 27.752], colors='C1', \n linestyles='dashed', linewidths=4)\nplt.plot([glider['start_lat'], glider['start_lat']], [4, 1900], color='C3', alpha=0.5)\n\nplt.gca().invert_yaxis()\n\nplt.tight_layout()\n\n#plt.savefig('../figures/clim_TS_30E.png')", "_____no_output_____" ], [ "plt.figure(figsize=(15,4))\n\nplt.subplot(121)\nRG_clim.ARGO_TEMPERATURE_MEAN.sel(LONGITUDE=40, method='nearest').sel(LATITUDE=slice(-70,-40)\n ).plot.contourf(vmin=-10,levels=24)\nRG_clim.ARGO_TEMPERATURE_MEAN.sel(LONGITUDE=40, method='nearest').sel(LATITUDE=slice(-70,-40)).plot.contour(linestyles='-.',levels=[1])\ndens_section40.groupby('TIME.month').mean().mean('month').plot.contour(colors='k', levels=7)\ndens_section40.groupby('TIME.month').mean().mean('month').plot.contour(levels=[27.189, 27.752], colors='C0',\n linestyles='dashed', linewidths=4)\nplt.gca().invert_yaxis()\n\nplt.subplot(122)\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=40, method='nearest').sel(LATITUDE=slice(-70,-40)).plot.contourf(vmin=33.77, vmax=35.21,levels=24, cmap=cmocean.cm.haline)\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=40, method='nearest').sel(LATITUDE=slice(-70,-40)).plot.contour(levels=[34.2], linestyles='dashdot')\ndens_section40.groupby('TIME.month').mean().mean('month').plot.contour(colors='k', levels=7)\ndens_section40.groupby('TIME.month').mean().mean('month').plot.contour(levels=[27.189, 27.752], colors='C1', \n linestyles='dashed', linewidths=4)\n\nplt.gca().invert_yaxis()\n\nplt.tight_layout()\nplt.savefig('../figures/clim_TS_40E.png')", "_____no_output_____" ], [ "plt.figure(figsize=(12,4))\n\nplt.subplot(121)\nRG_clim.ARGO_TEMPERATURE_MEAN.sel(LONGITUDE=40, method='nearest').sel(LATITUDE=slice(-70,-40)).plot.contourf(levels=24)\nRG_clim.ARGO_TEMPERATURE_MEAN.sel(LONGITUDE=40, method='nearest').sel(LATITUDE=slice(-70,-40)).plot.contour(levels=[1])\nplt.gca().invert_yaxis()\n\nplt.subplot(122)\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=40, method='nearest').sel(LATITUDE=slice(-70,-40)).plot.contourf(levels=24)\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=40, method='nearest').sel(LATITUDE=slice(-70,-40)).plot.contour(levels=[34.2])\nplt.gca().invert_yaxis()\n\nplt.tight_layout()", "_____no_output_____" ], [ "# Not much seasonality below 200m\ndens_section.groupby('TIME.month').mean().mean('month').sel(PRESSURE=slice(0,1000)).plot.contourf(cmap='Blues')\ndens_section.groupby('TIME.month').mean().isel(month=0).sel(PRESSURE=slice(0,1000)).plot.contour(levels=[27.2])\ndens_section.groupby('TIME.month').mean().isel(month=3).sel(PRESSURE=slice(0,1000)).plot.contour(levels=[27.2])\ndens_section.groupby('TIME.month').mean().isel(month=6).sel(PRESSURE=slice(0,1000)).plot.contour(levels=[27.2])\ndens_section.groupby('TIME.month').mean().isel(month=9).sel(PRESSURE=slice(0,1000)).plot.contour(levels=[27.2])\n\nplt.gca().invert_yaxis()", "_____no_output_____" ] ], [ [ "## N2\n\n\\begin{equation}\nN^2 = db/dz\n\\end{equation}\n\n$b = -\\frac{g}{\\rho_0} \\rho'$\n\n$b = g(\\alpha \\triangle T - \\beta \\triangle S)$", "_____no_output_____" ] ], [ [ "RG_clim", "_____no_output_____" ], [ "RG_clim = generate_grid_ds(RG_clim, \n {'Z':'PRESSURE', 'X':'LONGITUDE', 'Y':'LATITUDE'})", "_____no_output_____" ], [ "grid = Grid(RG_clim, periodic='X')", "_____no_output_____" ], [ "g = 9.81\nrho0 = 1000", "_____no_output_____" ], [ "dens_clim_monthly = 
RG_clim.SIGMA0.groupby('TIME.month').mean()", "_____no_output_____" ], [ "dens_clim_monthly", "_____no_output_____" ], [ "N2_clim_monthly = grid.interp(-g/rho0* grid.diff(dens_clim_monthly, 'Z', boundary='extend') / -(grid.diff(RG_clim.PRESSURE, 'Z', boundary='extend')), 'Z', boundary='extend')", "_____no_output_____" ], [ "N2_clim_monthly_SO = N2_clim_monthly.sel(LATITUDE=slice(-70, -30)).load()", "/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/core.py:119: RuntimeWarning: divide by zero encountered in true_divide\n return func(*args2)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/core.py:119: RuntimeWarning: invalid value encountered in true_divide\n return func(*args2)\n" ], [ "N2_clim_monthly_SO = N2_clim_monthly_SO.rename('N2')", "_____no_output_____" ], [ "plt.figure(figsize=(18,12))\n\nplt.subplot(221)\nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, -40)).sel(LONGITUDE=30, method='nearest').sel(month=1).plot(vmin=-5e-5)\nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, -40)).sel(LONGITUDE=30, method='nearest').sel(month=1).plot.contour(levels=[2e-5])\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=30, method='nearest').sel(\n LATITUDE=slice(-70,-40)).plot.contour(levels=[34.35], linestyles='dashdot')\n\n\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(colors='k', levels=7)\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(levels=[27.189, 27.752], colors='C1', \n linestyles='dashed', linewidths=4)\nplt.title('January')\nplt.gca().invert_yaxis()\n\nplt.subplot(222)\nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, -40)).sel(LONGITUDE=30, method='nearest').sel(month=4).plot(vmin=-5e-5)\nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, -40)).sel(LONGITUDE=30, method='nearest').sel(month=4).plot.contour(levels=[2e-5])\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=30, method='nearest').sel(\n LATITUDE=slice(-70,-40)).plot.contour(levels=[34.35], linestyles='dashdot')\n\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(colors='k', levels=7)\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(levels=[27.189, 27.752], colors='C1', \n linestyles='dashed', linewidths=4)\nplt.title('April')\nplt.gca().invert_yaxis()\n\nplt.subplot(223)\nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, -40)).sel(LONGITUDE=30, method='nearest').sel(month=7).plot(vmin=-5e-5)\nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, -40)).sel(LONGITUDE=30, method='nearest').sel(month=7).plot.contour(levels=[2e-5])\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=30, method='nearest').sel(\n LATITUDE=slice(-70,-40)).plot.contour(levels=[34.35], linestyles='dashdot')\n\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(colors='k', levels=7)\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(levels=[27.189, 27.752], colors='C1', \n linestyles='dashed', linewidths=4)\nplt.title('July')\nplt.gca().invert_yaxis()\n\nplt.subplot(224)\nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, -40)).sel(LONGITUDE=30, method='nearest').sel(month=10).plot(vmin=-5e-5)\nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, 
-40)).sel(LONGITUDE=30, method='nearest').sel(month=10).plot.contour(levels=[2e-5])\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=30, method='nearest').sel(\n LATITUDE=slice(-70,-40)).plot.contour(levels=[34.35], linestyles='dashdot')\n\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(colors='k', levels=7)\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(levels=[27.189, 27.752], colors='C1', \n linestyles='dashed', linewidths=4)\nplt.title('October')\nplt.gca().invert_yaxis()\n\nplt.savefig('../figures/clim_N2_30E.png')", "_____no_output_____" ], [ "plt.figure(figsize=(8,3))\n\nplt.subplot(121)\n#plt.pcolormesh(N2_clim_monthly_SO.LATITUDE.sel(LATITUDE=slice(-70, -40)), \n# N2_clim_monthly_SO.LATITUDE.sel(=slice(-70, -40)), \nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, -40)).sel(LONGITUDE=30, method='nearest').sel(month=4).plot(vmin=-5e-5,\n rasterized=True,add_colorbar=False)\nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, -40)).sel(LONGITUDE=30, method='nearest').sel(month=4).plot.contour(levels=[2e-5])\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=30, method='nearest').sel(\n LATITUDE=slice(-70,-40)).plot.contour(levels=[34.35], linestyles='dashdot')\n\n\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(colors='k', levels=7)\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(levels=[27.189, 27.752], colors='C1', \n linestyles='dashed', linewidths=4)\nplt.title('April')\nplt.gca().invert_yaxis()\nplt.ylim([1500, 0])\nplt.xlabel('Latitude')\nplt.ylabel('Depth (m)')\n\nplt.subplot(122)\nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, -40)).sel(LONGITUDE=30, method='nearest').sel(month=9).plot(vmin=-5e-5,\n rasterized=True,add_colorbar=False)\nN2_clim_monthly_SO.sel(LATITUDE=slice(-70, -40)).sel(LONGITUDE=30, method='nearest').sel(month=9).plot.contour(levels=[2e-5])\nRG_clim.ARGO_SALINITY_MEAN.sel(LONGITUDE=30, method='nearest').sel(\n LATITUDE=slice(-70,-40)).plot.contour(levels=[34.35], linestyles='dashdot')\n\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(colors='k', levels=7)\ndens_section30.groupby('TIME.month').mean().mean('month').plot.contour(levels=[27.189, 27.752], colors='C1', \n linestyles='dashed', linewidths=4)\nplt.title('September')\nplt.gca().invert_yaxis()\nplt.ylim([1500, 0])\nplt.xlabel('Latitude')\nplt.ylabel('Depth (m)')\n\nplt.tight_layout()\nplt.savefig('N2_climatology.pdf')", "_____no_output_____" ], [ "N2.sel(LATITUDE=slice(-60, -40)).sel(LONGITUDE=30, method='nearest').isel(TIME=-1).plot()", "/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/core.py:119: RuntimeWarning: divide by zero encountered in true_divide\n return func(*args2)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/core.py:119: RuntimeWarning: invalid value encountered in true_divide\n return func(*args2)\n" ] ], [ [ "The apply ufunc way, not working yet. 
", "_____no_output_____" ] ], [ [ "CT_clim = CT.groupby('TIME.month').mean()\nSA_clim = SA.groupby('TIME.month').mean()", "_____no_output_____" ], [ "CT_clim_region = CT_clim.sel(LATITUDE=slice(-65,-35), LONGITUDE=slice(20,50)).load()\nSA_clim_region = SA_clim.sel(LATITUDE=slice(-65,-35), LONGITUDE=slice(20,50)).load()", "/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n" ], [ "(N2, pmid) = xr.apply_ufunc(gsw.Nsquared, SA_clim_region, CT_clim_region, RG_clim.PRESSURE, \n dask='parallelized', \n input_core_dims=[['PRESSURE'],['PRESSURE'],['PRESSURE']],\n output_core_dims=[['PRESSURE'],['PRESSURE']], exclude_dims=set(['PRESSURE']))", "/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/gsw/stability.py:85: RuntimeWarning: divide by zero encountered in true_divide\n N2 = ((g_local**2) / (specvol_mid * db_to_pa * dp))\n" ] ], [ [ "### Gestrophic Velocities", "_____no_output_____" ] ], [ [ "psi = xr.apply_ufunc(gsw.geo_strf_dyn_height, SA, CT , RG_clim.PRESSURE, \n dask='parallelized', output_dtypes=[float,]).rename('psi')", "_____no_output_____" ], [ "psi", "_____no_output_____" ], [ "vels = xr.apply_ufunc(gsw.geostrophic_velocity, psi, psi.LONGITUDE, psi.LATITUDE, \n dask='parallelized', output_core_dims=[4,4], output_dtypes=[float,]).rename('vels')", "_____no_output_____" ], [ "vels", "_____no_output_____" ] ], [ [ "### Mixed Layer Depth\n\nEnded up going with Holte's climatology for MLD work ", "_____no_output_____" ] ], [ [ "delta_dens = RG_clim.SIGMA0 - RG_clim.SIGMA0.isel(PRESSURE=0)", "_____no_output_____" ], [ "import nc_time_axis", "_____no_output_____" ], [ "RG_clim.SIGMA0.sel(LONGITUDE=30, method='nearest').sel( LATITUDE=slice(-63,-45)).isel(TIME=-1).plot.contourf()\nplt.gca().invert_yaxis()", "_____no_output_____" ], [ "RG_clim.SIGMA0.sel(LONGITUDE=30, LATITUDE=-50, method='nearest').isel(TIME=-1).plot()\nRG_clim.SIGMA0.sel(LONGITUDE=30, LATITUDE=-50, method='nearest').isel(TIME=-7).plot()\nRG_clim.SIGMA0.sel(LONGITUDE=30, LATITUDE=-60, method='nearest').isel(TIME=-1).plot()\nRG_clim.SIGMA0.sel(LONGITUDE=30, LATITUDE=-60, method='nearest').isel(TIME=-7).plot()", "_____no_output_____" ], [ "delta_dens.sel(LONGITUDE=30, LATITUDE=-50, method='nearest').isel(TIME=-1).plot()", "_____no_output_____" ], [ "temp = delta_dens.where(delta_dens>0.03).sel(LONGITUDE=30, LATITUDE=-50, method='nearest').isel(TIME=-1).plot()", "/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/core.py:119: RuntimeWarning: invalid value encountered in greater\n return func(*args2)\n" ], [ "temp = delta_dens.where(delta_dens>0.03)", "_____no_output_____" ], [ "MLD = temp.PRESSURE.where(temp == temp.min('PRESSURE')).min('PRESSURE')", "_____no_output_____" ], [ "MLD_clim = temp.PRESSURE.where(temp == 
temp.min('PRESSURE')).min('PRESSURE').groupby('TIME.month').mean()", "_____no_output_____" ], [ "MLD_clim.load()", "/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/core.py:119: RuntimeWarning: invalid value encountered in greater\n return func(*args2)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/utils.py:29: RuntimeWarning: All-NaN slice encountered\n return func(*args, **kwargs)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/toolz/functoolz.py:488: RuntimeWarning: All-NaN slice encountered\n ret = f(ret)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: divide by zero encountered in true_divide\n x = np.divide(x1, x2, out)\n/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n" ], [ "MLD_clim.month", "_____no_output_____" ], [ "plt.figure(figsize=(12,4))\n\nplt.subplot(121)\nMLD_clim.sel(LATITUDE=slice(-75,-25), LONGITUDE=slice(20,90)).sel(month=1).plot(vmin=0, vmax=120)\n\nplt.subplot(122)\nMLD_clim.sel(LATITUDE=slice(-75,-25), LONGITUDE=slice(20,90)).sel(month=7).plot(vmin=0, vmax=120)", "_____no_output_____" ], [ "MLD_clim.max('month').sel(LATITUDE=slice(-75,-25), LONGITUDE=slice(20,380)).plot(vmin=0, vmax=150)", "_____no_output_____" ], [ "deltaH = MLD_clim.max('month') - MLD_clim.min('month')", "/Users/dhruvbalwada/code/miniconda/envs/sogos/lib/python3.7/site-packages/xarray/core/nputils.py:223: RuntimeWarning: All-NaN slice encountered\n result = getattr(npmodule, name)(values, axis=axis, **kwargs)\n" ], [ "deltaH.sel(LATITUDE=slice(-75,-25), LONGITUDE=slice(20,380)).plot(vmin=0, vmax=80)", "_____no_output_____" ], [ "MLD_clim.sel(LATITUDE=-45, LONGITUDE=35, method='nearest').plot(label='45S')\nMLD_clim.sel(LATITUDE=-50, LONGITUDE=35, method='nearest').plot(label='50S')\nMLD_clim.sel(LATITUDE=-55, LONGITUDE=35, method='nearest').plot(label='55S')\nMLD_clim.sel(LATITUDE=-60, LONGITUDE=35, method='nearest').plot(label='60S')\nplt.legend()", "_____no_output_____" ], [ "MLD_clim.sel(LATITUDE=-45, LONGITUDE=45, method='nearest').plot(label='45S')\nMLD_clim.sel(LATITUDE=-50, LONGITUDE=45, method='nearest').plot(label='50S')\nMLD_clim.sel(LATITUDE=-55, LONGITUDE=45, method='nearest').plot(label='55S')\nMLD_clim.sel(LATITUDE=-60, LONGITUDE=45, method='nearest').plot(label='60S')\nplt.legend()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0b88e4b9c9432315370a9e3a431f309a46cabc8
33,167
ipynb
Jupyter Notebook
nbooks/Slider-Crank.ipynb
JorgeDeLosSantos/mem-upgto-1703
31f37eb0645799b155bce4629e813af98ee18a89
[ "MIT" ]
null
null
null
nbooks/Slider-Crank.ipynb
JorgeDeLosSantos/mem-upgto-1703
31f37eb0645799b155bce4629e813af98ee18a89
[ "MIT" ]
null
null
null
nbooks/Slider-Crank.ipynb
JorgeDeLosSantos/mem-upgto-1703
31f37eb0645799b155bce4629e813af98ee18a89
[ "MIT" ]
null
null
null
404.47561
31,318
0.926192
[ [ [ "## Mecanismo de manivela-biela corredera", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import pi,cos,sin,tan\n%matplotlib inline\n\ndef plotvector(p0,u,color=\"r\"):\n \"\"\"\n Gráfica un vector, dados su punto inicial \n y sus componentes rectangulares\n \"\"\"\n plt.plot([p0[0],p0[0]+u[0]],[p0[1],p0[1]+u[1]], color=color, ls=\"-\", marker='o')\n\nr2, r3 = 3, 8\nfor t2 in np.linspace(0,2*pi,10):\n color = np.array([t2,0,t2])/(2*pi)\n t3 = np.arcsin(-r2*sin(t2)/r3)\n r1 = r2*cos(t2) + r3*cos(t3)\n R2 = np.array([r2*cos(t2),r2*sin(t2)])\n R3 = np.array([r3*cos(t3),r3*sin(t3)])\n plotvector([0,0], R2, color)\n plotvector(R2, R3, color)\n plt.grid('on')\n plt.axis('equal');", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
d0b8a259350fb7d976f1871766b461db33b9818e
31,100
ipynb
Jupyter Notebook
project/notebooks/Dataset10/Dataset10 -Churn Prediction.ipynb
f-data/churn-prediction
059bcb8eb35a80bfed8ce13b535479678dc1e2b2
[ "MIT" ]
null
null
null
project/notebooks/Dataset10/Dataset10 -Churn Prediction.ipynb
f-data/churn-prediction
059bcb8eb35a80bfed8ce13b535479678dc1e2b2
[ "MIT" ]
null
null
null
project/notebooks/Dataset10/Dataset10 -Churn Prediction.ipynb
f-data/churn-prediction
059bcb8eb35a80bfed8ce13b535479678dc1e2b2
[ "MIT" ]
null
null
null
125.403226
19,904
0.832637
[ [ [ "#Add needed imports\nimport numpy as np\nimport pandas as pd\nfrom imblearn.over_sampling import SMOTE\nimport seaborn as sns\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.dummy import DummyClassifier\nfrom imblearn.over_sampling import SMOTENC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score,confusion_matrix, precision_score, recall_score,f1_score\nfrom sklearn.tree import DecisionTreeClassifier \nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom xgboost import XGBClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import RepeatedStratifiedKFold, GridSearchCV\nfrom sklearn import svm\nimport shap\nimport os\n\n#Read data\nproccessed_data_path =os.path.join(os.path.pardir,os.path.pardir,'data','processed')\ntrain_path = os.path.join(proccessed_data_path,'dataset10.csv')\ndf = pd.read_csv(train_path)\nlabels=df['Churn']\nx = df.drop(columns=['Churn','Unnamed: 0'],axis = 'columns')\ny=np.ravel(labels)\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3)\noversample = SMOTENC(categorical_features=[2])\noversample = SMOTE()\nx_train, y_train = oversample.fit_resample(x_train, y_train)\n\nsc = StandardScaler()\nx_train = sc.fit_transform(x_train)\nx_test = sc.transform(x_test)", "_____no_output_____" ], [ "svm_model = svm.SVC(random_state=0,gamma='auto')\nrf_model=RandomForestClassifier(random_state=0)\ndt_model=DecisionTreeClassifier(random_state=0,criterion='entropy',max_depth = 7,min_samples_leaf=30) \nlr_model= LogisticRegression(random_state=0, max_iter=300)\nmlp_model =MLPClassifier(random_state=0,activation='relu', solver='sgd',learning_rate='adaptive')\nxgb_model = XGBClassifier(random_state=0 ,learning_rate=0.05, max_depth=7,eval_metric='mlogloss',use_label_encoder =False)\ngmb_model= GradientBoostingClassifier(random_state=0,n_estimators=20,learning_rate=0.75,max_features=4,max_depth=5)\nmodel_params = {\n 'svm': {\n 'model': svm_model,\n 'params' : {\n 'C': [15,10],\n 'kernel': ['rbf','linear']\n } \n },\n 'rf': {\n 'model': rf_model,\n 'params' : {\n 'n_estimators': [1,5,10]\n }\n },\n 'dt': {\n 'model': dt_model,\n 'params' : {}\n },\n 'lr' : {\n 'model':lr_model,\n 'params': {\n 'C': [1,5,10]\n }\n },\n 'mlp' : {\n 'model':mlp_model,\n 'params': {}\n },\n 'xg_boost' : {\n 'model':xgb_model,\n 'params': {}\n },\n 'gbm' : {\n 'model':gmb_model,\n 'params': {}\n }\n}", "_____no_output_____" ], [ "scores = []\ncv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=100)\nfor model_name, mp in model_params.items():\n clf = GridSearchCV(mp['model'], mp['params'], cv=cv, return_train_score=False)\n clf.fit(x_train,y_train)\n conf_matrix =confusion_matrix(y_test,clf.predict(x_test))\n scores.append({\n 'model': model_name,\n 'best_score': clf.best_score_,\n 'best_params': clf.best_params_,\n 'precision':precision_score(y_test,clf.predict(x_test)),\n 'recall':recall_score(y_test,clf.predict(x_test)),\n 'f1_score':f1_score(y_test,clf.predict(x_test)),\n 'true positives':conf_matrix[0][0],\n 'true negatives':conf_matrix[1][1],\n 'false postives':conf_matrix[0][1],\n 'false negatives':conf_matrix[1][0]\n })\n \ndf = pd.DataFrame(scores,columns=['model','best_score','precision','recall','f1_score','true positives','true negatives','false 
postives','false negatives','best_params'])\nprint(df)", "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\nStochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n" ], [ "import shap\nxgb_model = XGBClassifier(random_state=0 ,learning_rate=0.05, max_depth=7,eval_metric='mlogloss',use_label_encoder =False)\nxgb_model.fit(x_train,y_train)\nexplainer = shap.TreeExplainer(xgb_model)\nshap_values = explainer.shap_values(x)\nshap.summary_plot(shap_values, x)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
d0b8a4ca4d7327dd3bf98a55d7cb84fcc318c6de
5,638
ipynb
Jupyter Notebook
FTP/FTP_Get_file.ipynb
techthiyanes/awesome-notebooks
10ab4da1b94dfa101e908356a649609b0b17561a
[ "BSD-3-Clause" ]
null
null
null
FTP/FTP_Get_file.ipynb
techthiyanes/awesome-notebooks
10ab4da1b94dfa101e908356a649609b0b17561a
[ "BSD-3-Clause" ]
null
null
null
FTP/FTP_Get_file.ipynb
techthiyanes/awesome-notebooks
10ab4da1b94dfa101e908356a649609b0b17561a
[ "BSD-3-Clause" ]
null
null
null
21.76834
270
0.559418
[ [ [ "<img width=\"10%\" alt=\"Naas\" src=\"https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160\"/>", "_____no_output_____" ], [ "# FTP - Get file\n<a href=\"https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/FTP/FTP_Get_file.ipynb\" target=\"_parent\"><img src=\"https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg\"/></a>", "_____no_output_____" ], [ "**Tags:** #ftp #file #naas_drivers #operations #snippet #naas", "_____no_output_____" ], [ "**Author:** [Jeremy Ravenel](https://www.linkedin.com/in/ACoAAAJHE7sB5OxuKHuzguZ9L6lfDHqw--cdnJg/)", "_____no_output_____" ], [ "## Input", "_____no_output_____" ], [ "### Import library", "_____no_output_____" ] ], [ [ "from naas_drivers import ftp", "_____no_output_____" ] ], [ [ "### Variables", "_____no_output_____" ] ], [ [ "path = \"/path/to/file/in/ftp\"\nuser = \"my user\"\npasswd = \"my passwd\"", "_____no_output_____" ] ], [ [ "## Model", "_____no_output_____" ], [ "### Connect to ftp", "_____no_output_____" ] ], [ [ "ftp = ftp.connect(user, passwd)", "_____no_output_____" ] ], [ [ "## Output", "_____no_output_____" ], [ "### Get the path", "_____no_output_____" ] ], [ [ "ftp = ftp.connect(user, passwd)\nftp.get(path)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d0b8a8b8191c2e9b500da3ccb222882f2b3543fc
62,351
ipynb
Jupyter Notebook
notebook/data_processing_2.ipynb
IIVIIIII/2020_Weathering
0759f2848ca912c8f1f9875f18c8e0aa604948f5
[ "MIT" ]
null
null
null
notebook/data_processing_2.ipynb
IIVIIIII/2020_Weathering
0759f2848ca912c8f1f9875f18c8e0aa604948f5
[ "MIT" ]
null
null
null
notebook/data_processing_2.ipynb
IIVIIIII/2020_Weathering
0759f2848ca912c8f1f9875f18c8e0aa604948f5
[ "MIT" ]
null
null
null
37.880316
150
0.412792
[ [ [ "# import dependencies\nimport pandas as pd\nimport requests\nimport json\n", "_____no_output_____" ], [ "# read csv on covid-19 covid vulnerability index data and convert to dataframe\nccvi = pd.read_csv('../resources/ccvi.csv')\n\n# drop rows that contain any null values (there are 655 of them)\nccvi = ccvi.dropna(how='any')\n\n# display dataframe\nccvi\n", "_____no_output_____" ], [ "# get covid data for each race by state\ncovid = pd.read_csv('../resources/CRDT_Data.csv')\n\n# display dataframe\ncovid\n", "_____no_output_____" ], [ "# dictionary for convertying state names to corresponding numbers or abbreviations\nstates = {\n 'southcarolina': {'num': '45', 'abbr': 'SC'},\n 'southdakota': {'num': '46', 'abbr': 'SD'},\n 'tennessee': {'num': '47', 'abbr': 'TN'},\n 'texas': {'num': '48', 'abbr': 'TX'},\n 'vermont': {'num': '50', 'abbr': 'VT'},\n 'utah': {'num': '49', 'abbr': 'UT'},\n 'virginia': {'num': '51', 'abbr': 'VA'},\n 'washington': {'num': '53', 'abbr': 'WA'},\n 'westvirginia': {'num': '54', 'abbr': 'WV'},\n 'wisconsin': {'num': '55', 'abbr': 'WI'},\n 'wyoming': {'num': '56', 'abbr': 'WY'},\n 'puertorico': {'num': '72', 'abbr': 'PR'},\n 'alabama': {'num': '01', 'abbr': 'AL'},\n 'alaska': {'num': '02', 'abbr': 'AK'},\n 'arizona': {'num': '04', 'abbr': 'AZ'},\n 'arkansas': {'num': '05', 'abbr': 'AR'},\n 'california': {'num': '06', 'abbr': 'CA'},\n 'colorado': {'num': '08', 'abbr': 'CO'},\n 'delaware': {'num': '10', 'abbr': 'CT'},\n 'districtofcolumbia': {'num': '11', 'abbr': 'DE'},\n 'connecticut': {'num': '09', 'abbr': 'DC'},\n 'florida': {'num': '12', 'abbr': 'FL'},\n 'georgia': {'num': '13', 'abbr': 'GA'},\n 'idaho': {'num': '16', 'abbr': 'ID'},\n 'hawaii': {'num': '15', 'abbr': 'HI'},\n 'illinois': {'num': '17', 'abbr': 'IL'},\n 'indiana': {'num': '18', 'abbr': 'IN'},\n 'iowa': {'num': '19', 'abbr': 'IA'},\n 'kansas': {'num': '20', 'abbr': 'KS'},\n 'kentucky': {'num': '21', 'abbr': 'KS'},\n 'louisiana': {'num': '22', 'abbr': 'LA'},\n 'maine': {'num': '23', 'abbr': 'ME'},\n 'maryland': {'num': '24', 'abbr': 'MD'},\n 'massachusetts': {'num': '25', 'abbr': 'MA'},\n 'michigan': {'num': '26', 'abbr': 'MI'},\n 'minnesota': {'num': '27', 'abbr': 'MN'},\n 'mississippi': {'num': '28', 'abbr': 'MS'},\n 'missouri': {'num': '29', 'abbr': 'MO'},\n 'montana': {'num': '30', 'abbr': 'MT'},\n 'nebraska': {'num': '31', 'abbr': 'NE'},\n 'nevada': {'num': '32', 'abbr': 'NV'},\n 'newhampshire': {'num': '33', 'abbr': 'NH'},\n 'newjersey': {'num': '34', 'abbr': 'NJ'},\n 'newmexico': {'num': '35', 'abbr': 'NM'},\n 'newyork': {'num': '36', 'abbr': 'NY'},\n 'northcarolina': {'num': '37', 'abbr': 'NC'},\n 'northdakota': {'num': '38', 'abbr': 'ND'},\n 'oregon': {'num': '41', 'abbr': 'OR'},\n 'pennsylvania': {'num': '42', 'abbr': 'PA'},\n 'rhodeisland': {'num': '44', 'abbr': 'RI'}\n}\n\n# all statistical categories to to be queried \npops = 'B01003_001E,B02001_002E,B02001_003E,B02001_004E,B02001_005E,B02001_006E,B03001_003E'\n\n# create list of racial groups to iterate through\nraces = ['total','white','black','native','asian','pacific','hispanic']\n\n# dictionary with all data to be used from all states that made the data avaliable\nstateData = {}\n\n# all states without necessary data\nerror_states = []\n", "_____no_output_____" ], [ "# iterate through states\nfor state in states:\n \n try:\n\n # get census state number\n state_num = states[state]['num']\n\n # create url to request data from api\n url = 
f'https://api.census.gov/data/2019/acs/acs5?get=NAME,{pops}&for=tract:*&in=state:{state_num}'\n\n # set returned data to a variable\n response = requests.get(url).json()\n\n # create list to store dictionaries with data for each census tract\n tracts = []\n\n # create dictionaries with population data for each census tract \n # (with properly formatted fips code)\n for r in response:\n if r[0] != 'NAME':\n tracts.append({\n 'FIPS': int(f'{r[8]}{r[9]}{r[10]}'),\n 'total': int(r[1]),\n 'white': int(r[2]),\n 'black': int(r[3]),\n 'native': int(r[4]),\n 'asian': int(r[5]),\n 'pacific': int(r[6]),\n 'hispanic': int(r[7])\n })\n\n # create dataframe with census population data\n populations = pd.DataFrame(tracts)\n\n # merge population data and ccvi data on census tract fips code\n ccvi_and_pop = pd.merge(populations, ccvi, on='FIPS')\n\n # create dictionary to hold data for each racial demographic\n demogs = {\n 'total': {},\n 'white': {},\n 'black': {},\n 'native': {},\n 'asian': {},\n 'pacific': {},\n 'hispanic': {}\n }\n\n # iterate through list of races\n for race in races:\n\n # calculate total population for each race\n demogs[race]['population'] = int(ccvi_and_pop[race].sum())\n\n # calculate average ccvi for each race\n demogs[race]['ccvi'] = (ccvi_and_pop[race]*ccvi_and_pop['ccvi']).sum()/demogs[race]['population']\n\n # calculate population of each race as a percentage of total population\n demogs[race]['population_percent'] = (demogs[race]['population']/demogs['total']['population'])*100\n\n # get covid data for each race by state\n covid = pd.read_csv('../resources/CRDT_Data.csv')\n\n # filter to only include data for selected state\n covid = covid.loc[covid['State'] == states[state]['abbr'],:]\n\n # filter to only include data from 2020\n covid = covid.loc[covid['Date'] < 20210000,:]\n\n # create dataframe with only relevant columns for covid cases\n cases = covid[['Cases_Total','Cases_White','Cases_Black','Cases_AIAN','Cases_Asian','Cases_NHPI','Cases_Ethnicity_Hispanic']]\n\n # create dataframe with only relevant columns for covid deaths\n deaths = covid[['Deaths_Total','Deaths_White','Deaths_Black','Deaths_AIAN','Deaths_Asian','Deaths_NHPI','Deaths_Ethnicity_Hispanic']]\n\n # iterate through covid data for selected races and place data in a dictionary\n for i in range(0, len(cases.columns)):\n\n # total cases for each race\n demogs[races[i]]['cases'] = int(cases[cases.columns[i]].values[0])\n\n # number of cases for each race as a percentage of total cases\n demogs[races[i]]['percent_of_cases'] = (demogs[races[i]]['cases']/demogs['total']['cases'])*100\n\n # percent discrepancy between percent of total cases and percent of total population for by each race\n # (theoretically each race should account for the same percent of cases as their percent of the population)\n demogs[races[i]]['discrepancy_percent'] = (demogs[races[i]]['percent_of_cases']/demogs[races[i]]['population_percent'])*100\n\n # total deaths for each race\n demogs[races[i]]['deaths'] = int(deaths[deaths.columns[i]].values[0])\n\n # chance of an infection resulting in death for each race\n demogs[races[i]]['chance_of_death'] = (demogs[races[i]]['deaths']/demogs[races[i]]['cases'])*100\n\n # number of deaths for each race as a percentage of total deaths\n demogs[races[i]]['percent_of_deaths'] = (demogs[races[i]]['deaths']/demogs['total']['deaths'])*100\n\n # create dataframe without total population values\n demographics = pd.DataFrame(demogs).drop(columns=['total'])\n\n\n # create dictionary to hold calculated 
values to be used in max patch\n        for_max = {}\n\n        # iterate through statistical categories\n        for row in list(demographics.index):\n\n            # create a list that holds all values within the row of a statistical category\n            values = demographics.loc[row].values\n\n            # iterate through races\n            for i in range(1, len(races)):\n\n                # get population numbers\n                if row == 'population':\n                    for_max[races[i]] = {}\n                    for_max[races[i]][row] = int(values[i-1])\n\n                # calculate inverted ccvi values\n                elif row == 'ccvi':\n                    for_max[races[i]]['inverted_ccvi'] = round(100-(values[i-1])*100, 2)\n\n                # calculate chances for where next infection will occur\n                elif row == 'discrepancy_percent':\n                    for_max[races[i]]['chance_of_infection'] = round((values[i-1]/values.sum())*100, 2)\n\n                # get values for chance of infection resulting in death\n                elif row == 'chance_of_death':\n                    for_max[races[i]][row] = round(values[i-1], 2)\n\n        # create keys to hold number of cases and deaths generated by the Max algorithm\n        for key in for_max:\n            for_max[key]['generated_cases'] = 0\n            for_max[key]['generated_deaths'] = 0\n\n        stateData[state] = for_max\n\n    except:\n\n        error_states.append(state)\n\n", "<ipython-input-5-4a8ca7f482d8>:57: RuntimeWarning: invalid value encountered in double_scalars\n  demogs[race]['ccvi'] = (ccvi_and_pop[race]*ccvi_and_pop['ccvi']).sum()/demogs[race]['population']\n" ], [ "# display available states in alphabetical order\nfor state in sorted(stateData):\n    print(state)", "alaska\narkansas\ncalifornia\ncolorado\ngeorgia\nillinois\niowa\nmaine\nminnesota\nmissouri\nnebraska\noregon\ntennessee\nutah\nwashington\nwyoming\n" ], [ "# display unavailable states\nfor state in error_states:\n    print(state)", "southcarolina\nsouthdakota\ntexas\nvermont\nvirginia\nwestvirginia\nwisconsin\npuertorico\nalabama\narizona\ndelaware\ndistrictofcolumbia\nconnecticut\nflorida\nidaho\nhawaii\nindiana\nkansas\nkentucky\nlouisiana\nmaryland\nmassachusetts\nmichigan\nmississippi\nmontana\nnevada\nnewhampshire\nnewjersey\nnewmexico\nnewyork\nnorthcarolina\nnorthdakota\npennsylvania\nrhodeisland\n" ], [ "stateData", "_____no_output_____" ], [ "with open(\"../resources/stateData.json\", \"w\") as outfile:\n    json.dump(stateData, outfile)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0b8b44ddd834e461fe7d319a1baeaa824b240cd
346,223
ipynb
Jupyter Notebook
examples/exbald/data/testing.ipynb
doc22940/yellowbrick
9e6c4fbffc16fd542677fc7d73dbb9c3784b79ed
[ "Apache-2.0" ]
3,662
2016-05-19T19:16:28.000Z
2022-03-30T00:25:19.000Z
examples/exbald/data/testing.ipynb
doc22940/yellowbrick
9e6c4fbffc16fd542677fc7d73dbb9c3784b79ed
[ "Apache-2.0" ]
1,047
2016-05-18T15:20:59.000Z
2022-03-30T16:12:49.000Z
examples/exbald/data/testing.ipynb
doc22940/yellowbrick
9e6c4fbffc16fd542677fc7d73dbb9c3784b79ed
[ "Apache-2.0" ]
602
2016-05-18T15:02:35.000Z
2022-03-27T16:57:11.000Z
340.101179
58,190
0.915583
[ [ [ "# User testing for for Scikit-Yellowbrick\n", "_____no_output_____" ], [ "### Using data that was recorded from sensors during Data Science Certificate Program at GW\nhttps://github.com/georgetown-analytics/classroom-occupancy \n\nData consist of temperature, humidity, CO2 levels, light, # of bluetooth devices, noise levels and count of people in the room.", "_____no_output_____" ] ], [ [ "import pandas as pd\n%matplotlib inline", "_____no_output_____" ], [ "dataset = pd.read_csv('dataset.csv')", "_____no_output_____" ], [ "dataset.head(5)", "_____no_output_____" ], [ "dataset.count_total.describe()", "_____no_output_____" ], [ "#add a new column to create a binary class for room occupancy \ncountmed = dataset.count_total.median()\ndataset['room_occupancy'] = dataset['count_total'].apply(lambda x: 'occupied' if x > 4 else 'empty')", "_____no_output_____" ], [ "# map room occupancy to a number\ndataset['room_occupancy_num'] = dataset.room_occupancy.map({'empty':0, 'occupied':1})", "_____no_output_____" ], [ "dataset.head(5)", "_____no_output_____" ], [ "dataset.room_occupancy.describe()", "_____no_output_____" ], [ "import os\nimport sys \n\n# Modify the path \nsys.path.append(\"..\")\n\nimport pandas as pd\nimport yellowbrick as yb \nimport matplotlib.pyplot as plt\n\nplt.rcParams['figure.figsize'] = (12, 8)", "_____no_output_____" ], [ "g = yb.anscombe()", "_____no_output_____" ] ], [ [ "## Feature Analysis", "_____no_output_____" ], [ "Feature analysis visualizers are designed to visualize instances in data space in order to detect features or targets that might impact downstream fitting. Because ML operates on high-dimensional data sets (usually at least 35), the visualizers focus on aggregation, optimization, and other techniques to give overviews of the data. It is our intent that the steering process will allow the data scientist to zoom and filter and explore the relationships between their instances and between dimensions.\n\nAt the moment we have three feature analysis visualizers implemented:\n\nRank2D: rank pairs of features to detect covariance\n\nRadViz: plot data points along axes ordered around a circle to detect separability\n\nParallel Coordinates: plot instances as lines along vertical axes to detect clusters\n\nFeature analysis visualizers implement the Transformer API from Scikit-Learn, meaning they can be used as intermediate transform steps in a Pipeline (particularly a VisualPipeline). They are instantiated in the same way, and then fit and transform are called on them, which draws the instances correctly. Finally show or show is called which displays the image.", "_____no_output_____" ] ], [ [ "from yellowbrick.features.rankd import Rank2D \nfrom yellowbrick.features.radviz import RadViz \nfrom yellowbrick.features.pcoords import ParallelCoordinates", "_____no_output_____" ] ], [ [ "### Rank2D\n\nRank1D and Rank2D evaluate single features or pairs of features using a variety of metrics that score the features on the scale [-1, 1] or [0, 1] allowing them to be ranked. 
A similar concept to SPLOMs, the scores are visualized on a lower-left triangle heatmap so that patterns between pairs of features can be easily discerned for downstream analysis.", "_____no_output_____" ] ], [ [ "# Load the classification data set\ndata = dataset\n\n# Specify the features of interest\nfeatures = ['temperature','humidity','co2','light','noise','bluetooth_devices']\n\n# Extract the numpy arrays from the data frame\nX = data[features].as_matrix()\ny = data['count_total'].as_matrix()", "_____no_output_____" ], [ "# Instantiate the visualizer with the Covariance ranking algorithm \nvisualizer = Rank2D(features=features, algorithm='covariance')\n\nvisualizer.fit(X, y) # Fit the data to the visualizer\nvisualizer.transform(X) # Transform the data\nvisualizer.show() # Draw/show/show the data", "_____no_output_____" ], [ "# Instantiate the visualizer with the Pearson ranking algorithm\nvisualizer = Rank2D(features=features, algorithm='pearson')\n\nvisualizer.fit(X, y) # Fit the data to the visualizer\nvisualizer.transform(X) # Transform the data\nvisualizer.show() # Draw/show/show the data", "_____no_output_____" ] ], [ [ "### RadViz\n\nRadViz is a multivariate data visualization algorithm that plots each feature dimension uniformly around the circumference of a circle then plots points on the interior of the circle such that the point normalizes its values on the axes from the center to each arc. This mechanism allows as many dimensions as will easily fit on a circle, greatly expanding the dimensionality of the visualization.\nData scientists use this method to detect separability between classes. E.g. is there an opportunity to learn from the feature set or is there just too much noise?", "_____no_output_____" ] ], [ [ "# Specify the features of interest and the classes of the target \nfeatures = ['temperature','humidity','co2','light','noise','bluetooth_devices']\nclasses = ['empty', 'occupied']\n\n# Extract the numpy arrays from the data frame \nX = data[features].as_matrix()\ny = data.room_occupancy_num.as_matrix()", "_____no_output_____" ], [ "# Instantiate the visualizer\nvisualizer = RadViz(classes=classes, features=features)\n\nvisualizer.fit(X, y) # Fit the data to the visualizer\nvisualizer.transform(X) # Transform the data\nvisualizer.show() # Draw/show/show the data", "_____no_output_____" ] ], [ [ "For regression, the RadViz visualizer should use a color sequence to display the target information, as opposed to discrete colors.", "_____no_output_____" ], [ "## Parallel Coordinates\n\n### !!! On this step notebook crashes and has to be restarted", "_____no_output_____" ] ], [ [ "# Specify the features of interest and the classes of the target \n#features = ['temperature','humidity','co2','light','noise','bluetooth_devices']\n#classes = ['empty', 'occupied']\n\n# Extract the numpy arrays from the data frame \n#X = data[features].as_matrix()\n#y = data.room_occupancy_num.as_matrix()", "_____no_output_____" ], [ "# Instantiate the visualizer\n#visualizer = ParallelCoordinates(classes=classes, features=features)\n\n#visualizer.fit(X, y) # Fit the data to the visualizer\n#visualizer.transform(X) # Transform the data\n#visualizer.show() # Draw/show/show the data", "_____no_output_____" ] ], [ [ "## Regressor Evaluation", "_____no_output_____" ], [ "Regression models attempt to predict a target in a continuous space. Regressor score visualizers display the instances in model space to better understand how the model is making predictions. 
We currently have implemented two regressor evaluations:\n\nResiduals Plot: plot the difference between the expected and actual values\n\nPrediction Error: plot expected vs. the actual values in model space\n\nEstimator score visualizers wrap Scikit-Learn estimators and expose the Estimator API such that they have fit(), predict(), and score() methods that call the appropriate estimator methods under the hood. Score visualizers can wrap an estimator and be passed in as the final step in a Pipeline or VisualPipeline.", "_____no_output_____" ] ], [ [ "# Regression Evaluation Imports \n\nfrom sklearn.linear_model import Ridge, Lasso \nfrom sklearn.model_selection import train_test_split\n\nfrom yellowbrick.regressor import PredictionError, ResidualsPlot", "_____no_output_____" ] ], [ [ "### Residuals Plot\n\nA residual plot shows the residuals on the vertical axis and the independent variable on the horizontal axis. If the points are randomly dispersed around the horizontal axis, a linear regression model is appropriate for the data; otherwise, a non-linear model is more appropriate.", "_____no_output_____" ] ], [ [ "# Load the data\ndf = data\nfeature_names = ['temperature','humidity','co2','light','noise','bluetooth_devices']\ntarget_name = 'count_total'\n\n# Get the X and y data from the DataFrame \nX = df[feature_names].as_matrix()\ny = df[target_name].as_matrix() \n\n# Create the train and test data \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)", "_____no_output_____" ], [ "# Instantiate the linear model and visualizer \nridge = Ridge()\nvisualizer = ResidualsPlot(ridge)\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data \ng = visualizer.show() # Draw/show/show the data", "_____no_output_____" ] ], [ [ "### Prediction Error Plot\n\nPlots the actual targets from the dataset against the predicted values generated by our model. This allows us to see how much variance is in the model. Data scientists diagnose this plot by comparing against the 45 degree line, where the prediction exactly matches the model.", "_____no_output_____" ] ], [ [ "# Load the data\ndf = data\nfeature_names = ['temperature','humidity','co2','light','noise','bluetooth_devices']\ntarget_name = 'count_total'\n\n# Get the X and y data from the DataFrame \nX = df[feature_names].as_matrix()\ny = df[target_name].as_matrix() \n\n# Create the train and test data \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)", "_____no_output_____" ], [ "# Instantiate the linear model and visualizer \nlasso = Lasso()\nvisualizer = PredictionError(lasso)\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data \ng = visualizer.show() # Draw/show/show the data", "_____no_output_____" ] ], [ [ "## Classifier Evaluation\n\nClassification models attempt to predict a target in a discrete space, that is assign an instance of dependent variables one or more categories. Classification score visualizers display the differences between classes as well as a number of classifier-specific visual evaluations. 
We currently have implemented three classifier evaluations:\n\nClassificationReport: Presents the confusion matrix of the classifier as a heatmap\n\nROCAUC: Presents the graph of receiver operating characteristics along with area under the curve\n\nClassBalance: Displays the difference between the class balances and support\n\nEstimator score visualizers wrap Scikit-Learn estimators and expose the Estimator API such that they have fit(), predict(), and score() methods that call the appropriate estimator methods under the hood. Score visualizers can wrap an estimator and be passed in as the final step in a Pipeline or VisualPipeline.", "_____no_output_____" ] ], [ [ "# Classifier Evaluation Imports \n\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import LogisticRegression \nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nfrom yellowbrick.classifier import ClassificationReport, ROCAUC, ClassBalance", "_____no_output_____" ] ], [ [ "### Classification report\n\nThe classification report visualizer displays the precision, recall, and F1 scores for the model. Integrates numerical scores as well color-coded heatmap in order for easy interpretation and detection.", "_____no_output_____" ] ], [ [ "# Load the classification data set\ndata = dataset\n\n# Specify the features of interest and the classes of the target \nfeatures = ['temperature','humidity','co2','light','noise','bluetooth_devices']\nclasses = ['empty', 'occupied']\n\n# Extract the numpy arrays from the data frame \nX = data[features].as_matrix()\ny = data.room_occupancy_num.as_matrix()\n\n# Create the train and test data \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)", "_____no_output_____" ], [ "# Instantiate the classification model and visualizer \nbayes = GaussianNB()\nvisualizer = ClassificationReport(bayes, classes=classes)\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data \ng = visualizer.show() # Draw/show/show the data", "_____no_output_____" ] ], [ [ "### ROCAUC\n\nPlot the ROC to visualize the tradeoff between the classifier's sensitivity and specificity.", "_____no_output_____" ] ], [ [ "# Instantiate the classification model and visualizer \nlogistic = LogisticRegression()\nvisualizer = ROCAUC(logistic)\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data \ng = visualizer.show() # Draw/show/show the data", "_____no_output_____" ] ], [ [ "### ClassBalance\n\nClass balance chart that shows the support for each class in the fitted classification model.", "_____no_output_____" ] ], [ [ "# Instantiate the classification model and visualizer \nforest = RandomForestClassifier()\nvisualizer = ClassBalance(forest, classes=classes)\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data \ng = visualizer.show() # Draw/show/show the data", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0b8c0828032e992ed70e242df1700d75ca1e8f8
26,521
ipynb
Jupyter Notebook
Data Analyst with Python/18_Introduction_to_Databases_in_Python/18_5_Putting it all together.ipynb
brendensong/DataCamp
c9768e08476a2139ec630663b46c06a83c06f311
[ "FSFAP" ]
1
2021-07-08T07:08:12.000Z
2021-07-08T07:08:12.000Z
Data Analyst with Python/18_Introduction_to_Databases_in_Python/18_5_Putting it all together.ipynb
brendensong/DataCamp
c9768e08476a2139ec630663b46c06a83c06f311
[ "FSFAP" ]
null
null
null
Data Analyst with Python/18_Introduction_to_Databases_in_Python/18_5_Putting it all together.ipynb
brendensong/DataCamp
c9768e08476a2139ec630663b46c06a83c06f311
[ "FSFAP" ]
7
2021-07-25T19:14:30.000Z
2022-03-27T13:27:36.000Z
37.196353
511
0.594623
[ [ [ "# 5. Putting it all together\n**Bring together all of the skills you acquired in the previous chapters to work on a real-life project. From connecting to a database and populating it, to reading and querying it.**", "_____no_output_____" ], [ "It's time to put all your effort so far to good use on a census case study.\n\n### Census case study\nThe case study is broken down into three parts. \n1. we are going to prepare SQLAlchemy and the database. \n2. we will load the data into the database. \n3. we solve a few data science type problems with our query knowledge.\n\n### Part 1: preparing SQLAlchemy and the database\nFor part 1 we are going to focus on preparing SQLAlchemy and the database. You might remember this example from Chapter 1. We import `create_engine` and `Metadata`, then create the engine and initialize the metadata.\n```python\nfrom sqlalchemy import create_engine, MetaData\nengine = create_engine('sqlite:///census_nyc.sqlite')\nmetadata = MetaData()\n```\n\n### Part 1: preparing SQLAlchemy and the database\nThen we will build the census table to hold our data. You might remember the employees table we built in Chapter 4. We begin by importing the `Table` and `Column` objects along with all the types we are going to use in our table. Next we define our Table using the Table object by giving it a name, the metadata object, and then each of the columns we want in our table. Finally we create the table in the database by using the create all method on the metadata with the engine.\n```python\nfrom sqlalchemy import Table, Column, String, Integer, Numeric, Boolean\n\nengine = create_engine('sqlite:///')\nmetadata = MetaData()\n\nemployees = Table('employees', metadata,\n Column('id', Integer()),\n Column('name', String(255)),\n Column('salary', Numeric()),\n Column('active', Boolean()))\nmetadata.create_all(engine)\n```", "_____no_output_____" ], [ "## Setup the engine and metadata\nIn this exercise, your job is to create an engine to the database that will be used in this chapter. Then, you need to initialize its metadata.\n\nRecall how you did this in Chapter 1 by leveraging `create_engine()` and `MetaData()`.", "_____no_output_____" ], [ "- Import `create_engine` and `MetaData` from `sqlalchemy`.\n- Create an `engine` to the chapter 5 database by using `'sqlite:///chapter5.sqlite'` as the connection string.\n- Create a MetaData object as `metadata`.", "_____no_output_____" ] ], [ [ "# Import create_engine, MetaData\nfrom sqlalchemy import create_engine, MetaData\n\n# Define an engine to connect to chapter5.sqlite: engine\nengine = create_engine('sqlite:///chapter5.sqlite')\n\n# Initialize MetaData: metadata\nmetadata = MetaData()", "_____no_output_____" ] ], [ [ "## Create the table to the database\nHaving setup the engine and initialized the metadata, you will now define the `census` table object and then create it in the database using the `metadata` and `engine` from the previous exercise. 
To create it in the database, you will have to use the `.create_all()` method on the `metadata` with `engine` as the argument.", "_____no_output_____" ], [ "- Import `Table`, `Column`, `String`, and `Integer` from `sqlalchemy`.\n- Define a `census` table with the following columns:\n - `'state'` - String - length of 30\n - `'sex'` - String - length of 1\n - `'age'` - Integer\n - `'pop2000'` - Integer\n - `'pop2008'` - Integer\n- Create the table in the database using the `metadata` and `engine`.", "_____no_output_____" ] ], [ [ "# Import Table, Column, String, and Integer\nfrom sqlalchemy import Table, Column, String, Integer\n\n# Build a census table: census\ncensus = Table('census', metadata,\n Column('state', String(30)),\n Column('sex', String(1)),\n Column('age', Integer),\n Column('pop2000', Integer),\n Column('pop2008', Integer))\n\n# Create the table in the database\nmetadata.create_all(engine)", "_____no_output_____" ] ], [ [ "---\n## Populating the database\nWith our table in place, we can now load the data into it. The US Census Agency gave us a CSV file full of data that we need to load into the table.\n\n### Part 2: populating the database\nWe'll start that by building a `values_list` like we did in chapter 4 with this exercise. \n```python\nvalues_list = []\nfor row in csv_reader:\n data = {'state': row[0], 'sex': row[1], 'age': row[2],\n 'pop2000': row[3], 'pop2008': row[4]}\n values_list.append(data)\n```\nWe begin by defining an empty list then looping over the rows of the CSV. Then we build a dictionary for each CSV row that has the data for that row matched up with the column we want to store it in. Then we append the dictionary to the values list.\n\n### Part 2: Populating the Database\nNow we can insert that `values_list` as we did in Chapter 4 like this example. We we start by importing the `insert` statement. 
Then we build an insert statement for our table, finally we use the execute method on our connection with the statement and values list to insert the data into the table.\n```python\nfrom sqlalchemy import insert\nstmt = insert(employees)\nresult_proxy = connection.execute(stmt, values_list)\nprint(result_proxy.rowcount)\n```\n```\n\n2\n```\nTo review how many rows were inserted, we use the `rowcount` method of the `ResultProxy`.", "_____no_output_____" ], [ "## Reading the data from the CSV\nLeverage the Python CSV module from the standard library and load the data into a list of dictionaries.", "_____no_output_____" ], [ "- Create an empty list called `values_list`.\n- Iterate over the rows of `csv_reader` with a for loop, creating a dictionary called `data` for each row and append it to `values_list`.\n - Within the for loop, `row` will be a list whose entries are `'state'`, `'sex'`, `'age'`, `'pop2000'` and `'pop2008'` (in that order).", "_____no_output_____" ] ], [ [ "import csv\n\ncsv_reader = csv.reader(open('census.csv'))\n\n# Create an empty list: values_list\nvalues_list = []\n\n# Iterate over the rows\nfor row in csv_reader:\n # Create a dictionary with the values\n data = {'state': row[0], 'sex': row[1], 'age': row[2], \n 'pop2000': row[3], 'pop2008': row[4]}\n # Append the dictionary to the values list\n values_list.append(data)", "_____no_output_____" ] ], [ [ "## Load data from a list into the Table\nUsing the multiple insert pattern, in this exercise, you will load the data from `values_list` into the table.", "_____no_output_____" ], [ "- Import `insert` from `sqlalchemy`.\n- Build an insert statement for the `census` table.\n- Execute the statement `stmt` along with `values_list`. You will need to pass them both as arguments to `connection.execute()`.\n- Print the `rowcount` attribute of `results`.", "_____no_output_____" ] ], [ [ "# Import insert\nfrom sqlalchemy import insert\n\n# Build insert statement: stmt\nstmt = insert(census)\n\n# Use values_list to insert data: results\nresults = connection.execute(stmt, values_list)\n\n# Print rowcount\nprint(results.rowcount)", "8772\n" ] ], [ [ "---\n## Querying the database\n### Part 3: answering data science questions with queries\nHere is an example of how we calculated an average in an exercise from Chapter 3. We began by importing the select statement. Next we built a select statement that creates a weighted average. We do this by summing the result of multiplying the age with the population and dividing that by the sum of the total population and labeling that average age. Next we grouped by the sex column to determine the average `age` for each `sex`. Finally, we executed the query and fetched all the results.\n```python\nfrom sqlalchemy import select\nstmt = select([census.columns.sex,\n (func.sum(census.columns.pop2008 *\n census.columns.age) /\n func.sum(census.columns.pop2008)\n ). label('avarage_age')])\nstmt = stmt.group_by(census.columns.sex)\nresutls = connection.execute(stmt).fetchall()\n```\n\n### Part 3: answering data science questions with queries\nWe learned how to calculate a percentage by using the case and cast clauses in Chapter 3. We begin by importing `case`, `cast`, and `Float`. Then we build a select statement that calculates the sum of the `pop2008` column in cases where the state is New York. Then we divided that by the sum of the total population which is cast to a Float so we would get Decimal values. 
Finally, we multiplied by 100 to get a percentage and labeled it `ny_percent`.\n```python\nfrom sqlalchemy import case, cast, Float\nstmt = select([\n (func.sum(\n case([\n (census.columns.state == 'New York',\n census.columns.pop2008)\n ], else_=0)) /\n cast(func.sum(census.columns.pop2008),\n Float) * 100). label('ny_percent')])\n```\n\nAlso from Chapter 3, we learned how calculate the difference between two columns grouped by another column. We start by building a `select` statement, that selects the column we want to determine the change by, which in this case is `age`. Then we calculate the difference between the population in 2008 and in 2000, and we label that `pop_change`. Remember to wrap the difference calculation in parentheses so you can label it. Next, we order by `pop_change` and finally we limit it to just 5 results.\n```python\nstmt = select([census.columns.age,\n (census.columns.pop2008 -\n census.columns.pop2000).label('pop_chage')\n ])\nstmt = stmt.order_by('pop_change')\nstmt = stmt.limit(5)\n```", "_____no_output_____" ], [ "## Determine the average age by population\nTo calculate a weighted average, we first find the total sum of weights multiplied by the values we're averaging, then divide by the sum of all the weights.\n\nFor example, if we wanted to find a weighted average of `data = [10, 30, 50]` weighted by `weights = [2,4,6]`, we would compute *(2*10 + 4*30 + 6*50) / (2+4+6)*, or `sum(weights * data) / sum(weights)`.\n\nIn this exercise, however, you will make use of **`func.sum()`** together with select to `select` the weighted average of a column from a table. You will still work with the `census` data, and you will compute the average of age weighted by state population in the year 2000, and then group this weighted average by sex.", "_____no_output_____" ], [ "- Import `select` and `func` from `sqlalchemy`.\n- Write a statement to `select` the average of age (`age`) weighted by population in **2000** (`pop2000`) from `census`.", "_____no_output_____" ] ], [ [ "# Import select and func\nfrom sqlalchemy import select, func\n\n# Select the average of age weighted by pop2000\nstmt = select([func.sum(census.columns.pop2000 *\n census.columns.age) /\n func.sum(census.columns.pop2000)])", "_____no_output_____" ] ], [ [ "- Modify the select statement to alias the new column with weighted average as `'average_age'` using `.label()`.", "_____no_output_____" ] ], [ [ "\n# Import select and func\nfrom sqlalchemy import select, func\n\n# Relabel the new column as average_age\nstmt = select([(func.sum(census.columns.pop2000 * \n census.columns.age) / \n func.sum(census.columns.pop2000)).label('average_age')\n\t\t\t ])", "_____no_output_____" ] ], [ [ "- Modify the select statement to select the `sex` column of `census` in addition to the weighted average, with the `sex` column coming first.\n- Group by the `sex` column of `census`.", "_____no_output_____" ] ], [ [ "# Import select and func\nfrom sqlalchemy import select, func\n\n# Add the sex column to the select statement\nstmt = select([census.columns.sex,\n (func.sum(census.columns.pop2000 * \n census.columns.age) / \n func.sum(census.columns.pop2000)).label('average_age'), \n\t\t\t ])\n\n# Group by sex\nstmt = stmt.group_by(census.columns.sex)", "_____no_output_____" ] ], [ [ "- Execute the statement on the `connection` and fetch all the results.\n- Loop over the results and print the values in the `sex` and `average_age` columns for each record in the results.", "_____no_output_____" ] ], [ [ "\n# Import select and 
func\nfrom sqlalchemy import select, func\n\n# Select sex and average age weighted by 2000 population\nstmt = select([census.columns.sex,\n (func.sum(census.columns.pop2000 * \n census.columns.age) / \n func.sum(census.columns.pop2000)).label('average_age')\n ])\n\n# Group by sex\nstmt = stmt.group_by(census.columns.sex)\n\n# Execute the query and fetch all the results\nconnection = engine.connect()\nresults = connection.execute(stmt).fetchall()\n\n# Print the sex and average age column for each result\nfor result in results:\n print(result.sex, result.average_age)", "F 37\nM 34\n" ] ], [ [ "## Determine the percentage of population by gender and state\nIn this exercise, you will write a query to determine the percentage of the population in 2000 that comprised of women. You will group this query by state.", "_____no_output_____" ], [ "- Import `case`, `cast` and `Float` from `sqlalchemy`.\n- Define a statement to select `state` and the percentage of women in 2000.\n - Inside `func.sum()`, use `case()` to select women (using the `sex` column) from `pop2000`. Remember to specify `else_=0` if the `sex` is not `'F'`.\n - To get the percentage, divide the number of women in the year 2000 by the overall population in 2000. Cast the divisor - `census.columns.pop2000` - to `Float` before multiplying by 100.\n- Group the query by `state`.\n- Execute the query and store it as `results`.\n- Print `state` and `percent_female` for each record.", "_____no_output_____" ] ], [ [ "\n# import case, cast and Float from sqlalchemy\nfrom sqlalchemy import case, cast, Float, desc\n\n# Build a query to calculate the percentage of women in 2000: stmt\nstmt = select([census.columns.state, \n (func.sum(\n case([\n (census.columns.sex == 'F', \n census.columns.pop2000)\n ], else_=0)) /\n cast(func.sum(census.columns.pop2000), \n Float) * 100).label('percent_female')\n])\n\n# Group By state\nstmt = stmt.group_by(census.columns.state)\n\nstmt = stmt.order_by(desc('percent_female'))\n\n# Execute the query and store the results: results\nresults = connection.execute(stmt).fetchall()\n\n# Print the percentage\nfor result in results:\n print(result.state, result.percent_female)", "District of Columbia 53.129626141738385\nRhode Island 52.07343391902215\nMaryland 51.93575549972231\nMississippi 51.92229481794672\nMassachusetts 51.843023571316785\nNew York 51.83453865150073\nAlabama 51.832407770179465\nLouisiana 51.75351596554121\nPennsylvania 51.74043473051053\nSouth Carolina 51.73072129765755\nConnecticut 51.66816507130644\nVirginia 51.657252447241795\nDelaware 51.61109733558627\nNew Jersey 51.51713956125773\nMaine 51.50570813418951\nNorth Carolina 51.482262322084594\nMissouri 51.46888602639692\nOhio 51.46550350015544\nTennessee 51.430689699449275\nWest Virginia 51.40042318092286\nFlorida 51.36488001165242\nKentucky 51.32687036927168\nArkansas 51.26992846221834\nHawaii 51.118011836915514\nGeorgia 51.11408350339436\nOklahoma 51.11362457075227\nIllinois 51.11224234802867\nNew Mexico 51.0471720798335\nVermont 51.018573209949466\nMichigan 50.97246518318712\nIndiana 50.95480313297678\nIowa 50.950398342534264\nNebraska 50.8584549336086\nNew Hampshire 50.858019844961746\nKansas 50.821864107754735\nWisconsin 50.61486452653393\nSouth Dakota 50.52583581373275\nWashington 50.518565087218334\nTexas 50.515721664207966\nNorth Dakota 50.50069363231332\nMinnesota 50.49332944301148\nOregon 50.4294670361772\nCalifornia 50.35233214901979\nMontana 50.32202690728538\nArizona 50.22361303057914\nIdaho 49.98972623903102\nUtah 
49.97295275106927\nWyoming 49.94595542648306\nColorado 49.84767060299562\nNevada 49.36736361384359\nAlaska 49.301497893484594\n" ] ], [ [ "*Interestingly, the District of Columbia had the highest percentage of women in 2000, while Alaska had the highest percentage of males.*", "_____no_output_____" ], [ "## Determine the difference by state from the 2000 and 2008 censuses\nIn this final exercise, you will write a query to calculate the states that changed the most in population. You will limit your query to display only the top 10 states.", "_____no_output_____" ], [ "- Build a statement to:\n - Select `state`.\n - Calculate the difference in population between 2008 (`pop2008`) and 2000 (`pop2000`).\n- Group the query by `census.columns.state` using the `.group_by()` method on `stmt`.\n- Order by `'pop_change'` in descending order using the `.order_by()` method with the `desc()` function on `'pop_change'`.\n- ~Limit the query to the top `10` states using the `.limit()` method.~\n- Execute the query and store it as `results`.\n- Print the state and the population change for each result. ", "_____no_output_____" ] ], [ [ "# Build query to return state name and population difference from 2008 to 2000\nstmt = select([census.columns.state, \n (census.columns.pop2008-\n census.columns.pop2000).label('pop_change')\n])\n\n# Group by State\nstmt = stmt.group_by(census.columns.state)\n\n# Order by Population Change\nstmt = stmt.order_by(desc('pop_change'))\n\n# Limit to top 10\n##stmt = stmt.limit(10)\n\n# Use connection to execute the statement and fetch all results\nresults = connection.execute(stmt).fetchall()\n\n# Print the state and population change for each record\nfor result in results:\n print('{}:{}'.format(result.state, result.pop_change))", "Texas:40137\nCalifornia:35406\nFlorida:21954\nArizona:14377\nGeorgia:13357\nNorth Carolina:11574\nVirginia:6639\nColorado:6425\nUtah:5934\nIllinois:5412\nNevada:5367\nWashington:4666\nTennessee:4621\nMissouri:4547\nMinnesota:3763\nOklahoma:3677\nPennsylvania:3384\nSouth Carolina:3360\nWisconsin:2945\nOregon:2817\nMaryland:2551\nArkansas:2549\nIdaho:2500\nIndiana:2336\nNew Mexico:2095\nKentucky:2021\nNebraska:1924\nIowa:1915\nMississippi:1864\nNew York:1851\nNew Jersey:1773\nKansas:1772\nOhio:1585\nAlabama:1576\nHawaii:1454\nSouth Dakota:990\nMontana:960\nDelaware:858\nWyoming:830\nAlaska:740\nDistrict of Columbia:659\nNorth Dakota:585\nWest Virginia:537\nMaine:358\nRhode Island:197\nNew Hampshire:189\nVermont:7\nMassachusetts:-242\nLouisiana:-300\nConnecticut:-392\nMichigan:-2592\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
d0b8d40d9611ce8bc585c1ad34653d4d6d55bcf6
27,679
ipynb
Jupyter Notebook
docs/human_ensembl_regulatory/create_datasets.ipynb
ML-Bioinfo-CEITEC/genomic_benchmarks
a9ad26e0c6741c3b5a93b9427c05f79c5c8faf92
[ "Apache-2.0" ]
5
2021-11-11T09:39:15.000Z
2022-02-11T17:47:28.000Z
docs/human_ensembl_regulatory/create_datasets.ipynb
ML-Bioinfo-CEITEC/genomic_benchmarks
a9ad26e0c6741c3b5a93b9427c05f79c5c8faf92
[ "Apache-2.0" ]
14
2021-10-15T08:00:05.000Z
2022-02-23T10:17:00.000Z
docs/human_ensembl_regulatory/create_datasets.ipynb
ML-Bioinfo-CEITEC/genomic_benchmarks
a9ad26e0c6741c3b5a93b9427c05f79c5c8faf92
[ "Apache-2.0" ]
1
2021-11-17T14:59:56.000Z
2021-11-17T14:59:56.000Z
43.248438
283
0.592471
[ [ [ "# Prepare environment", "_____no_output_____" ] ], [ [ "!pip install git+https://github.com/katarinagresova/ensembl_scraper.git@6d3bba8e6be7f5ead58a3bbaed6a4e8cd35e62fd", "Collecting git+https://github.com/katarinagresova/ensembl_scraper.git@6d3bba8e6be7f5ead58a3bbaed6a4e8cd35e62fd\n Cloning https://github.com/katarinagresova/ensembl_scraper.git (to revision 6d3bba8e6be7f5ead58a3bbaed6a4e8cd35e62fd) to /tmp/pip-req-build-fz97hoif\n Running command git clone --filter=blob:none -q https://github.com/katarinagresova/ensembl_scraper.git /tmp/pip-req-build-fz97hoif\n Running command git rev-parse -q --verify 'sha^6d3bba8e6be7f5ead58a3bbaed6a4e8cd35e62fd'\n Running command git fetch -q https://github.com/katarinagresova/ensembl_scraper.git 6d3bba8e6be7f5ead58a3bbaed6a4e8cd35e62fd\n Resolved https://github.com/katarinagresova/ensembl_scraper.git to commit 6d3bba8e6be7f5ead58a3bbaed6a4e8cd35e62fd\n Preparing metadata (setup.py) ... \u001b[?25ldone\n\u001b[?25hRequirement already satisfied: bio in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (1.3.3)\nRequirement already satisfied: biopython in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (1.79)\nRequirement already satisfied: certifi in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (2021.10.8)\nRequirement already satisfied: charset-normalizer in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (2.0.11)\nRequirement already satisfied: idna in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (3.3)\nRequirement already satisfied: joblib in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (1.1.0)\nRequirement already satisfied: numpy in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (1.22.2)\nRequirement already satisfied: pandas in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (1.4.0)\nRequirement already satisfied: plac in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (1.3.4)\nRequirement already satisfied: pyfiglet in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (0.8.post1)\nRequirement already satisfied: python-dateutil in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (2.8.2)\nRequirement already satisfied: pytz in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (2021.3)\nRequirement already satisfied: PyYAML>=5.4.1 in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (6.0)\nRequirement already satisfied: requests in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (2.27.1)\nRequirement already satisfied: scikit-learn in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (1.0.2)\nRequirement already satisfied: scipy in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (1.8.0)\nRequirement already satisfied: six in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (1.16.0)\nRequirement already satisfied: threadpoolctl in 
/home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (3.1.0)\nRequirement already satisfied: tqdm in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (4.62.3)\nRequirement already satisfied: twobitreader in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (3.1.7)\nRequirement already satisfied: urllib3 in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from scraper==0.0.1) (1.26.8)\nRequirement already satisfied: mygene in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from bio->scraper==0.0.1) (3.2.2)\nRequirement already satisfied: biothings-client>=0.2.6 in /home/katarina/git/genomic_benchmarks/venv/lib/python3.8/site-packages (from mygene->bio->scraper==0.0.1) (0.2.6)\n\u001b[33mWARNING: You are using pip version 21.3.1; however, version 22.0.3 is available.\nYou should consider upgrading via the '/home/katarina/git/genomic_benchmarks/venv/bin/python -m pip install --upgrade pip' command.\u001b[0m\n" ] ], [ [ "# Create config file", "_____no_output_____" ] ], [ [ "import yaml\n\nconfig = {\n \"root_dir\": \"../../datasets/\",\n \"organisms\": {\n \"homo_sapiens\": {\n \"regulatory_feature\"\n }\n }\n}\n\nuser_config = 'user_config.yaml'\nwith open(user_config, 'w') as handle:\n yaml.dump(config, handle)", "_____no_output_____" ] ], [ [ "# Prepare directories", "_____no_output_____" ] ], [ [ "from pathlib import Path\n\nBASE_FILE_PATH = Path(\"../../datasets/human_ensembl_regulatory/\")\n\n# copied from https://stackoverflow.com/a/57892171\ndef rm_tree(pth: Path):\n for child in pth.iterdir():\n if child.is_file():\n child.unlink()\n else:\n rm_tree(child)\n pth.rmdir()\n\nif BASE_FILE_PATH.exists():\n rm_tree(BASE_FILE_PATH)", "_____no_output_____" ] ], [ [ "# Run tool", "_____no_output_____" ] ], [ [ "!python -m scraper.ensembl_scraper -c user_config.yaml", "Processing organisms: 0%| | 0/1 [00:00<?, ?it/s]\nProcessing feature files: 0%| | 0/1 [00:00<?, ?it/s]\u001b[AINFO:root:download_file(): Going to download file from path ftp://ftp.ensembl.org/pub/release-100/mysql/regulation_mart_100/hsapiens_regulatory_feature__regulatory_feature__main.txt.gz\nINFO:root:download_file(): File downloaded to path ../../datasets//tmp//homo_sapiens_regulatory_feature.txt.gz.\nINFO:root:parse_feature_file(): Going to parse file ../../datasets//tmp//homo_sapiens_regulatory_feature.txt.gz\nINFO:root:parse_feature_file(): Done parsing file ../../datasets//tmp//homo_sapiens_regulatory_feature.txt.gz\n\n\nProcessing feature types: 0%| | 0/6 [00:00<?, ?it/s]\u001b[A\u001b[AINFO:root:find_sequences(): Going to find sequences based on genomic loci.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 downloaded to path ../../datasets//tmp/hg38.2bit.\nINFO:root:find_sequences(): Done finding sequences.\nINFO:root:remove_low_quality(): Going to preprocess sequences.\nINFO:root:remove_low_quality(): Original number of sequences: 141250\nINFO:root:remove_low_quality(): Number of sequences after contigs rejection: 141250\nINFO:root:remove_low_quality(): Number of sequences after outlier rejection: 123915\nINFO:root:remove_low_quality(): Number of sequences after Ns rejection: 123909\nINFO:root:remove_low_quality(): Done preprocessing sequences.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. 
Not going to download.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\n\n\nProcessing feature types: 17%|██▌ | 1/6 [46:10<3:50:52, 2770.60s/it]\u001b[A\u001b[AINFO:root:find_sequences(): Going to find sequences based on genomic loci.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\nINFO:root:find_sequences(): Done finding sequences.\nINFO:root:remove_low_quality(): Going to preprocess sequences.\nINFO:root:remove_low_quality(): Original number of sequences: 177376\nINFO:root:remove_low_quality(): Number of sequences after contigs rejection: 177376\nINFO:root:remove_low_quality(): Number of sequences after outlier rejection: 152129\nINFO:root:remove_low_quality(): Number of sequences after Ns rejection: 152106\nINFO:root:remove_low_quality(): Done preprocessing sequences.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\n\n\nProcessing feature types: 33%|████▎ | 2/6 [1:17:19<2:29:21, 2240.25s/it]\u001b[A\u001b[AINFO:root:find_sequences(): Going to find sequences based on genomic loci.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\nINFO:root:find_sequences(): Done finding sequences.\nINFO:root:remove_low_quality(): Going to preprocess sequences.\nINFO:root:remove_low_quality(): Original number of sequences: 132592\nINFO:root:remove_low_quality(): Number of sequences after contigs rejection: 132592\nINFO:root:remove_low_quality(): Number of sequences after outlier rejection: 106894\nINFO:root:remove_low_quality(): Number of sequences after Ns rejection: 106890\nINFO:root:remove_low_quality(): Done preprocessing sequences.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\n\n\nProcessing feature types: 50%|██████▌ | 3/6 [1:37:08<1:28:00, 1760.04s/it]\u001b[A\u001b[AINFO:root:find_sequences(): Going to find sequences based on genomic loci.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\nINFO:root:find_sequences(): Done finding sequences.\nINFO:root:remove_low_quality(): Going to preprocess sequences.\nINFO:root:remove_low_quality(): Original number of sequences: 97099\nINFO:root:remove_low_quality(): Number of sequences after contigs rejection: 96572\nINFO:root:remove_low_quality(): Number of sequences after outlier rejection: 87381\nINFO:root:remove_low_quality(): Number of sequences after Ns rejection: 87378\nINFO:root:remove_low_quality(): Done preprocessing sequences.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. 
Not going to download.\n\n\nProcessing feature types: 67%|██████████ | 4/6 [1:51:27<46:48, 1404.49s/it]\u001b[A\u001b[AINFO:root:find_sequences(): Going to find sequences based on genomic loci.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\nINFO:root:find_sequences(): Done finding sequences.\nINFO:root:remove_low_quality(): Going to preprocess sequences.\nINFO:root:remove_low_quality(): Original number of sequences: 35191\nINFO:root:remove_low_quality(): Number of sequences after contigs rejection: 35191\nINFO:root:remove_low_quality(): Number of sequences after outlier rejection: 32260\nINFO:root:remove_low_quality(): Number of sequences after Ns rejection: 32258\nINFO:root:remove_low_quality(): Done preprocessing sequences.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\n\n\nProcessing feature types: 83%|█████████████▎ | 5/6 [1:54:37<16:06, 966.54s/it]\u001b[A\u001b[AINFO:root:find_sequences(): Going to find sequences based on genomic loci.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\nINFO:root:find_sequences(): Done finding sequences.\nINFO:root:remove_low_quality(): Going to preprocess sequences.\nINFO:root:remove_low_quality(): Original number of sequences: 30436\nINFO:root:remove_low_quality(): Number of sequences after contigs rejection: 29820\nINFO:root:remove_low_quality(): Number of sequences after outlier rejection: 25816\nINFO:root:remove_low_quality(): Number of sequences after Ns rejection: 25816\nINFO:root:remove_low_quality(): Done preprocessing sequences.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. Not going to download.\nINFO:root:download_2bit_file(): Going to download 2bit file hg38\nINFO:root:download_2bit_file(): File for hg38 already exists. 
Not going to download.\n\n\nProcessing feature types: 100%|███████████████| 6/6 [1:56:39<00:00, 1166.62s/it]\u001b[A\u001b[A\n\nProcessing feature files: 100%|███████████████| 1/1 [1:56:59<00:00, 7019.15s/it]\u001b[A\nProcessing organisms: 100%|███████████████████| 1/1 [1:56:59<00:00, 7019.15s/it]\n" ] ], [ [ "# Reformating", "_____no_output_____" ] ], [ [ "!mkdir -p ../../datasets/human_ensembl_regulatory/train\n!mkdir -p ../../datasets/human_ensembl_regulatory/test", "_____no_output_____" ], [ "!mv ../../datasets/homo_sapiens_regulatory_feature_open_chromatin_region/train/positive.csv ../../datasets/human_ensembl_regulatory/train/ocr.csv\n!mv ../../datasets/homo_sapiens_regulatory_feature_open_chromatin_region/test/positive.csv ../../datasets/human_ensembl_regulatory/test/ocr.csv\n\n!mv ../../datasets/homo_sapiens_regulatory_feature_promoter/train/positive.csv ../../datasets/human_ensembl_regulatory/train/promoter.csv\n!mv ../../datasets/homo_sapiens_regulatory_feature_promoter/test/positive.csv ../../datasets/human_ensembl_regulatory/test/promoter.csv\n\n!mv ../../datasets/homo_sapiens_regulatory_feature_enhancer/train/positive.csv ../../datasets/human_ensembl_regulatory/train/enhancer.csv\n!mv ../../datasets/homo_sapiens_regulatory_feature_enhancer/test/positive.csv ../../datasets/human_ensembl_regulatory/test/enhancer.csv", "_____no_output_____" ], [ "def chop_sequnces(file_path, max_len):\n\n df = pd.read_csv(file_path)\n df_array = df.values\n\n new_df_array = []\n index = 0\n for row in df_array:\n splits = ((row[3] - row[2]) // max_len) + 1\n\n if splits == 1:\n new_df_array.append([index, row[1], row[2], row[3], row[4]])\n index += 1\n\n elif splits == 2:\n length = (row[3] - row[2]) // 2\n new_df_array.append([\n index,\n row[1],\n row[2],\n row[2] + length,\n row[4] \n ])\n index += 1\n new_df_array.append([\n index,\n row[1],\n row[2] + length + 1,\n row[3],\n row[4] \n ])\n index += 1\n else:\n length = (row[3] - row[2]) // splits\n new_df_array.append([\n index,\n row[1],\n row[2],\n row[2] + length,\n row[4] \n ])\n index += 1\n for i in range(1, splits - 1):\n new_df_array.append([\n index,\n row[1],\n row[2] + i*length + 1,\n row[2] + (i + 1)*length,\n row[4] \n ])\n index += 1\n\n new_df_array.append([\n index,\n row[1],\n row[2] + (splits - 1)*length + 1,\n row[3],\n row[4] \n ])\n index += 1\n new_df = pd.DataFrame(new_df_array, columns=df.columns)\n new_df.to_csv(file_path, index=False)", "_____no_output_____" ], [ "chop_sequnces(\"../../datasets/human_ensembl_regulatory/train/promoter.csv\", 700)\nchop_sequnces(\"../../datasets/human_ensembl_regulatory/test/promoter.csv\", 700)", "_____no_output_____" ], [ "!find ../../datasets/human_ensembl_regulatory/ -type f -name \"*.csv\" -exec gzip {} \\;", "_____no_output_____" ], [ "!mv ../../datasets/homo_sapiens_regulatory_feature_enhancer/metadata.yaml ../../datasets/human_ensembl_regulatory/metadata.yaml", "_____no_output_____" ], [ "with open(\"../../datasets/human_ensembl_regulatory/metadata.yaml\", \"r\") as stream:\n try:\n config = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\nconfig", "_____no_output_____" ], [ "new_config = {\n 'classes' : {\n 'ocr': {\n 'type': config['classes']['positive']['type'],\n 'url': config['classes']['positive']['url'],\n 'extra_processing': 'ENSEMBL_HUMAN_GENOME'\n },\n 'promoter': {\n 'type': config['classes']['positive']['type'],\n 'url': config['classes']['positive']['url'],\n 'extra_processing': 'ENSEMBL_HUMAN_GENOME'\n },\n 'enhancer': {\n 'type': 
config['classes']['positive']['type'],\n 'url': config['classes']['positive']['url'],\n 'extra_processing': 'ENSEMBL_HUMAN_GENOME'\n }\n },\n 'version': config['version']\n}\nnew_config ", "_____no_output_____" ], [ "with open(\"../../datasets/human_ensembl_regulatory/metadata.yaml\", 'w') as handle:\n yaml.dump(new_config, handle)", "_____no_output_____" ] ], [ [ "# Cleaning", "_____no_output_____" ] ], [ [ "!rm user_config.yaml\n!rm -rf ../../datasets/tmp/\n\n!rm -rf ../../datasets/homo_sapiens_regulatory_feature_CTCF_binding_site\n!rm -rf ../../datasets/homo_sapiens_regulatory_feature_enhancer\n!rm -rf ../../datasets/homo_sapiens_regulatory_feature_promoter\n!rm -rf ../../datasets/homo_sapiens_regulatory_feature_promoter_flanking_region\n!rm -rf ../../datasets/homo_sapiens_regulatory_feature_TF_binding_site\n!rm -rf ../../datasets/homo_sapiens_regulatory_feature_open_chromatin_region\n", "_____no_output_____" ] ], [ [ "# Testing", "_____no_output_____" ] ], [ [ "from genomic_benchmarks.loc2seq import download_dataset\n\ndownload_dataset(\"human_ensembl_regulatory\", local_repo=True)", "Reference /home/katarina/.genomic_benchmarks/fasta/Homo_sapiens.GRCh38.dna.toplevel.fa.gz already exists. Skipping.\n" ], [ "from genomic_benchmarks.data_check import info\n\ninfo(\"human_ensembl_regulatory\", 0, local_repo=True)", "Dataset `human_ensembl_regulatory` has 3 classes: enhancer, ocr, promoter.\n\nThe length of genomic intervals ranges from 71 to 802, with average 429.91753643694585 and median 401.0.\n\nTotally 289061 sequences have been found, 231348 for training and 57713 for testing.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0b8d4c8a7cc8fa4b5d38fbacaacb0c2c0117868
13,358
ipynb
Jupyter Notebook
tutorials/detect.ipynb
QRemy/gammapy
fe799e8a8e792d216fdb11fb7abcb64d58f273dd
[ "BSD-3-Clause" ]
null
null
null
tutorials/detect.ipynb
QRemy/gammapy
fe799e8a8e792d216fdb11fb7abcb64d58f273dd
[ "BSD-3-Clause" ]
null
null
null
tutorials/detect.ipynb
QRemy/gammapy
fe799e8a8e792d216fdb11fb7abcb64d58f273dd
[ "BSD-3-Clause" ]
null
null
null
32.740196
425
0.596347
[ [ [ "# Source detection with Gammapy\n\n## Context\n\nThe first task in a source catalogue production is to identify significant excesses in the data that can be associated to unknown sources and provide a preliminary parametrization in term of position, extent, and flux. In this notebook we will use Fermi-LAT data to illustrate how to detect candidate sources in counts images with known background.\n\n**Objective: build a list of significant excesses in a Fermi-LAT map**\n\n\n## Proposed approach \n\nThis notebook show how to do source detection with Gammapy using the methods available in `~gammapy.detect`.\nWe will use images from a Fermi-LAT 3FHL high-energy Galactic center dataset to do this:\n\n* perform adaptive smoothing on counts image\n* produce 2-dimensional test-statistics (TS)\n* run a peak finder to detect point-source candidates\n* compute Li & Ma significance images\n* estimate source candidates radius and excess counts\n\nNote that what we do here is a quick-look analysis, the production of real source catalogs use more elaborate procedures.\n\nWe will work with the following functions and classes:\n\n* `~gammapy.maps.WcsNDMap`\n* `~gammapy.detect.ASmooth`\n* `~gammapy.detect.TSMapEstimator`\n* `~gammapy.detect.find_peaks`\n* `~gammapy.detect.compute_lima_image`\n", "_____no_output_____" ], [ "## Setup\n\nAs always, let's get started with some setup ...", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom gammapy.maps import Map\nfrom gammapy.detect import (\n ASmooth,\n TSMapEstimator,\n find_peaks,\n compute_lima_image,\n)\nfrom gammapy.catalog import SOURCE_CATALOGS\nfrom gammapy.cube import PSFKernel\nfrom gammapy.stats import significance\nfrom astropy.coordinates import SkyCoord\nfrom astropy.convolution import Tophat2DKernel\nimport astropy.units as u\nimport numpy as np", "_____no_output_____" ], [ "# defalut matplotlib colors without grey\ncolors = [\n u\"#1f77b4\",\n u\"#ff7f0e\",\n u\"#2ca02c\",\n u\"#d62728\",\n u\"#9467bd\",\n u\"#8c564b\",\n u\"#e377c2\",\n u\"#bcbd22\",\n u\"#17becf\",\n]", "_____no_output_____" ] ], [ [ "## Read in input images\n\nWe first read in the counts cube and sum over the energy axis:", "_____no_output_____" ] ], [ [ "counts = Map.read(\"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts.fits.gz\")\nbackground = Map.read(\n \"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-background.fits.gz\"\n)\nexposure = Map.read(\n \"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-exposure.fits.gz\"\n)\n\nmaps = {\"counts\": counts, \"background\": background, \"exposure\": exposure}\n\nkernel = PSFKernel.read(\n \"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-psf.fits.gz\"\n)", "_____no_output_____" ] ], [ [ "## Adaptive smoothing\n\nFor visualisation purpose it can be nice to look at a smoothed counts image. This can be performed using the adaptive smoothing algorithm from [Ebeling et al. (2006)](https://ui.adsabs.harvard.edu/abs/2006MNRAS.368...65E/abstract).\nIn the following example the `threshold` argument gives the minimum significance expected, values below are clipped.\n", "_____no_output_____" ] ], [ [ "%%time\nscales = u.Quantity(np.arange(0.05, 1, 0.05), unit=\"deg\")\nsmooth = ASmooth(threshold=3, scales=scales)\nimages = smooth.run(**maps)", "_____no_output_____" ], [ "plt.figure(figsize=(15, 5))\nimages[\"counts\"].plot(add_cbar=True, vmax=10);", "_____no_output_____" ] ], [ [ "## TS map estimation\n\nThe Test Statistic, TS = 2 ∆ log L ([Mattox et al. 
1996](https://ui.adsabs.harvard.edu/abs/1996ApJ...461..396M/abstract)), compares the likelihood function L optimized with and without a given source.\nThe TS map is computed by fitting a single amplitude parameter on each pixel as described in Appendix A of [Stewart (2009)](https://ui.adsabs.harvard.edu/abs/2009A%26A...495..989S/abstract). The fit is simplified by finding roots of the derivative of the fit statistic (default settings use [Brent's method](https://en.wikipedia.org/wiki/Brent%27s_method)).", "_____no_output_____" ] ], [ [ "%%time\nestimator = TSMapEstimator()\nimages = estimator.run(maps, kernel.data)", "_____no_output_____" ] ], [ [ "### Plot resulting images", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(15, 5))\nimages[\"sqrt_ts\"].plot(add_cbar=True);", "_____no_output_____" ], [ "plt.figure(figsize=(15, 5))\nimages[\"flux\"].plot(add_cbar=True, stretch=\"sqrt\", vmin=0);", "_____no_output_____" ], [ "plt.figure(figsize=(15, 5))\nimages[\"niter\"].plot(add_cbar=True);", "_____no_output_____" ] ], [ [ "## Source candidates\n\nLet's run a peak finder on the `sqrt_ts` image to get a list of point-source candidates (positions and peak `sqrt_ts` values).\nThe `find_peaks` function performs a local maximum search in a sliding window; the argument `min_distance` is the minimum pixel distance between peaks (the smallest possible value, and the default, is 1 pixel).", "_____no_output_____" ] ], [ [ "sources = find_peaks(images[\"sqrt_ts\"], threshold=8, min_distance=1)\nnsou = len(sources)\nsources", "_____no_output_____" ], [ "# Plot sources on top of significance sky image\nplt.figure(figsize=(15, 5))\n\n_, ax, _ = images[\"sqrt_ts\"].plot(add_cbar=True)\n\nax.scatter(\n    sources[\"ra\"],\n    sources[\"dec\"],\n    transform=plt.gca().get_transform(\"icrs\"),\n    color=\"none\",\n    edgecolor=\"w\",\n    marker=\"o\",\n    s=600,\n    lw=1.5,\n);", "_____no_output_____" ] ], [ [ "Note that we used the instrument point-spread-function (PSF) as the kernel, so the hypothesis we test is the presence of a point source. In order to test for extended sources we would have to use an extended template convolved with the PSF as the kernel. Alternatively, we can compute the significance of an extended excess using the Li & Ma formalism, which is faster as no fitting is involved.", "_____no_output_____" ], [ "## Li & Ma significance maps\n\nWe can compute the significance for an observed number of counts and a known background using an extension of equation (17) from [Li & Ma (1983)](https://ui.adsabs.harvard.edu/abs/1983ApJ...272..317L/abstract) (see `gammapy.stats.significance` for details). We can perform this calculation by integrating the counts within apertures of different radii. To do so we use an astropy Tophat kernel with the `compute_lima_image` function.\n\n", "_____no_output_____" ] ], [ [ "%%time\nradius = np.array([0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5])\npixsize = counts.geom.pixel_scales[0].value\nnr = len(radius)\nsigni = np.zeros((nsou, nr))\nexcess = np.zeros((nsou, nr))\nfor kr in range(nr):\n    npixel = radius[kr] / pixsize\n    kernel = Tophat2DKernel(npixel)\n    result = compute_lima_image(counts, background, kernel)\n    signi[:, kr] = result[\"significance\"].data[sources[\"y\"], sources[\"x\"]]\n    excess[:, kr] = result[\"excess\"].data[sources[\"y\"], sources[\"x\"]]", "_____no_output_____" ] ], [ [ "For simplicity we saved the significance and excess at the positions of the candidates found previously on the TS map, but we could also have applied the peak finder on these significance maps for each scale, or alternatively implemented a 3D peak detection (in longitude, latitude, radius). Now let's look at the significance versus integration radius:", "_____no_output_____" ] ], [ [ "plt.figure()\nfor ks in range(nsou):\n    plt.plot(radius, signi[ks, :], color=colors[ks])\nplt.xlabel(\"Radius\")\nplt.ylabel(\"Li & Ma Significance\")\nplt.title(\"Guessing optimal radius of each candidate\");", "_____no_output_____" ] ], [ [ "We can add the optimal radius guess and the corresponding excess to the source candidate properties table.", "_____no_output_____" ] ], [ [ "# rename the value key to sqrt(TS)_PS\nsources.rename_column(\"value\", \"sqrt(TS)_PS\")\n\nindex = np.argmax(signi, axis=1)\nsources[\"significance\"] = signi[range(nsou), index]\nsources[\"radius\"] = radius[index]\nsources[\"excess\"] = excess[range(nsou), index]\nsources", "_____no_output_____" ], [ "# Plot candidate sources on top of significance sky image with radius guess\nplt.figure(figsize=(15, 5))\n\n_, ax, _ = images[\"sqrt_ts\"].plot(add_cbar=True, cmap=cm.Greys_r)\n\nphi = np.arange(0, 2 * np.pi, 0.01)\nfor ks in range(nsou):\n    x = sources[\"x\"][ks] + sources[\"radius\"][ks] / pixsize * np.cos(phi)\n    y = sources[\"y\"][ks] + sources[\"radius\"][ks] / pixsize * np.sin(phi)\n    ax.plot(x, y, \"-\", color=colors[ks], lw=1.5);", "_____no_output_____" ] ], [ [ "Note that the optimal radius of nested sources is likely overestimated due to their neighbors. We limited this example to only the most significant sources above ~8 sigma. When lowering the detection threshold the number of candidates increases, together with the source confusion.", "_____no_output_____" ], [ "## What next?\n\nIn this notebook, we have seen how to work with images and compute TS and significance images from counts data, if a background estimate is already available.\n\nHere are some suggestions for what to do next:\n\n- Look at how background estimation is performed for IACTs with and without the high-level interface in the [analysis_1](analysis_1.ipynb) and [analysis_2](analysis_2.ipynb) notebooks, respectively\n- Learn about 2D model fitting in the [image_analysis](image_analysis.ipynb) notebook\n- Find out more about Fermi-LAT data analysis in the [fermi_lat](fermi_lat.ipynb) notebook\n- Use source candidates to build a model and perform a 3D fit (see the [analysis_3d](analysis_3d.ipynb) and [analysis_mwl](analysis_mwl) notebooks for some hints)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
d0b8e544a8dd7f1c6268670536c8961008107980
397,403
ipynb
Jupyter Notebook
_notebooks/2020-07-08-poisson-quasi-Laplace-posterior.ipynb
banskt/glm-ash-notes
6eed322195ab2eb48276ab2460665c6432eff60e
[ "Apache-2.0" ]
null
null
null
_notebooks/2020-07-08-poisson-quasi-Laplace-posterior.ipynb
banskt/glm-ash-notes
6eed322195ab2eb48276ab2460665c6432eff60e
[ "Apache-2.0" ]
1
2022-02-26T09:03:50.000Z
2022-02-26T09:03:50.000Z
_notebooks/2020-07-08-poisson-quasi-Laplace-posterior.ipynb
banskt/glm-ash-notes
6eed322195ab2eb48276ab2460665c6432eff60e
[ "Apache-2.0" ]
null
null
null
605.797256
227,728
0.941659
[ [ [ "# Quasi-Laplace approximation for Poisson data\n\n- toc: true \n- badges: true\n- comments: true\n- categories: [jupyter]", "_____no_output_____" ], [ "### About\n\nThe [quasi-Laplace approximation]({% post_url 2020-06-22-intuition-for-quasi-Laplace %}) may be extended to approximate the posterior of a Poisson distribution with a Gaussian, as we will see here.\nWe approximate the regularized likelihood \n$\\mathscr{L}_{\\mathrm{reg}}(\\boldsymbol{\\beta})$\ndefined as the product of the likelihood and a Gaussian *regularizer*,\n\\begin{equation*}\n\\mathscr{L}_{\\mathrm{reg}}(\\boldsymbol{\\beta}) \\triangleq p\\left(\\mathbf{y} \\mid \\mathbf{X}, \\boldsymbol{\\beta}\\right)\n\\mathcal{N}\\left( \\boldsymbol{\\beta} \\mid \\mathbf{0}, \\mathbf{\\Lambda}^{-1} \\right) \\propto\n\\mathcal{N}\\left( \\boldsymbol{\\beta} \\mid \\mathbf{m}, \\mathbf{S} \\right)\n\\end{equation*}\nsuch that the mode of the regularized likelihood is near the mode of the posterior.\n \nThe prior $p\\left(\\boldsymbol{\\beta}\\mid g \\right)$ is a mixture of Gaussians with known variances but unknown mixture proportions.\nThe precision matrix $\\mathbf{\\Lambda}$ is defined as a diagonal matrix, $\\mathbf{\\Lambda} \\triangleq \\mathrm{diag}\\left(\\boldsymbol{\\lambda}\\right)$, whose elements $\\lambda_j$ are roughly set to some expected value to ensure that the regularized likelihood is centered at the mode of the posterior.", "_____no_output_____" ] ], [ [ "#collapse_hide\n\nimport numpy as np\nnp.set_printoptions(precision = 4, suppress=True)\n\nfrom scipy import optimize\nfrom scipy.special import gammaln\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib import ticker as plticker\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport sys\nsys.path.append(\"../../utils/\")\nimport mpl_stylesheet\nmpl_stylesheet.banskt_presentation(fontfamily = 'latex-clearsans', fontsize = 18, colors = 'banskt', dpi = 72)", "_____no_output_____" ] ], [ [ "### Generate toy data\n\nLet us consider a Poisson model with sparse coefficients, so that the number of causal variables `ncausal` is much less than the number of variables `nvar` in the model. This is ensured by sampling the betas from a Gaussian mixture prior of `nGcomp` components with variances given by $\\sigma_k^2$ (`sigmak2`) and the mixture proportions given by `probk`. 
The sparsity is controlled by the variable `sparsity` that specifies the mixture proportion of the zero-th component $\\mathcal{N}(0, 0)$ (or the delta function).", "_____no_output_____" ] ], [ [ "nsample = 20\nnvar = 30\nnGcomp = 3\nsparsity = 0.8\nprior_strength = 20\nnum_inf = 1e4 # a large number for 1/sigma_k^2 when sigma_k^2 = 0\n\nprobk = np.zeros(nGcomp)\nprobk[0] = sparsity\nprobk[1:(nGcomp - 1)] = (1 - sparsity) / (nGcomp - 1)\nprobk[nGcomp - 1] = 1 - np.sum(probk)\nsigmak2 = np.array([prior_strength * np.square(np.power(2, (i)/nGcomp) - 1) for i in range(nGcomp)])", "_____no_output_____" ] ], [ [ "We use the exponential link function $\\lambda_i = \\exp\\left(\\mathbf{X}_i\\boldsymbol{\\beta}\\right)$ to generate the response variable $y_i$ for $i = 1, \\ldots, N$ samples using the Poisson probability distribution\n\\begin{equation*}\np\\left(y_i \\mid \\mathbf{X}_i, \\boldsymbol{\\beta}\\right) = \\frac{\\lambda_i^{y_i}e^{-\\lambda_i}}{y_i!}\n\\end{equation*}\n$\\mathbf{X}$ is centered and scaled such that for each variable $j$, the variance is $\\mathrm{var}(\\mathbf{x}_j) = 1$.", "_____no_output_____" ] ], [ [ "# collapse-hide\n\ndef standardize(X):\n Xnorm = (X - np.mean(X, axis = 0)) \n Xstd = Xnorm / np.std(Xnorm, axis = 0)\n return Xstd\n\ndef poisson_data(X, beta):\n Xbeta = np.dot(X, beta)\n pred = np.exp(Xbeta)\n Y = np.random.poisson(pred)\n return Y\n\n\nX = np.random.rand(nsample * nvar).reshape(nsample, nvar)\nX = standardize(X)\n\ngammajk = np.random.multinomial(1, probk, size = nvar)\nbeta = np.zeros(nvar)\nfor j in range(nvar):\n if gammajk[j, 0] != 1:\n kidx = np.where(gammajk[j, :] == 1)[0][0]\n kstd = np.sqrt(sigmak2[kidx])\n beta[j] = np.random.normal(loc = 0., scale = kstd)\n\nncausal = beta[beta != 0].shape[0]\nbetavar = np.var(beta[beta != 0])\n\nY = poisson_data(X, beta)", "_____no_output_____" ] ], [ [ "Let us have a look at the generated data.", "_____no_output_____" ] ], [ [ "# collapse-hide\n\nprint(f'There are {ncausal} non-zero coefficients with variance {betavar:.4f}')\n\nfig = plt.figure(figsize = (12,6))\nax1 = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\n\nXbeta = np.dot(X, beta)\nax1.scatter(Xbeta, np.log(Y+1), s = 10)\nax2.hist(Y)\n\nax1.set_xlabel(r'$\\sum_i X_{ni} \\beta_i$')\nax1.set_ylabel(r'$\\log(Y_n + 1)$')\nax2.set_xlabel(r'$Y_n$')\nax2.set_ylabel('Number')\nplt.tight_layout(pad = 2.0)\nplt.show()", "There are 5 non-zero coefficients with variance 3.2003\n" ] ], [ [ "### True posterior vs quasi-Laplace posterior\n\nWe select two causal variables (with maximum effect size) and fix all the others to optimum values to understand how the likelihood and posterior depends on these two chosen variables. 
To avoid the sum over the indicator variables, we use the joint prior $p\\left(\\boldsymbol{\\beta}, \\boldsymbol{\\gamma} \\mid g\\right)$.", "_____no_output_____" ], [ "Some useful function definitions:", "_____no_output_____" ] ], [ [ "# collapse-hide\n\ndef get_log_likelihood(Y, X, beta):\n Xbeta = np.dot(X, beta)\n logL = np.sum(Y * Xbeta - np.exp(Xbeta))# - gammaln(Y+1))\n return logL\n\ndef get_log_prior(beta, gammajk, probk, sigmak2):\n logprior = 0\n for j, b in enumerate(beta):\n k = np.where(gammajk[j, :] == 1)[0][0]\n logprior += np.log(probk[k])\n if k > 0:\n logprior += - 0.5 * (np.log(2 * np.pi) + np.log(sigmak2[k]) + b * b / sigmak2[k]) \n return logprior\n\ndef plot_contours(ax, X, Y, Z, beta, norm, cstep = 10, zlabel = \"\"):\n zmin = np.min(Z) - 1 * np.std(Z)\n zmax = np.max(Z) + 1 * np.std(Z)\n ind = np.unravel_index(np.argmax(Z, axis=None), Z.shape)\n\n levels = np.linspace(zmin, zmax, 200)\n clevels = np.linspace(zmin, zmax, 20)\n cmap = cm.YlOrRd_r\n\n if norm:\n cset1 = ax.contourf(X, Y, Z, levels, norm = norm,\n cmap=cm.get_cmap(cmap, len(levels) - 1))\n else:\n cset1 = ax.contourf(X, Y, Z, levels,\n cmap=cm.get_cmap(cmap, len(levels) - 1))\n cset2 = ax.contour(X, Y, Z, clevels, colors='k')\n for c in cset2.collections:\n c.set_linestyle('solid')\n\n\n ax.set_aspect(\"equal\")\n ax.scatter(beta[0], beta[1], color = 'blue', s = 100)\n ax.scatter(X[ind[1]], Y[ind[0]], color = 'k', s = 100)\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.2)\n cbar = plt.colorbar(cset1, cax=cax)\n ytickpos = np.arange(int(zmin / cstep) * cstep, zmax, cstep)\n cbar.set_ticks(ytickpos)\n if zlabel:\n cax.set_ylabel(zlabel)\n\n #loc = plticker.AutoLocator()\n #ax.xaxis.set_major_locator(loc)\n #ax.yaxis.set_major_locator(loc)\n \ndef regularized_log_likelihood(beta, X, Y, L):\n nvar = beta.shape[0]\n Xbeta = np.dot(X, beta)\n\n ## Function\n llb = np.sum(Y * Xbeta - np.exp(Xbeta))# - gammaln(Y+1))\n #reg = 0.5 * np.sum(np.log(L)) - 0.5 * np.einsum('i,i->i', np.square(beta), L)\n reg = - 0.5 * np.einsum('i,i->i', np.square(beta), L)\n loglik = llb + reg\n \n ## Gradient\n pred = 1 / (1 + np.exp(-Xbeta))\n der = np.einsum('i,ij->j', Y, X) - np.einsum('ij, i->j', X, np.exp(Xbeta)) - np.multiply(beta, L)\n \n return -loglik, -der\n\ndef precisionLL(X, beta, L):\n nvar = X.shape[1]\n Xbeta = np.dot(X, beta)\n pred = np.exp(Xbeta)\n hess = - np.einsum('i,ij,ik->jk', pred, X, X)\n hess[np.diag_indices(nvar)] -= L\n return -hess\n\ndef get_mS(X, Y, beta0, L):\n nvar = X.shape[1]\n args = X, Y, L\n\n gmode = optimize.minimize(regularized_log_likelihood,\n beta0,\n args=args,\n method='L-BFGS-B',\n jac=True,\n bounds=None,\n options={'maxiter': 20000000,\n 'maxfun': 20000000,\n 'ftol': 1e-9,\n 'gtol': 1e-9\n #'disp': True\n })\n \n M = gmode.x\n Sinv = precisionLL(X, M, L)\n return M, Sinv\n\ndef get_qL_log_posterior(beta, L, M, Sinv, logdetSinv, logprior):\n blessM = beta - M\n bMSbM = np.dot(blessM.T, np.dot(Sinv, blessM))\n bLb = np.einsum('i, i', np.square(beta), L)\n logdetLinv = - np.sum(np.log(L))\n logposterior = 0.5 * (logdetSinv + logdetLinv - bMSbM + bLb)\n logposterior += logprior\n return logposterior", "_____no_output_____" ] ], [ [ "We calculate the likelihood, prior, *true* posterior and the quasi-Laplace posterior. Note that the posteriors are not normalized. 
We apply quasi-Laplace (QL) approximation with some $\\mathbf{\\Lambda}$ and show QL posterior distribution, which is given by\n\\begin{equation*}\np\\left(\\mathbf{y} \\mid \\mathbf{X}, \\boldsymbol{\\beta}\\right) p\\left(\\boldsymbol{\\beta}\\mid g \\right)\n\\propto\n\\frac{\\mathcal{N}\\left( \\boldsymbol{\\beta} \\mid \\mathbf{m}, \\mathbf{S} \\right)\n p\\left(\\boldsymbol{\\beta}\\mid g \\right)\n }{\n \\mathcal{N}\\left( \\boldsymbol{\\beta} \\mid \\mathbf{0}, \\mathbf{\\Lambda}^{-1} \\right)\n }\n\\end{equation*}\nHere, we assume that we know $\\mathbf{\\Lambda}$. In reality, we will not know $\\mathbf{\\Lambda}$ but will have to learn it from the data or make some educated guess from the prior choice.", "_____no_output_____" ] ], [ [ "np.max(beta)", "_____no_output_____" ], [ "# collapse-hide\n\nbchoose = np.argsort(abs(beta))[-2:]\nnplotx = 20\nnploty = 20\n\nb1min = -0.5\nb1max = 4\nb2min = -4\nb2max = 0.5\nbeta1 = np.linspace(b1min, b1max, nplotx)\nbeta2 = np.linspace(b2min, b2max, nploty)\nlogL = np.zeros((nploty, nplotx))\nlogPr = np.zeros((nploty, nplotx))\nlogPs = np.zeros((nploty, nplotx))\nlogQL = np.zeros((nploty, nplotx))\n\nthisbeta = beta.copy()\nmask = np.ones(nvar, bool)\nmask[bchoose] = False\n\ntrue_pi = np.sum(gammajk, axis = 0) / np.sum(gammajk)\n#reg = 1 / np.einsum('i,i', true_pi, sigmak2)\n#regL = np.repeat(reg, nvar)\nregL = np.repeat(num_inf, nvar)\nfor j, b in enumerate(beta):\n k = np.where(gammajk[j, :] == 1)[0][0]\n if k > 0:\n regL[j] = 1 / sigmak2[k]\nM, Sinv = get_mS(X, Y, beta, regL)\nsgndetSinv, logdetSinv = np.linalg.slogdet(Sinv)\n\nfor i, b1 in enumerate(beta1):\n for j, b2 in enumerate(beta2):\n thisbeta[bchoose] = np.array([b1, b2])\n logL[j, i] = get_log_likelihood(Y, X, thisbeta)\n logPr[j, i] = get_log_prior(thisbeta, gammajk, probk, sigmak2)\n logQL[j, i] = get_qL_log_posterior(thisbeta, regL, M, Sinv, logdetSinv, logPr[j, i])\nlogPs = logL + logPr", "_____no_output_____" ] ], [ [ "Here, we plot the contour maps. The x and y-axis show the two coefficients $\\beta_1$ and $\\beta_2$, which we chose to vary. The blue dot shows the coordinates of the true values of $\\{\\beta_1, \\beta_2\\}$ and the black dot shows the maximum of the log probabilities. Note how the non-Gaussian *true* posterior is now approximated by a Gaussian QL posterior.", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize = (12, 8))\nax1 = fig.add_subplot(221)\nax2 = fig.add_subplot(222)\nax3 = fig.add_subplot(223)\nax4 = fig.add_subplot(224)\n\nnorm = cm.colors.Normalize(vmin=np.min(logPs), vmax=np.max(logPs))\n\nplot_contours(ax1, beta1, beta2, logL, beta[bchoose], None, cstep = 5000, zlabel = \"Log Likelihood\")\nplot_contours(ax2, beta1, beta2, logPr, beta[bchoose], None, cstep = 5, zlabel = \"Log Prior\")\nplot_contours(ax3, beta1, beta2, logPs, beta[bchoose], None, cstep = 5000, zlabel = \"Log Posterior\")\nplot_contours(ax4, beta1, beta2, logQL, beta[bchoose], None, cstep = 5000, zlabel = \"Log QL Posterior\")\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "### Effect of regularizer\n\nLet us assume that we do not know $\\mathbf{\\Lambda}$ and all $\\lambda_j$'s are equal. 
Here we look at how the QL posterior changes with varying $\\beta_2$ for different values of $\\lambda_j$.\n\nHere we define a function for calculating the QL posterior and true posterior for changing the coefficients of a single variable:", "_____no_output_____" ] ], [ [ "# collapse-hide\n\ndef get_logQL_logPs_single_variable(X, Y, beta, regvar, betavar, bidx,\n regL, gammajk, probk, sigmak2):\n\n nvar = beta.shape[0]\n nplotrv = regvar.shape[0]\n nplotx = betavar.shape[0]\n logL = np.zeros(nplotx)\n logPr = np.zeros(nplotx)\n logPs = np.zeros(nplotx)\n logQL = np.zeros((nplotrv, nplotx))\n\n thisbeta = beta.copy()\n thisL = regL.copy()\n\n for j, b2 in enumerate(betavar):\n thisbeta[bidx] = b2\n logL[j] = get_log_likelihood(Y, X, thisbeta)\n logPr[j] = get_log_prior(thisbeta, gammajk, probk, sigmak2)\n logPs = logL + logPr\n\n for i, r1 in enumerate(regvar):\n thisL = np.repeat(r1, nvar)\n #thisL[bidx] = r1\n M, Sinv = get_mS(X, Y, beta, thisL)\n sgndetSinv, logdetSinv = np.linalg.slogdet(Sinv)\n for j, b2 in enumerate(betavar):\n thisbeta[bidx] = b2\n logQL[i, j] = get_qL_log_posterior(thisbeta, thisL, M, Sinv, logdetSinv, logPr[j])\n\n return logPs, logQL", "_____no_output_____" ] ], [ [ "And then look at the posteriors for $\\beta_2$.", "_____no_output_____" ] ], [ [ "#collapse-hide\n\nnplotx_rv = 200\nnplotrv = 4\nbmin = -4\nbmax = 0\nrvmin = 1\nrvmax = 100\nbidx = bchoose[1]\n\nfig = plt.figure(figsize = (8, 8))\nax1 = fig.add_subplot(111)\n\nbetavals = np.linspace(bmin, bmax, nplotx_rv)\nregvals = np.linspace(rvmin, rvmax, nplotrv)\nlogPs_rv, logQL_rv = get_logQL_logPs_single_variable(X, Y, beta, regvals, betavals, bidx,\n regL, gammajk, probk, sigmak2)\n\nax1.plot(betavals, logPs_rv, lw = 3, zorder = 2, label = 'True Posterior')\nax1.scatter(betavals[np.argmax(logPs_rv)], logPs_rv[np.argmax(logPs_rv)], s = 40, zorder = 10, color = 'black')\nfor i, r1 in enumerate(regvals):\n ax1.plot(betavals, logQL_rv[i, :], lw = 2, zorder = 5, label = f'$\\lambda =${r1:.2f}')\n ix = np.argmax(logQL_rv[i, :])\n ax1.scatter(betavals[ix], logQL_rv[i, ix], s = 40, zorder = 10, color = 'black')\nax1.axvline(beta[bidx], ls = 'dotted', zorder = 1)\n\nax1.legend(handlelength = 1.5)\nax1.set_xlabel(r'$\\beta$')\nax1.set_ylabel('Log Posterior')\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "What happens to the QL posterior for other variables? Let us look at every $\\beta_j$ and their individual maximum posterior, while all others are kept fixed at optimum values. 
Here, we have arranged the $\\beta_j$ in ascending order.", "_____no_output_____" ] ], [ [ "#collapse-hide\n\nbmin = -5\nbmax = 5\n\nbidx_sorted = np.argsort(beta)\nbidx_sorted_nz = bidx_sorted #bidx_sorted[beta[bidx_sorted]!=0]\n\nbetavals = np.linspace(bmin, bmax, nplotx_rv)\nregvals = np.linspace(rvmin, rvmax, nplotrv)\n\nmaxQL = np.zeros((nplotrv, len(bidx_sorted_nz)))\nmaxPs = np.zeros(len(bidx_sorted_nz))\nfor i, bidx in enumerate(bidx_sorted_nz):\n _logPs, _logQL = get_logQL_logPs_single_variable(X, Y, beta, regvals, betavals, bidx,\n regL, gammajk, probk, sigmak2)\n maxPs[i] = betavals[np.argmax(_logPs)]\n for j in range(len(regvals)):\n maxQL[j, i] = betavals[np.argmax(_logQL[j, :])]\n \nfig = plt.figure(figsize = (16, 8))\nax1 = fig.add_subplot(111)\n\nxvals = np.arange(len(bidx_sorted_nz))\n\nax1.plot(xvals, maxPs, lw = 2, zorder = 2, label = 'True Posterior')\nfor i, r1 in enumerate(regvals):\n ax1.plot(xvals, maxQL[i, :], lw = 2, zorder = 5, label = f'$\\lambda =${r1:.2f}')\n \nax1.scatter(xvals, maxPs, s = 20, zorder = 1)\nfor i, r1 in enumerate(regvals):\n ax1.scatter(xvals, maxQL[i, :], s = 20, zorder = 1)\nax1.scatter(xvals, beta[bidx_sorted_nz], s = 80, zorder = 10, color = 'maroon', label = 'Input')\n\nax1.legend(handlelength = 1.5)\nax1.set_xlabel(r'Index of $\\beta$')\nax1.set_ylabel(r'$\\beta$ at maximum log posterior')\nax1.set_xticks(xvals)\nax1.set_xticklabels([f'{idx}' for idx in bidx_sorted_nz])\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0b8f154521196d2d15433a7210d5e368c988025
30,968
ipynb
Jupyter Notebook
05 - Ciclo de Carnot.ipynb
StarBrand/IQ3201-Ciclos
4e39bc1aaac767b12bfdfdf547e039050b598a9c
[ "MIT" ]
null
null
null
05 - Ciclo de Carnot.ipynb
StarBrand/IQ3201-Ciclos
4e39bc1aaac767b12bfdfdf547e039050b598a9c
[ "MIT" ]
null
null
null
05 - Ciclo de Carnot.ipynb
StarBrand/IQ3201-Ciclos
4e39bc1aaac767b12bfdfdf547e039050b598a9c
[ "MIT" ]
null
null
null
154.84
27,028
0.900962
[ [ [ "# Modelo Ciclo de Carnot", "_____no_output_____" ], [ "Importar librerias", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Definir funciones\n\nPara la presión de un gas ideal isotérmico y adiabático", "_____no_output_____" ] ], [ [ "# Valores dados para que calce el gráfico\nR = 1\ngamma = 10\n\ndef pressure_ideal(T, v):\n return R*T / v\n\ndef pressure_adiabatic(v, P1, v1):\n return P1*np.power(v1/v, gamma)", "_____no_output_____" ] ], [ [ "## Definir valores a gráficar", "_____no_output_____" ] ], [ [ "Th = 850\nTc = 310\n\ndelta_v = 0.05", "_____no_output_____" ] ], [ [ "### Proceso Isotérmico", "_____no_output_____" ] ], [ [ "vC = 2.5\nvD = 2.0", "_____no_output_____" ] ], [ [ "Calculo de vA y vB", "_____no_output_____" ] ], [ [ "PC = pressure_ideal(Tc, vC)\nPD = pressure_ideal(Tc, vD)\n\nvA = ((PD*(vD**gamma))/(R*Th))**(1/(gamma-1))\nvB = ((PC*(vC**gamma))/(R*Th))**(1/(gamma-1))\n\nv = np.linspace(vA-delta_v, vC+delta_v) # v1, 3", "_____no_output_____" ] ], [ [ "### Proceso adiabático", "_____no_output_____" ] ], [ [ "v_2 = np.linspace(vB-delta_v, vC+delta_v)\nv_4 = np.linspace(vA-delta_v, vD+delta_v)", "_____no_output_____" ], [ "_, ax = plt.subplots(figsize=(8, 6))\n\nax.plot(v, pressure_ideal(Tc, v), '--b', label=\"Isoterma\")\nax.plot(v, pressure_ideal(Th, v), '--b')\nax.plot(v_2, pressure_adiabatic(v_2, PC, vC), '-.r', label=\"Adiabática\")\nax.plot(v_4, pressure_adiabatic(v_4, PD, vD), '-.r')\nax.set_title(\"Ciclo de Carnot\\n\", fontsize=16)\nax.set_xlabel(\"v\", fontsize=14)\nax.set_ylabel(\"P\", fontsize=14)\nax.tick_params(\n which='both',\n bottom=False,\n top=False,\n right=False,\n left=False,\n labelbottom=False,\n labelleft=False)\nax.grid()\nax.legend(fontsize=14)\n\nplt.savefig(\"../Matplotlib/ciclo_carnot.png\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0b8f96b5c96384ee0bd841dc7abd6bd2deb8efa
1,611
ipynb
Jupyter Notebook
python/GeometryAG/gaprimer/g4.ipynb
karng87/nasm_game
a97fdb09459efffc561d2122058c348c93f1dc87
[ "MIT" ]
null
null
null
python/GeometryAG/gaprimer/g4.ipynb
karng87/nasm_game
a97fdb09459efffc561d2122058c348c93f1dc87
[ "MIT" ]
null
null
null
python/GeometryAG/gaprimer/g4.ipynb
karng87/nasm_game
a97fdb09459efffc561d2122058c348c93f1dc87
[ "MIT" ]
null
null
null
23.691176
86
0.543762
[ [ [ "# G4: Standard 4D Model\n\n# Make SymPy available to this program:\nimport sympy \nfrom sympy import *\n\n# Make GAlgebra available to this program:\nfrom galgebra.ga import * \nfrom galgebra.mv import *\nfrom galgebra.printer import Fmt, GaPrinter, Format\n # Fmt: sets the way that a multivector's basis expansion is output.\n # GaPrinter: makes GA output a little more readable.\n # Format: turns on latex printer.\nfrom galgebra.gprinter import gFormat, gprint\ngFormat()", "_____no_output_____" ], [ "# g4: R^4 using cartesian coordiantes\ng4coords = (x,y,z,w) = symbols('x y z w', real=True)\ng4 = Ga('e', g=[1,1,1,1], coords=g4coords)\n(ex, ey, ez, ew) = g4.mv()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
d0b91f6a2d8a464e455f913dde1164c75e6c0832
1,580
ipynb
Jupyter Notebook
src/App_Process_Controller.ipynb
Dustin-dusTir/Python-Projekt-Setup
d774478d6a93a5a4d961f4980438a24619245687
[ "MIT" ]
2
2021-04-23T21:57:41.000Z
2021-04-24T07:58:50.000Z
src/App_Process_Controller.ipynb
Dustin-dusTir/Python-Projekt-Setup
d774478d6a93a5a4d961f4980438a24619245687
[ "MIT" ]
null
null
null
src/App_Process_Controller.ipynb
Dustin-dusTir/Python-Projekt-Setup
d774478d6a93a5a4d961f4980438a24619245687
[ "MIT" ]
null
null
null
18.372093
77
0.53038
[ [ [ "# App and Process Controller\n\nThis notebook is used to start client-Processes", "_____no_output_____" ] ], [ [ "# from subprocess import call\r\n# import os", "_____no_output_____" ] ], [ [ "## Test if this notebook works", "_____no_output_____" ] ], [ [ "! start powershell python ./test/testConsoleScript.py", "_____no_output_____" ] ], [ [ "## Start a server", "_____no_output_____" ] ], [ [ "! start powershell python ./server.py", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0b9204e1242ce2b52200ae628af7e68042ce150
12,309
ipynb
Jupyter Notebook
classification/phases/phase-proportion-figure.ipynb
levon003/icwsm-cancer-journeys
f0b39f80380ace20912e989964475056be27ebc5
[ "MIT" ]
null
null
null
classification/phases/phase-proportion-figure.ipynb
levon003/icwsm-cancer-journeys
f0b39f80380ace20912e989964475056be27ebc5
[ "MIT" ]
null
null
null
classification/phases/phase-proportion-figure.ipynb
levon003/icwsm-cancer-journeys
f0b39f80380ace20912e989964475056be27ebc5
[ "MIT" ]
null
null
null
31.162025
289
0.378666
[ [ [ "# Phase Proportions Over Time\n\nGoal is to create a figure like that in Paul et al. [1].\n\n[1] Michael J. Paul, Ryen W. White, and Eric Horvitz. 2015. Diagnoses, Decisions, and Outcomes: Web Search as Decision Support for Cancer. In Proceedings of the 24th International Conference on World Wide Web - WWW ’15 (WWW’15), 831–841. DOI:https://doi.org/10.1145/2736277.2741662\n", "_____no_output_____" ] ], [ [ "%reload_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ], [ "import sys\nsys.path.append(\"../../annotation_data\")", "_____no_output_____" ], [ "from phase import *", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\nimport sklearn\nimport sklearn.metrics\nimport subprocess\nfrom tqdm import tqdm", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport matplotlib.dates as md\nimport matplotlib\nimport pylab as pl", "_____no_output_____" ], [ "vw_working_dir = \"/home/srivbane/shared/caringbridge/data/projects/qual-health-journeys/classification/phases/vw\"", "_____no_output_____" ] ], [ [ "## Load VW phase labels", "_____no_output_____" ] ], [ [ "all_predictions_filepath = os.path.join(vw_working_dir, \"vw_all_preds.pkl\")\npred_df = pd.read_pickle(all_predictions_filepath)\nlen(pred_df)", "_____no_output_____" ], [ "pred_df.head()", "_____no_output_____" ], [ "for site_id, site_preds in tqdm(pred_df.groupby(by='site_id', sort=False)):\n journal_creation_times = site_preds.loc[:, 'created_at']\n journal_labels = site_preds.loc[:,[phase_label + \"_pred_label\" for phase_label in phase_labels]]\n # do something with the labels and the creation time...", "100%|██████████| 4977/4977 [00:05<00:00, 950.92it/s]\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0b92c41994afa30d4220a5460160284e0885129
21,321
ipynb
Jupyter Notebook
autoEssayGrader4.ipynb
ogozuacik/automated_essay_scoring
56a0600da2dbbd9dca8a28b87d05afcfcc502e18
[ "MIT" ]
3
2019-04-01T10:55:31.000Z
2020-10-27T20:31:10.000Z
autoEssayGrader4.ipynb
ogozuacik/automated_essay_scoring
56a0600da2dbbd9dca8a28b87d05afcfcc502e18
[ "MIT" ]
null
null
null
autoEssayGrader4.ipynb
ogozuacik/automated_essay_scoring
56a0600da2dbbd9dca8a28b87d05afcfcc502e18
[ "MIT" ]
1
2020-06-13T19:39:07.000Z
2020-06-13T19:39:07.000Z
33.470958
327
0.548708
[ [ [ "import nltk\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import wordnet\nimport re, collections\nfrom collections import defaultdict\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import mean_squared_error, r2_score, cohen_kappa_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso\nfrom sklearn.svm import SVR\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom spellchecker import SpellChecker\nfrom nltk.tokenize import word_tokenize\nimport string\nfrom sklearn.metrics import classification_report\nfrom sklearn import svm\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.preprocessing import MinMaxScaler\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams", "_____no_output_____" ] ], [ [ "## Loading data", "_____no_output_____" ] ], [ [ "#dataframe = pd.read_csv('all_essaysets.csv', encoding = 'latin-1')\ndataframe = pd.read_csv('training.tsv', encoding = 'latin-1', sep='\\t')\ndataframe.describe()", "_____no_output_____" ], [ "dataframe.head()", "_____no_output_____" ] ], [ [ "## Methods", "_____no_output_____" ] ], [ [ "# selecting which set to be used 1-8\n# in order to combine them all assign set number to 9\ndef select_set(dataframe,setNumber):\n if setNumber == 9:\n dataframe2 = dataframe[dataframe.essay_set ==1]\n texts = dataframe2['essay']\n scores = dataframe2['domain1_score']\n scores = scores.apply(lambda x: (x*3)/scores.max())\n for i in range(1,9):\n dataframe2 = dataframe[dataframe.essay_set == i]\n texts = texts.append(dataframe2['essay'])\n s = dataframe2['domain1_score']\n s = s.apply(lambda x: (x*3)/s.max())\n scores = scores.append(s)\n else:\n dataframe2 = dataframe[dataframe.essay_set ==setNumber]\n texts = dataframe2['essay']\n scores = dataframe2['domain1_score']\n scores = scores.apply(lambda x: (x*3)/scores.max())\n return texts, scores", "_____no_output_____" ], [ "# get histogram plot of scores and average score\ndef get_hist_avg(scores,bin_count):\n print(sum(scores)/len(scores))\n scores.hist(bins=bin_count)", "_____no_output_____" ], [ "#average word length for a text\ndef avg_word_len(text):\n clean_essay = re.sub(r'\\W', ' ', text)\n words = nltk.word_tokenize(clean_essay)\n total = 0\n for word in words:\n total = total + len(word)\n average = total / len(words)\n \n return average\n\n# word count in a given text\ndef word_count(text):\n clean_essay = re.sub(r'\\W', ' ', text)\n return len(nltk.word_tokenize(clean_essay))\n\n# char count in a given text\ndef char_count(text):\n return len(re.sub(r'\\s', '', str(text).lower()))\n\n# sentence count in a given text\ndef sent_count(text):\n return len(nltk.sent_tokenize(text))\n\n#tokenization of texts to sentences\ndef sent_tokenize(text):\n stripped_essay = text.strip()\n \n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n raw_sentences = tokenizer.tokenize(stripped_essay)\n \n tokenized_sentences = []\n for raw_sentence in raw_sentences:\n if len(raw_sentence) > 0:\n clean_sentence = re.sub(\"[^a-zA-Z0-9]\",\" \", raw_sentence)\n tokens = nltk.word_tokenize(clean_sentence)\n tokenized_sentences.append(tokens)\n return tokenized_sentences\n\n\n# lemma, noun, adjective, verb, adverb count for a given text\n\ndef count_lemmas(text):\n \n 
noun_count = 0\n adj_count = 0\n verb_count = 0\n adv_count = 0 \n lemmas = []\n lemmatizer = WordNetLemmatizer()\n tokenized_sentences = sent_tokenize(text)\n \n for sentence in tokenized_sentences:\n tagged_tokens = nltk.pos_tag(sentence) \n \n for token_tuple in tagged_tokens:\n pos_tag = token_tuple[1]\n \n if pos_tag.startswith('N'): \n noun_count += 1\n pos = wordnet.NOUN\n lemmas.append(lemmatizer.lemmatize(token_tuple[0], pos))\n elif pos_tag.startswith('J'):\n adj_count += 1\n pos = wordnet.ADJ\n lemmas.append(lemmatizer.lemmatize(token_tuple[0], pos))\n elif pos_tag.startswith('V'):\n verb_count += 1\n pos = wordnet.VERB\n lemmas.append(lemmatizer.lemmatize(token_tuple[0], pos))\n elif pos_tag.startswith('R'):\n adv_count += 1\n pos = wordnet.ADV\n lemmas.append(lemmatizer.lemmatize(token_tuple[0], pos))\n else:\n pos = wordnet.NOUN\n lemmas.append(lemmatizer.lemmatize(token_tuple[0], pos))\n \n lemma_count = len(set(lemmas))\n \n return noun_count, adj_count, verb_count, adv_count, lemma_count", "_____no_output_____" ], [ "def token_word(text):\n text = \"\".join([ch.lower() for ch in text if ch not in string.punctuation])\n tokens = nltk.word_tokenize(text)\n return tokens", "_____no_output_____" ], [ "def misspell_count(text):\n spell = SpellChecker()\n # find those words that may be misspelled\n misspelled = spell.unknown(token_word(text))\n #print(misspelled)\n return len(misspelled)", "_____no_output_____" ], [ "def create_features(texts):\n data = pd.DataFrame(columns=('Average_Word_Length','Sentence_Count','Word_Count',\n 'Character_Count', 'Noun_Count','Adjective_Count',\n 'Verb_Count', 'Adverb_Count', 'Lemma_Count' , 'Misspell_Count'\n ))\n\n data['Average_Word_Length'] = texts.apply(avg_word_len)\n data['Sentence_Count'] = texts.apply(sent_count)\n data['Word_Count'] = texts.apply(word_count)\n data['Character_Count'] = texts.apply(char_count)\n temp=texts.apply(count_lemmas)\n noun_count,adj_count,verb_count,adverb_count,lemma_count = zip(*temp)\n data['Noun_Count'] = noun_count\n data['Adjective_Count'] = adj_count\n data['Verb_Count'] = verb_count\n data['Adverb_Count'] = adverb_count\n data['Lemma_Count'] = lemma_count\n data['Misspell_Count'] = texts.apply(misspell_count)\n return data", "_____no_output_____" ], [ "def data_prepare(texts,scores):\n #create features from the texts and clean non graded essays\n data = create_features(texts)\n data.describe()\n t1=np.where(np.asanyarray(np.isnan(scores)))\n scores=scores.drop(scores.index[t1])\n data=data.drop(scores.index[t1])\n \n #scaler = MinMaxScaler()\n #data = scaler.fit_transform(data)\n\n #train test split\n X_train, X_test, y_train, y_test = train_test_split(data, scores, test_size = 0.3)\n\n #checking is there any nan cells\n print(np.any(np.isnan(scores)))\n print(np.all(np.isfinite(scores)))\n return X_train, X_test, y_train, y_test, data", "_____no_output_____" ], [ "def lin_regression(X_train,y_train,X_test,y_test):\n regr = LinearRegression()\n regr.fit(X_train, y_train)\n y_pred = regr.predict(X_test)\n\n # The mean squared error\n mse=mean_squared_error(y_test, y_pred)\n mse_per= 100*mse/3\n print(\"Mean squared error: {}\".format(mse))\n print(\"Mean squared error in percentage: {}\".format(mse_per))\n #explained variance score\n print('Variance score: {}'.format(regr.score(X_test, y_test)))", "_____no_output_____" ], [ "def adaBoost_reg(X_train,y_train,X_test,y_test):\n #regr = RandomForestRegressor(max_depth=2, n_estimators=300)\n #regr = SVR(gamma='scale', C=1, kernel='linear')\n regr = 
AdaBoostRegressor()\n regr.fit(X_train, y_train)\n y_pred = regr.predict(X_test)\n # The mean squared error\n mse=mean_squared_error(y_test, y_pred)\n mse_per= 100*mse/3\n print(\"Mean squared error: {}\".format(mse))\n print(\"Mean squared error in percentage: {}\".format(mse_per))\n #explained variance score\n print('Variance score: {}'.format(regr.score(X_test, y_test)))\n\n feature_importance = regr.feature_importances_\n\n # make importances relative to max importance\n feature_importance = 100.0 * (feature_importance / feature_importance.max())\n feature_names = list(('Average_Word_Length','Sentence_Count','Word_Count',\n 'Character_Count', 'Noun_Count','Adjective_Count',\n 'Verb_Count', 'Adverb_Count', 'Lemma_Count' ,'Misspell_Count'\n ))\n feature_names = np.asarray(feature_names)\n sorted_idx = np.argsort(feature_importance)\n pos = np.arange(sorted_idx.shape[0]) + .5\n plt.subplot(1, 2, 2)\n plt.barh(pos, feature_importance[sorted_idx], align='center')\n plt.yticks(pos, feature_names[sorted_idx])\n plt.xlabel('Relative Importance')\n plt.title('Variable Importance')\n plt.show()", "_____no_output_____" ], [ "# convert numerical scores to labels\n# (0-1.5) bad (1.5-2.3) average (2.3-3) good\n# bad: '0'\n# average '1'\n# good '2'\ndef convert_scores(scores):\n def mapping(x):\n if x < np.percentile(scores,25):\n return 0\n elif x < np.percentile(scores,75):\n return 1\n else:\n return 2\n return scores.apply(mapping)", "_____no_output_____" ], [ "# selecting which set to be used 1-8\n# in order to combine them all assign set number to 9\ndef select_set_classification(dataframe,setNumber):\n if setNumber == 9:\n dataframe2 = dataframe[dataframe.essay_set ==1]\n texts = dataframe2['essay']\n scores = dataframe2['domain1_score']\n scores = scores.apply(lambda x: (x*3)/scores.max())\n scores = convert_scores(scores)\n for i in range(1,9):\n dataframe2 = dataframe[dataframe.essay_set == i]\n texts = texts.append(dataframe2['essay'])\n s = dataframe2['domain1_score']\n s = s.apply(lambda x: (x*3)/s.max())\n s = convert_scores(s)\n scores = scores.append(s)\n else:\n dataframe2 = dataframe[dataframe.essay_set ==setNumber]\n texts = dataframe2['essay']\n scores = dataframe2['domain1_score']\n scores = scores.apply(lambda x: (x*3)/scores.max())\n scores = convert_scores(scores)\n return texts, scores", "_____no_output_____" ] ], [ [ "## Dataset selection", "_____no_output_____" ] ], [ [ "# 1-8\n# 9:all sets combined\ntexts, scores = select_set(dataframe,1)\nget_hist_avg(scores,5)\nX_train, X_test, y_train, y_test, data = data_prepare(texts,scores)", "_____no_output_____" ] ], [ [ "## Regression Analysis", "_____no_output_____" ] ], [ [ "print('Testing for Linear Regression \\n')\nlin_regression(X_train,y_train,X_test,y_test)\nprint('Testing for Adaboost Regression \\n')\nadaBoost_reg(X_train,y_train,X_test,y_test)", "_____no_output_____" ] ], [ [ "## Dataset selection 2", "_____no_output_____" ] ], [ [ "# 1-8\n# 9:all sets combined\ntexts, scores = select_set_classification(dataframe,1)\nX_train, X_test, y_train, y_test, data = data_prepare(texts,scores)", "_____no_output_____" ] ], [ [ "## Classification analysis", "_____no_output_____" ] ], [ [ "a=[0.1,1,10,100,500,1000]\nfor b in a:\n clf = svm.SVC(C=b, gamma=0.00001)\n clf.fit(X_train, y_train) \n y_pred = clf.predict(X_test)\n print (b)\n print (clf.score(X_test,y_test))\n print (np.mean(cross_val_score(clf, X_train, y_train, cv=3)))", "_____no_output_____" ], [ "clf = svm.SVC(C=100, gamma=0.00001)\nclf.fit(X_train, y_train) \ny_pred = 
clf.predict(X_test)\nprint('Cohen’s kappa score: {}'.format(cohen_kappa_score(y_test,y_pred)))", "_____no_output_____" ], [ "print(classification_report(y_test, y_pred))", "_____no_output_____" ] ], [ [ "## Data Analysis", "_____no_output_____" ] ], [ [ "sns.countplot(scores)", "_____no_output_____" ], [ "zero = data[(data[\"Character_Count\"] > 0) & (scores == 0)]\none = data[(data[\"Character_Count\"] > 0) & (scores == 1)]\ntwo = data[(data[\"Character_Count\"] > 0) & (scores == 2)]\nsns.distplot(zero[\"Character_Count\"], bins=10, color='r')\nsns.distplot(one[\"Character_Count\"], bins=10, color='g')\nsns.distplot(two[\"Character_Count\"], bins=10, color='b')\nplt.title(\"Score Distribution with respect to Character_Count\",fontsize=20)\nplt.xlabel(\"Character_Count\",fontsize=15)\nplt.ylabel(\"Distribuition of the scores\",fontsize=15)\nplt.show()", "_____no_output_____" ], [ "zero = data[(data[\"Lemma_Count\"] > 0) & (scores == 0)]\none = data[(data[\"Lemma_Count\"] > 0) & (scores == 1)]\ntwo = data[(data[\"Lemma_Count\"] > 0) & (scores == 2)]\nsns.distplot(zero[\"Lemma_Count\"], bins=10, color='r')\nsns.distplot(one[\"Lemma_Count\"], bins=10, color='g')\nsns.distplot(two[\"Lemma_Count\"], bins=10, color='b')\nplt.title(\"Score Distribution with respect to lemma count\",fontsize=20)\nplt.xlabel(\"Lemma Count\",fontsize=15)\nplt.ylabel(\"Distribuition of the scores\",fontsize=15)\nplt.show()", "_____no_output_____" ], [ "zero = data[(data[\"Sentence_Count\"] > 0) & (scores == 0)]\none = data[(data[\"Sentence_Count\"] > 0) & (scores == 1)]\ntwo = data[(data[\"Sentence_Count\"] > 0) & (scores == 2)]\nsns.distplot(zero[\"Sentence_Count\"], bins=10, color='r')\nsns.distplot(one[\"Sentence_Count\"], bins=10, color='g')\nsns.distplot(two[\"Sentence_Count\"], bins=10, color='b')\nplt.title(\"Score Distribution with respect to sentence count\",fontsize=20)\nplt.xlabel(\"Sentence Count\",fontsize=15)\nplt.ylabel(\"Distribuition of the scores\",fontsize=15)\nplt.show()", "_____no_output_____" ], [ "zero = data[(data[\"Word_Count\"] > 0) & (scores == 0)]\none = data[(data[\"Word_Count\"] > 0) & (scores == 1)]\ntwo = data[(data[\"Word_Count\"] > 0) & (scores == 2)]\nsns.distplot(zero[\"Word_Count\"], bins=10, color='r')\nsns.distplot(one[\"Word_Count\"], bins=10, color='g')\nsns.distplot(two[\"Word_Count\"], bins=10, color='b')\nplt.title(\"Score Distribution with respect to word count\",fontsize=20)\nplt.xlabel(\"Word_Count\",fontsize=15)\nplt.ylabel(\"Distribuition of the scores\",fontsize=15)\nplt.show()", "_____no_output_____" ], [ "zero = data[(data[\"Average_Word_Length\"] > 0) & (scores == 0)]\none = data[(data[\"Average_Word_Length\"] > 0) & (scores == 1)]\ntwo = data[(data[\"Average_Word_Length\"] > 0) & (scores == 2)]\nsns.distplot(zero[\"Average_Word_Length\"], bins=10, color='r')\nsns.distplot(one[\"Average_Word_Length\"], bins=10, color='g')\nsns.distplot(two[\"Average_Word_Length\"], bins=10, color='b')\nplt.title(\"Score Distribution with respect to Average_Word_Length\",fontsize=20)\nplt.xlabel(\"Average_Word_Length\",fontsize=15)\nplt.ylabel(\"Distribuition of the scores\",fontsize=15)\nplt.show()", "_____no_output_____" ] ], [ [ "### Kappa Score Reliability\nAccording to Cohen's original article, values ≤ 0 as indicating no agreement and 0.01–0.20 as none to slight, 0.21–0.40 as fair, 0.41– 0.60 as moderate, 0.61–0.80 as substantial, and 0.81–1.00 as almost perfect agreement. 
McHugh says that many texts recommend 80% agreement as the minimum acceptable interrater agreement.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d0b940f23a3c41112106c98107ee4d70adc48126
5,397
ipynb
Jupyter Notebook
notebooks/003-hello-segmentation/003-hello-segmentation.ipynb
MilanaShhanukova/openvino_notebooks
dc5d0989943aec549db528e78e2ba0ecf0942f02
[ "Apache-2.0" ]
634
2021-04-02T04:43:18.000Z
2022-03-31T17:01:18.000Z
notebooks/003-hello-segmentation/003-hello-segmentation.ipynb
MilanaShhanukova/openvino_notebooks
dc5d0989943aec549db528e78e2ba0ecf0942f02
[ "Apache-2.0" ]
231
2021-04-03T15:43:28.000Z
2022-03-31T17:30:29.000Z
notebooks/003-hello-segmentation/003-hello-segmentation.ipynb
MilanaShhanukova/openvino_notebooks
dc5d0989943aec549db528e78e2ba0ecf0942f02
[ "Apache-2.0" ]
169
2021-04-02T13:18:57.000Z
2022-03-30T16:59:22.000Z
26.072464
355
0.570317
[ [ [ "# Hello Segmentation\n\nA very basic introduction to using segmentation models with OpenVINO.\n\nWe use the pre-trained [road-segmentation-adas-0001](https://docs.openvinotoolkit.org/latest/omz_models_model_road_segmentation_adas_0001.html) model from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/). ADAS stands for Advanced Driver Assistance Services. The model recognizes four classes: background, road, curb and mark.", "_____no_output_____" ], [ "## Imports", "_____no_output_____" ] ], [ [ "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nfrom openvino.inference_engine import IECore\n\nsys.path.append(\"../utils\")\nfrom notebook_utils import segmentation_map_to_image", "_____no_output_____" ] ], [ [ "## Load the Model", "_____no_output_____" ] ], [ [ "ie = IECore()\n\nnet = ie.read_network(\n model=\"model/road-segmentation-adas-0001.xml\")\nexec_net = ie.load_network(net, \"CPU\")\n\noutput_layer_ir = next(iter(exec_net.outputs))\ninput_layer_ir = next(iter(exec_net.input_info))", "_____no_output_____" ] ], [ [ "## Load an Image\nA sample image from the [Mapillary Vistas](https://www.mapillary.com/dataset/vistas) dataset is provided. ", "_____no_output_____" ] ], [ [ "# The segmentation network expects images in BGR format\nimage = cv2.imread(\"data/empty_road_mapillary.jpg\")\n\nrgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nimage_h, image_w, _ = image.shape\n\n# N,C,H,W = batch size, number of channels, height, width\nN, C, H, W = net.input_info[input_layer_ir].tensor_desc.dims\n\n# OpenCV resize expects the destination size as (width, height)\nresized_image = cv2.resize(image, (W, H))\n\n# reshape to network input shape\ninput_image = np.expand_dims(\n resized_image.transpose(2, 0, 1), 0\n) \nplt.imshow(rgb_image)", "_____no_output_____" ] ], [ [ "## Do Inference", "_____no_output_____" ] ], [ [ "# Run the infernece\nresult = exec_net.infer(inputs={input_layer_ir: input_image})\nresult_ir = result[output_layer_ir]\n\n# Prepare data for visualization\nsegmentation_mask = np.argmax(result_ir, axis=1)\nplt.imshow(segmentation_mask[0])", "_____no_output_____" ] ], [ [ "## Prepare Data for Visualization", "_____no_output_____" ] ], [ [ "# Define colormap, each color represents a class\ncolormap = np.array([[68, 1, 84], [48, 103, 141], [53, 183, 120], [199, 216, 52]])\n\n# Define the transparency of the segmentation mask on the photo\nalpha = 0.3\n\n# Use function from notebook_utils.py to transform mask to an RGB image\nmask = segmentation_map_to_image(segmentation_mask, colormap)\nresized_mask = cv2.resize(mask, (image_w, image_h))\n\n# Create image with mask put on\nimage_with_mask = cv2.addWeighted(resized_mask, alpha, rgb_image, 1 - alpha, 0)", "_____no_output_____" ] ], [ [ "## Visualize data", "_____no_output_____" ] ], [ [ "# Define titles with images\ndata = {\"Base Photo\": rgb_image, \"Segmentation\": mask, \"Masked Photo\": image_with_mask}\n\n# Create subplot to visualize images\nf, axs = plt.subplots(1, len(data.items()), figsize=(15, 10))\n\n# Fill subplot\nfor ax, (name, image) in zip(axs, data.items()):\n ax.axis('off')\n ax.set_title(name)\n ax.imshow(image)\n\n# Display image\nplt.show(f)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0b941a4f2ffdbb0e1a805d083d2bd7130e3bac4
4,972
ipynb
Jupyter Notebook
.ipynb_checkpoints/TD-ImplementationArbreBinaire-checkpoint.ipynb
egouello/TermNSI
be767e787f998b57b2786e04d4d6fdb02d9cba5f
[ "MIT" ]
null
null
null
.ipynb_checkpoints/TD-ImplementationArbreBinaire-checkpoint.ipynb
egouello/TermNSI
be767e787f998b57b2786e04d4d6fdb02d9cba5f
[ "MIT" ]
null
null
null
.ipynb_checkpoints/TD-ImplementationArbreBinaire-checkpoint.ipynb
egouello/TermNSI
be767e787f998b57b2786e04d4d6fdb02d9cba5f
[ "MIT" ]
null
null
null
34.054795
240
0.567377
[ [ [ "# TD - Implémentation des arbres en POO\n\nOn va dans ce TD créer une classe arbre binaire qui va nous permettre d'implémenter cette structure de données avec toutes ses caractéristiques.\n\nOn se souvient du cours sur les arbres https://pixees.fr/informatiquelycee/n_site/nsi_term_structDo_arbre.html dans lequel on définit le noeud racine, les sous-arbres gauche et droit.", "_____no_output_____" ] ], [ [ "class ArbreBinaire:\n def __init__(self, valeur): #un arbre est un noeud contenant une valeur et deux enfants gauche et droit\n self.valeur = valeur #le noeud contient la valeur passée en paramètre\n self.enfant_gauche = None #l'enfant gauche est vide au départ\n self.enfant_droit = None #l'enfant droit est vide au départ\n\n def insert_gauche(self, valeur): #on insere valeur à la racine du sous-arbre de gauche\n if self.enfant_gauche == None:\n self.enfant_gauche = ArbreBinaire(valeur)\n else:\n new_node = ArbreBinaire(valeur)\n new_node.enfant_gauche = self.enfant_gauche\n self.enfant_gauche = new_node\n\n def insert_droit(self, valeur): #on insere valeur à la racine du sous-arbre de droite\n if self.enfant_droit == None:\n self.enfant_droit = ArbreBinaire(valeur)\n else:\n new_node = ArbreBinaire(valeur)\n new_node.enfant_droit = self.enfant_droit\n self.enfant_droit = new_node\n \n def get_valeur(self): #on renvoit la valeur du noeud racine\n return self.valeur\n\n def get_gauche(self): #on renvoit le sous arbre gauche\n return self.enfant_gauche\n\n def get_droit(self): #on renvoit le sous arbre droit\n return self.enfant_droit\n \n def __str__(self): #affichage d'un arbre avec print()\n if self!= None: #on écrit (enfantgauche)valeur(enfantdroit)\n return \"(\"+self.enfant_gauche.__str__()+\")\"+str(self.valeur)+\"(\"+self.enfant_droit.__str__()+\")\"\n else:\n return \"\"", "_____no_output_____" ] ], [ [ "Il est important de comprendre comment la structure de donnée est implémentée : un arbre est défini par une valeur et deux sous arbres gauches et droits qui sont tous les deux initialiés à <code>None</code> au démarrage.\n\nLors de l'insertion d'une valeur à gauche ou à droite, on crée un nouvel arbre que l'on insère à la racine du sous-arbre de gauche (dans le cas de <code>insert_gauche()</code>) ou droite (dans le cas de <code>insert_droit()</code>)\n\n## Exercice 1\nSur papier, dessinez ce qui se passe étape par étape quand vous éxécutez le code suivant :", "_____no_output_____" ] ], [ [ "arbre=ArbreBinaire(3)\narbre.insert_gauche(1)\narbre.insert_gauche(2) #on insère 2 à gauche, la valeur 1 se décale à gauche du 2\nprint(arbre)", "(((None)1(None))2(None))3(None)\n" ] ], [ [ "## Exercice 2\nÉcrire le code permettant de créer l'arbre suivant\n<img src=\"nsi_term_algo_arbre_1.png\">\n\nVoila le début du code ci-dessous pour vous aider à démarrer...", "_____no_output_____" ] ], [ [ "arbre=ArbreBinaire(\"A\")\narbre.insert_gauche(\"B\")\narbreg=arbre.get_gauche()\narbreg.insert_gauche(\"C\") #les deux lignes précédentes peuvent-être abbrégées en arbre.get_gauche().insert_gauche(\"C\")\n# à suivre...\nprint(arbre)", "(((None)C(None))B(None))A(None)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0b95b1fb00a3ea673ac950f8513b7e6b094c9bd
77,880
ipynb
Jupyter Notebook
notebooks/BigFloat_Test.ipynb
JuliaPackageMirrors/Jacobi.jl
ed5da8181c31ae2cb194d9e4b6569cad4c35994c
[ "MIT" ]
null
null
null
notebooks/BigFloat_Test.ipynb
JuliaPackageMirrors/Jacobi.jl
ed5da8181c31ae2cb194d9e4b6569cad4c35994c
[ "MIT" ]
null
null
null
notebooks/BigFloat_Test.ipynb
JuliaPackageMirrors/Jacobi.jl
ed5da8181c31ae2cb194d9e4b6569cad4c35994c
[ "MIT" ]
null
null
null
285.274725
38,062
0.930547
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0b968f4884848261d1c948345f2cbfba1778878
687,836
ipynb
Jupyter Notebook
5_LinearRegression.ipynb
matbesancon/TuringTutorials
0674f9f0e6187fb6c74c9a831035803986c8a430
[ "MIT" ]
null
null
null
5_LinearRegression.ipynb
matbesancon/TuringTutorials
0674f9f0e6187fb6c74c9a831035803986c8a430
[ "MIT" ]
null
null
null
5_LinearRegression.ipynb
matbesancon/TuringTutorials
0674f9f0e6187fb6c74c9a831035803986c8a430
[ "MIT" ]
null
null
null
115.254021
1,322
0.650261
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0b96d4abdd4b0c3914ea41ab193c6c4a6f5b093
210,923
ipynb
Jupyter Notebook
python/data_sci/prices_CSV_REG.ipynb
SayanGhoshBDA/code-backup
8b6135facc0e598e9686b2e8eb2d69dd68198b80
[ "MIT" ]
16
2018-11-26T08:39:42.000Z
2019-05-08T10:09:52.000Z
python/data_sci/prices_CSV_REG.ipynb
SayanGhoshBDA/code-backup
8b6135facc0e598e9686b2e8eb2d69dd68198b80
[ "MIT" ]
8
2020-05-04T06:29:26.000Z
2022-02-12T05:33:16.000Z
python/data_sci/prices_CSV_REG.ipynb
SayanGhoshBDA/code-backup
8b6135facc0e598e9686b2e8eb2d69dd68198b80
[ "MIT" ]
5
2020-02-11T16:02:21.000Z
2021-02-05T07:48:30.000Z
233.064088
92,112
0.89289
[ [ [ "from __future__ import print_function\n\nimport math\n\nfrom IPython import display\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nimport tensorflow as tf\nfrom tensorflow.python.data import Dataset\n\ntf.logging.set_verbosity(tf.logging.ERROR)\npd.options.display.max_rows = 10\npd.options.display.float_format = '{:.1f}'.format", "/home/jimut/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ], [ "ls\n", "LUND.ipynb README.md Test1_Regressor.ipynb\r\nPandas.ipynb Regressor.ipynb test.csv\r\nprices.csv reinforcement_q_learning.ipynb Untitled.ipynb\r\nprices_CSV_REG.ipynb Stock_Kaggle_1.ipynb WIKI-PRICES.csv\r\n" ], [ "dataset_all = pd.read_csv('prices.csv')", "_____no_output_____" ], [ "dataset_all.head()", "_____no_output_____" ], [ "dataset_all", "_____no_output_____" ], [ "def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):\n \"\"\"Trains a linear regression model of one feature.\n \n Args:\n features: pandas DataFrame of features\n targets: pandas DataFrame of targets\n batch_size: Size of batches to be passed to the model\n shuffle: True or False. Whether to shuffle the data.\n num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely\n Returns:\n Tuple of (features, labels) for next data batch\n \"\"\"\n \n # Convert pandas data into a dict of np arrays.\n features = {key:np.array(value) for key,value in dict(features).items()} \n \n # Construct a dataset, and configure batching/repeating.\n ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit\n ds = ds.batch(batch_size).repeat(num_epochs)\n \n # Shuffle the data, if specified.\n if shuffle:\n ds = ds.shuffle(buffer_size=10000)\n \n # Return the next batch of data.\n features, labels = ds.make_one_shot_iterator().get_next()\n return features, labels", "_____no_output_____" ], [ "def train_model(learning_rate, steps, batch_size, input_feature=\"close\"):\n \"\"\"Trains a linear regression model of one feature.\n \n Args:\n learning_rate: A `float`, the learning rate.\n steps: A non-zero `int`, the total number of training steps. 
A training step\n consists of a forward and backward pass using a single batch.\n batch_size: A non-zero `int`, the batch size.\n input_feature: A `string` specifying a column from `california_housing_dataframe`\n to use as input feature.\n \"\"\"\n \n periods = 10\n steps_per_period = steps / periods\n\n my_feature = input_feature\n my_feature_data = dataset_all[[my_feature]]\n my_label = \"open\"\n targets = dataset_all[my_label]\n\n # Create feature columns.\n feature_columns = [tf.feature_column.numeric_column(my_feature)]\n \n # Create input functions.\n training_input_fn = lambda:my_input_fn(my_feature_data, targets, batch_size=batch_size)\n prediction_input_fn = lambda: my_input_fn(my_feature_data, targets, num_epochs=1, shuffle=False)\n \n # Create a linear regressor object.\n my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\n linear_regressor = tf.estimator.LinearRegressor(\n feature_columns=feature_columns,\n optimizer=my_optimizer\n )\n\n # Set up to plot the state of our model's line each period.\n plt.figure(figsize=(15, 6))\n plt.subplot(1, 2, 1)\n plt.title(\"Learned Line by Period\")\n plt.ylabel(my_label)\n plt.xlabel(my_feature)\n sample = dataset_all.sample(n=300)\n plt.scatter(sample[my_feature], sample[my_label])\n colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]\n\n # Train the model, but do so inside a loop so that we can periodically assess\n # loss metrics.\n print(\"Training model...\")\n print(\"RMSE (on training data):\")\n root_mean_squared_errors = []\n for period in range (0, periods):\n # Train the model, starting from the prior state.\n linear_regressor.train(\n input_fn=training_input_fn,\n steps=steps_per_period\n )\n # Take a break and compute predictions.\n predictions = linear_regressor.predict(input_fn=prediction_input_fn)\n predictions = np.array([item['predictions'][0] for item in predictions])\n \n # Compute loss.\n root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(predictions, targets))\n # Occasionally print the current loss.\n print(\" period %02d : %0.2f\" % (period, root_mean_squared_error))\n # Add the loss metrics from this period to our list.\n root_mean_squared_errors.append(root_mean_squared_error)\n # Finally, track the weights and biases over time.\n # Apply some math to ensure that the data and line are plotted neatly.\n y_extents = np.array([0, sample[my_label].max()])\n \n weight = linear_regressor.get_variable_value('linear/linear_model/%s/weights' % input_feature)[0]\n bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')\n\n x_extents = (y_extents - bias) / weight\n x_extents = np.maximum(np.minimum(x_extents,\n sample[my_feature].max()),\n sample[my_feature].min())\n y_extents = weight * x_extents + bias\n plt.plot(x_extents, y_extents, color=colors[period]) \n print(\"Model training finished.\")\n\n # Output a graph of loss metrics over periods.\n plt.subplot(1, 2, 2)\n plt.ylabel('RMSE')\n plt.xlabel('Periods')\n plt.title(\"Root Mean Squared Error vs. 
Periods\")\n plt.tight_layout()\n plt.plot(root_mean_squared_errors)\n\n # Output a table with calibration data.\n calibration_data = pd.DataFrame()\n calibration_data[\"predictions\"] = pd.Series(predictions)\n calibration_data[\"targets\"] = pd.Series(targets)\n display.display(calibration_data.describe())\n\n print(\"Final RMSE (on training data): %0.2f\" % root_mean_squared_error)", "_____no_output_____" ], [ "train_model(\n learning_rate=0.002,\n steps=100,\n batch_size=10,\n input_feature=\"close\"\n)", "Training model...\nRMSE (on training data):\n period 00 : 98.68\n period 01 : 87.72\n period 02 : 76.76\n period 03 : 65.80\n period 04 : 54.83\n period 05 : 43.87\n period 06 : 32.92\n period 07 : 21.97\n period 08 : 11.05\n period 09 : 2.62\nModel training finished.\n" ], [ "def train_model(learning_rate, steps, batch_size, input_feature=\"close\"):\n \"\"\"Trains a linear regression model of one feature.\n \n Args:\n learning_rate: A `float`, the learning rate.\n steps: A non-zero `int`, the total number of training steps. A training step\n consists of a forward and backward pass using a single batch.\n batch_size: A non-zero `int`, the batch size.\n input_feature: A `string` specifying a column from `california_housing_dataframe`\n to use as input feature.\n \"\"\"\n \n periods = 10\n steps_per_period = steps / periods\n\n my_feature = input_feature\n my_feature_data = dataset_all[[my_feature]]\n my_label = \"open\"\n targets = dataset_all[my_label]\n\n # Create feature columns.\n feature_columns = [tf.feature_column.numeric_column(my_feature)]\n \n # Create input functions.\n training_input_fn = lambda:my_input_fn(my_feature_data, targets, batch_size=batch_size)\n prediction_input_fn = lambda: my_input_fn(my_feature_data, targets, num_epochs=1, shuffle=False)\n \n # Create a linear regressor object.\n my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\n linear_regressor = tf.estimator.LinearRegressor(\n feature_columns=feature_columns,\n optimizer=my_optimizer\n )\n\n # Set up to plot the state of our model's line each period.\n plt.figure(figsize=(15, 6))\n plt.subplot(1, 2, 1)\n plt.title(\"Learned Line by Period\")\n plt.ylabel(my_label)\n plt.xlabel(my_feature)\n sample = dataset_all.sample(n=300)\n plt.scatter(sample[my_feature], sample[my_label])\n colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]\n\n # Train the model, but do so inside a loop so that we can periodically assess\n # loss metrics.\n print(\"Training model...\")\n print(\"RMSE (on training data):\")\n root_mean_squared_errors = []\n for period in range (0, periods):\n # Train the model, starting from the prior state.\n linear_regressor.train(\n input_fn=training_input_fn,\n steps=steps_per_period\n )\n # Take a break and compute predictions.\n predictions = linear_regressor.predict(input_fn=prediction_input_fn)\n predictions = np.array([item['predictions'][0] for item in predictions])\n \n # Compute loss.\n root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(predictions, targets))\n # Occasionally print the current loss.\n print(\" period %02d : %0.2f\" % (period, root_mean_squared_error))\n # Add the loss metrics from this period to our list.\n root_mean_squared_errors.append(root_mean_squared_error)\n # Finally, track the weights and biases over time.\n # Apply some math to ensure that the data and line are plotted neatly.\n y_extents = np.array([0, sample[my_label].max()])\n 
\n weight = linear_regressor.get_variable_value('linear/linear_model/%s/weights' % input_feature)[0]\n bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')\n\n x_extents = (y_extents - bias) / weight\n x_extents = np.maximum(np.minimum(x_extents,\n sample[my_feature].max()),\n sample[my_feature].min())\n y_extents = weight * x_extents + bias\n plt.plot(x_extents, y_extents, color=colors[period]) \n print(\"Model training finished.\")\n\n # Output a graph of loss metrics over periods.\n plt.subplot(1, 2, 2)\n plt.ylabel('RMSE')\n plt.xlabel('Periods')\n plt.title(\"Root Mean Squared Error vs. Periods\")\n plt.tight_layout()\n plt.plot(root_mean_squared_errors)\n\n # Output a table with calibration data.\n calibration_data = pd.DataFrame()\n calibration_data[\"predictions\"] = pd.Series(predictions)\n calibration_data[\"targets\"] = pd.Series(targets)\n display.display(calibration_data.describe())\n\n print(\"Final RMSE (on training data): %0.2f\" % root_mean_squared_error)", "_____no_output_____" ], [ "train_model(\n learning_rate=0.002,\n steps=100,\n batch_size=10,\n input_feature=\"high\"\n)", "Training model...\nRMSE (on training data):\n period 00 : 98.58\n period 01 : 87.51\n period 02 : 76.45\n period 03 : 65.38\n period 04 : 54.31\n period 05 : 43.24\n period 06 : 32.18\n period 07 : 21.12\n period 08 : 10.08\n period 09 : 1.53\nModel training finished.\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0b98344be9416e40e634f0372c4e8920a67f971
161,911
ipynb
Jupyter Notebook
Chapter7_Exercise3_Tips.ipynb
packkkk/data-science-and-machine-learning-fundamentals
801cdbf226749fa02b49bc6ff843ee8d06760c69
[ "MIT" ]
null
null
null
Chapter7_Exercise3_Tips.ipynb
packkkk/data-science-and-machine-learning-fundamentals
801cdbf226749fa02b49bc6ff843ee8d06760c69
[ "MIT" ]
null
null
null
Chapter7_Exercise3_Tips.ipynb
packkkk/data-science-and-machine-learning-fundamentals
801cdbf226749fa02b49bc6ff843ee8d06760c69
[ "MIT" ]
null
null
null
484.763473
49,915
0.941659
[ [ [ "## Chapter 7 - Exercise 3: Tips", "_____no_output_____" ], [ "#### Cho dữ liệu tips có sẵn trong seaborn library. Hãy vẽ những biểu đồ theo yêu cầu, và đối chiếu với kết quả:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "# Load dữ liệu tips có sẵn trong seaborn library\n#total_bill: Total bill (cost of the meal), including tax, in US dollars\n#tip: Tip (gratuity) in US dollars\n#sex: Sex of person paying for the meal (0=male, 1=female)\n#smoker: Smoker in party? (0=No, 1=Yes)\n#day: 3=Thur, 4=Fri, 5=Sat, 6=Sun\n#time: 0=Day, 1=Night\n#size: Size of the party\n\n", "_____no_output_____" ] ], [ [ "<details>\n <summary>Nhấn vào đây để xem kết quả !</summary>\n \n<pre>&lt;class 'pandas.core.frame.DataFrame'&gt;\nRangeIndex: 244 entries, 0 to 243\nData columns (total 7 columns):\ntotal_bill 244 non-null float64\ntip 244 non-null float64\nsex 244 non-null category\nsmoker 244 non-null category\nday 244 non-null category\ntime 244 non-null category\nsize 244 non-null int64\ndtypes: category(4), float64(2), int64(1)\nmemory usage: 7.3 KB\n</pre>\n\n<div>\n<style scoped=\"\">\n .dataframe tbody tr th:only-of-type {\n vertical-align: middle;\n }\n\n .dataframe tbody tr th {\n vertical-align: top;\n }\n\n .dataframe thead th {\n text-align: right;\n }\n</style>\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>total_bill</th>\n <th>tip</th>\n <th>sex</th>\n <th>smoker</th>\n <th>day</th>\n <th>time</th>\n <th>size</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>16.99</td>\n <td>1.01</td>\n <td>Female</td>\n <td>No</td>\n <td>Sun</td>\n <td>Dinner</td>\n <td>2</td>\n </tr>\n <tr>\n <th>1</th>\n <td>10.34</td>\n <td>1.66</td>\n <td>Male</td>\n <td>No</td>\n <td>Sun</td>\n <td>Dinner</td>\n <td>3</td>\n </tr>\n <tr>\n <th>2</th>\n <td>21.01</td>\n <td>3.50</td>\n <td>Male</td>\n <td>No</td>\n <td>Sun</td>\n <td>Dinner</td>\n <td>3</td>\n </tr>\n <tr>\n <th>3</th>\n <td>23.68</td>\n <td>3.31</td>\n <td>Male</td>\n <td>No</td>\n <td>Sun</td>\n <td>Dinner</td>\n <td>2</td>\n </tr>\n <tr>\n <th>4</th>\n <td>24.59</td>\n <td>3.61</td>\n <td>Female</td>\n <td>No</td>\n <td>Sun</td>\n <td>Dinner</td>\n <td>4</td>\n </tr>\n </tbody>\n</table>\n</div>\n\n</details>", "_____no_output_____" ] ], [ [ "# Câu 1: Vẽ violinplot cho cho cột total_bill\n# Bạn nhận xét gì về biểu đồ vừa tạo\n", "_____no_output_____" ] ], [ [ "<details>\n <summary>Nhấn vào đây để xem kết quả !</summary>\n \n<div class=\"output_subarea output_png\"><img 
src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAWAAAAEHCAYAAACQkJyuAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3deXhV1b3/8fc6Y07meYCEhISQgIAoFFFEQcERB/ypONH7UymKA2ItDtinauvU4Xqt1TpfaxUrTx1apb+2VymORSGggAqSAAJhSEIGkpx5WL8/ErgogwnkZJ2TfF/Pc54kJyfnfDZJPqysvffaSmuNEEKI3mcxHUAIIforKWAhhDBEClgIIQyRAhZCCEOkgIUQwhBbdx6cnZ2tS0pKohRFCCH6ppUrV+7WWud89/5uFXBJSQlVVVU9l0oIIfoBpdSWg90vUxBCCGGIFLAQQhgiBSyEEIZIAQshhCFSwEIIYYgUsBBCGCIFLIQQhkgBCyGEIVLAQghhiBSwEEIYIgUshBCGSAELIYQhUsBCCGGIFLAQQhgiBSyEEIZIAQshhCFSwEIIYYgUsBBCGCIFLIQQhkgB93Faa3w+H1pr01GEEN/RrYtyitjm8/lYtWoVy5YtY/mKFbS0tBDw+9Fak5KSSmVlBRUVFZx88slUVlaajitEv6e6MzIaO3aslqsixx6Px8OiRYt4ddEi/D4fymonkFKAdqagrXZQViz+VmyeRpS3GbRm+DHHcOkll3DyySdjs8n/w0JEk1JqpdZ67Hfvl9+8OBYOh1m8eDHP//cLtO5pIZhRQnBQBeGUfLBYD/FFAewN1Xy1cR333nsvg0tLWXDXXZSXl/dueCGEjIDjVWtrK7/4xf2sWLGccEo+vsKxRJJzu/4EOoKt6RtctcuxhPxceeUVzJw5E4fDEb3QQvRThxoBy064OFRTU8OPZs9mxcoqfMUn4ak4u3vlC6AshLJKaTtmOv6Mwbz00kvceNNNNDY2Rie0EOIAUsBx5tNPP2XODTdQ19yGu+IcgrmVoNSRP6HNia/0FLxDTqNm4yZmX3c9mzZt6rnAQohDkgKOI1VVVdx9908J2FNoH3Z+90e9hxHKKKG94hya2jzccOONLF++vMeeWwhxcFLAceLzzz/nrgULCDpSaB96Jtru6vHXiCRl01Y5Da8lkbsWLODTTz/t8dcQQvwvKeA4sH79eu64404CtiTcQ88EW0LUXks7kmgfehYhZxp33/1TVqxYEbXXEqK/kwKOcU1NTSy4+278yo67PDoj3wPYnLQPPZOgM5UFCxawatWq6L+mEP2QFHAMC4VC/Oyee2hu2YO77DS0I7H3XtyWgLv8TAL2FO5asIANGzb03msL0U9IAcewJ554gi/WrsVTPIFIYlavv762J+AuPwO/tnH77Xewc+fOXs8gRF8mBRyjlixZwptvvkkg7xhCWWXGcmhHIu7yqexp9zD/9jtoa2szlkWIvkYKOAbV19fzm//8TyLJufiLfmA6DhFXBu6y06jdvp0FC+4mGAyajiREnyAFHGO01vzq17/G5w/iGXwKqNj4FoVTC/CWnMzatWt49NFHZXlLIXpAbPx2i33efvttqlaswDtwDDoh1XScbwllleEvGMXf/vY33njjDdNxhIh7UsAxZMeOHTzxxO8Jpw4gmDvMdJyDCgwcQyh9EI8//jiyMJMQR0cKOEZorfnPRx4hEI7gLTn56NZ3iCal8JaeSsSVwc/uuYfa2lrTiYSIW1LAMeKDDz5gZVUV3gHHoZ3JpuMcntWOe8jpeANhFtx9Nx6Px3QiIeKSFHAM8Hq9PPbY79BJWTE79fBd2pmCe/CpbN26lYceflh2yglxBKSAY8BLL71EY+NuvEXjY+aoh64Ipw3EVziWDz/4gIULF5qOI0TciZ/f9j5q69atLFq0iGB2OeGUPNNxui2YN4JgZinPPf88n3zyiek4QsQVKWDDfvf440SUDX/hAVcriQ9K4Ss5GZ2YyX0//znbtm0znUiIuCEFbNDKlStZsXw53oJRvbPKWbRYbbjLTscXjHDXgrtxu92mEwkRF6SADYlEIvz+909CQgrB3OGm4xw17UzGXTqZ2tptPPDgg0QiEdORhIh5UsCGvPvuu2zcWIN3wPGHvoR8nAmnFuArGse/P/6YF154wXQcIWKeFLABfr+fp595lkhSNqHMUtNxelQwdziB7KG89NJLLFmyxHQcIWKaFLABb7zxBo27G/AV/iB2z3g7UkrhLz6RSEo+Dz/8MOvXrzedSIiYJQXcy9ra2njp5ZcJpRUSTi0wHSc6LFY8ZZMJWhO4a8ECGhoaTCcSIiZJAfey1157DY/bjb9wjOkoUaXtLtxlp9Oyp4077rxTTlcW4iCkgHtRS0sLry5aRDCjxMglhnpbJDETd9lkNm3axD333ksoFDIdSYiYIgXci1599VX8fj+BgcebjtJrwmmF+AadyIrly3nsscdkzQgh9iMF3EsaGxt5/Y03CGaWEnGlm47Tq4K5lfjzR/LWW2/x8ssvm44jRMyQAu4lL7/8MsFgCP+A40xHMSJQOJZgVhnPP/88b7/9tuk4QsQEKeBeUF9fz1tvv00ge0jMXWao1yiFr2QiobRCHnnkEd5//33TiYQwTgq4F7zyyiuEwxECBaNNRzHLYsFbdhrh5Bx+/otfsGLFCtOJhDBKCjjK6uvreXvxYgLZ5bF/pYveYLXhHjKVkDONBQsWsGrVKtOJhDBGCjjKXnnlFcKRCIGCY01HiR02J+7yMwjYk7nzzrtYvXq16URCGCEFHEX7Rr9ZMvr9Lm134R56Fn6ri9tvv4M1a9aYjiREr5MCjqKFCxfK6Pcw9pWwJYHbfvITmRMW/Y4UcJTU19ezWEa/30s7EmmvOJuAPYU777qLDz/80HQkIXqNFHCULFy4kLDWMvrtAm130T70LIIJmdxzzz3885//NB1JiF4hBRwFMvo9AjYn7qFnEkzO56GHHuLll1+W05ZFnycFHAWvvPKKjH6PhNWOp3wqwcwynnvuOR555BFZwEf0aVLAPUyOfDhKFiu+0lPw54/i7bffZsGCBbS3t5tOJURUSAH3MDnutwcoRaBoLL7ik1i+ooo5c26gtrbWdCohepwUcA+qq6uT0W8PCuZW4hl6JrW76rnuuuupqqoyHUmIHiUF3IP++Mc/Eo7I3G9PCqcW0FY5jXZtZ/78+bz88styyXvRZ0gB95Dt27fz97//g0D2UBn99jCdkEp75TQCGYN57rnnWLBgAW1tbaZjCXHUpIB7yIsvvohGyeg3Wqx2fKWn4hs0nk+WL+eaa69l7dq1plMJcVSkgHvAli1beOedd/DnVKIdiabj9F1KEcwbjrviHBpavcydO5cXX3yRcDhsOpkQR0QKuAf84Q8vgsVGoGCk6Sj9QiQ5l7bhFxDIKOWFF15g7txb2L59u+lYQnSbFPBRqq6uZul7S/HlDkPbXabj9B9WB76yU/EOPoWvvq7m6quv4c0335QddCK
uSAEfpaeefhplcxLIl9GvCaHsIbQdcyFeVza//e1v+fGPfyzHDIu4IQV8FKqqqlhZVYU3fxTYnKbj9FvakYSn/Ax8JRNY/cVXXH311SxcuFBOYxYxTwr4CEUiEZ566mlwJhPMrTQdRyhFMKeCtmMuwps8gGeffZZZs34kC72LmCYFfISWLl1KTU013gHHg8VmOo7opB2JeIecjnfI6Xyzs4G5c+fywAMP0NjYaDqaEAeQAj4CgUCAZ559Fp2URSirzHQccRChjGLajrkIf8GxvLPkX1x55VUsXLgQv99vOpoQ+0gBH4HXXnuNul278A4cC0qZjiMOxWojUDiG9mOm056Qw7PPPsuVV13Fu+++K0dLiJggBdxNDQ0NvPjiHwmlDyKcNtB0HNEFOiEVb/kUPBVn0+CJcP/99zPrRz/i008/lUXfhVFSwN301FNPEQgG8RWNMx1FdFM4tYD2YefjLT2VTdsbuOOOO5g79xY+//xz09FEPyUF3A1r1qxhyZIl+PJGoBNSTccRR0IpQllltB0zHV/xiaz9uoZ58+Zxy7x5rF692nQ60c/I7vsuCofD/Nejj4IzWRbc6QssVoK5wwhml2Ov/5rVX63llltuYcTIkcy86irGjRuHkvl9EWVSwF305z//mc2bNuEtmwxW+WfrMyw2gvnHEMytwN7wNV9s+JI77riDIUPKueKKyznllFOw2eT7LaJDdWcnxNixY3V/vCrBtm3buOaaa/AmF+AtO12OfOjLImFsjRtx1a0F7x5y8/K5bMalnH322bhcstaHODJKqZVa67EH3C8FfHiRSISbb57LV19X03bMdFlusr/QGlvLVpx1a7G01ZOYlMQF55/P9OnTyc3NNZ1OxJlDFbD8bfU93nzzTb788gu8gydK+fYnShHKKCaUUYylrY5g3Zf86dVXWbRoERMnTuSiiy5i1KhRMk8sjooU8GHU1tby9NPPEEorJJQ1xHQcYUgkJQ9fSh5+fzuO+q/44N+f8P7771NSMpiLLprOlClTSEyU/5xF98kUxCH4/X6unzOHb7btoG3Y+XKdN/G/wiHsTZtwNqxDuRtJcLk484wzOP/88ykrk1PTxYFkCqKbHnvsMTZv2oSnfKqUr/g2q41gzlCC2eVY3A0E69fz17cX89e//pWhFRWcf955nHbaaTIqFt9LRsAH8T//8z88+OCD+PNHESg64D8tIQ4U8mPfXYOzcQPK04zD4WTSpFM5++yzOfbYY7FY5Jyn/kyOguiijRs3MueGG/A5MnBXnAVKfnFEN2iNxd2AffcGnM3foEMBcnPzOOOMqUydOpXi4mLTCYUBUsBdUFdXx/VzbqDZ7aO98jw56kEcnXAIW8sW7I012Fp3gNYMGVLO6aefxqRJkygoKDCdUPQSKeDv0drayo033kTtzl20V5xDJDHTdCTRh6igB1vjJhzNm7G0NwBQPnQop0ycyMknn0xJSYkc0taHSQEfht/v58e33caXX63DU34G4VQZmYjoUf427E2bsbdsxdJeD0B+QQEnjh/P+PHjGT16NE6nXGOwL5ECPgS3281ddy1gzZrVeMsmEcosNR1J9CMq4MHWshVby1bs7bvQ4RA2m51jjjmGMWOOZ/To0VRUVEghxzk5DO0gmpubmT//dmo2bsRbeqqUr+h12pFIMLeSYG4l3kgIa1sdtj21fF6zjdWrO9YpttpsVAytYNSokVRWVjJs2DByc3NlyqIP6LcFvGPHDn4yfz47d9XhGXI64fQi05FEf2exEU4bSDhtIH6AkA9bWx3W9jq+2FrPV+v/DJEwAKlpaVQMHUp5eTlDhgyhrKyMgQMHysptcaZffrfee+89fvnLX+ELhnGXn0k4Jc90JCEOZEvYtx6FHyASxuJpwurZTdC9m+VfbWJF1UrQHde3s9vtFBeXUFZWSklJyb5bXl6eHIcco/pVAfv9fp544gneeustIsm5eMpPRTtTTMcSomssViLJOUSScwjuvS8SxuJtxuJtJuBpZn19Exu3fYT2/3Pfl9kdDoqKiigpLqaoqIhBgwZRWFjIwIEDSU6WszxN6hcFrLXmo48+4vEnnqBu1y4C+SPxDxwDMioQ8c5iJZKUTSQpm1DnXV6AkB+rtwWLr4WAt4UNDS1s3r4C7fvXt748JTWNgQMGMGBAAfn5+eTl5ZGTk0Nubi7Z2dmkpqbK6DmK+nwBV1dX8+RTT7Fq5Up0YgbeirMIpw4wHUuI6LI5CafkfWt6zQsQCWHxtWLxt6J8bQT8e2je0cL6LdvB175vOmMvi8VCalo6GRnppKelkZqaSmpqKklJSftuLpcLl8tFQkLCvtv+H7tcLhwOR+9uf5zokwWstWb58uW8+uoiPvtsFcrmxDfoBII5w2TUK/o3i41IYubBTzTSEVTQiwq4sQTcqKAHFfTiC/rY3eRF1e/AGtmMCgUgFEBHQgc+xyFYbTZcrkQSExNJS00hLS2NlJQU0tPTycjIIDMzk+zsbHJycsjJySElJaVfHOXRZwpYa83GjRtZunQp7y5ZQt2uXeBMwl84lkBOBdjkOEohDktZ0I4ktCOJyPc/GiJhVDgIkSAqHOp4GwlBJIQKhzreDwdRkSCEg3jDQZqDAXbUe7Hs3IMl7EcFfeiQ/4CndiYkkJ+Xz8CBAygoKGDgwIH7bvn5+X3maI+43QqtNQ0NDaxdu5ZVq1axomol9XW7Oq5kkDKA4OBTCGUOBovVdFQh+iaLFW2xAgl0/XSug4iEUEEfKujpGHkH3AQC7bTvaeeb3euxfLoCHd632xGL1Up+fj6DioooLCykqKhoXznn5OTEVTnHfNJAIEBjYyM7duxg+/bt1NbWsnHjJjZs2EBbWysAyu4kkJRHuPgkQhnFaHvfvHiic+snWDxN0XnycAAVCqBtDrCan6+LJGbiHzTedAzRGyw2tDMZ7Uw++Mhba1TIh8W3B+Vvw+LbwxZPK9vXbMCyogod/t+pEKvVSm5ePgMHFFBQUEBeXh65ubnk5OSQnZ1NZmYmiYmJMTO90SsFrLVmw4YNuN1ugsEggUAAv9+Pz+fD5/Ph8XjweDy43W5aW1vZs2cPzS17aGxspL2zZPexWNGudIKuPCIZwwkn53bMZx1k2cioFpYBVk9jx598UZCQkMC086exePFifDHwb6Y9jX3qe9fb+tR/YEqh7S7Cdhek5H/7c1p3jJz327G41dvK9vVbsK75Eh3wHvB0doeD9PQM0tJSSU9LIy0t7Vs7Fb+789DhcOB0Ohk2bFiPnxL+vQWslJoNzAYYNGjQEb1IVVUV8+fP7/bXhRPSiWQNIZyUTcSVQcSZgnYkyWXho2DatGncdNNNaK15/fXXTccRomuUQjuSCDuSCHOQRbTCIVTQjSXgQfnbsHqbCHmaCDQ20VBf162XmjVrFldddVUPBe/wvQWstX4GeAY6FuM5khepqKhg8uTJ1NTUHPC5SCRCREMg4Mft9uDzevZ9zuprweprwd5Yg3IkEnIkE3GmEnGlE07MIpKYhbYnHPJ1+8wIoJNr/f/D1rYrKs+9ePFitNb87W9/i8
rzd1c4MQtv5TmmY4h4Eg5i8bdi8beh/O1Y/O2oQDu2oAdL0IMOeA76ZUqpfUdoOBwOLBZ1wBRFVlYWEyZM6PHIMbcaWigU2jcN0dTUxO7du9m9ezc7duygtnY7W7dtpblpvz9NXWkEkvMIpwwgnDrgsIUc72QOWPR74VBHyfr27Jt2sPhasQVaD5huSEhwkZefT35e7r4TSzIzM8nMzCQ9PZ20/aYfon2ySdyshmaz2fb9Iw0ePPigj9mzZw81NTVUV1ezZs0aPvvsc7wNGzqOgEgdQDBjMKGM4j536JkUkujztEaFvPtGsB2j2Tas/jZsgTa0v/1bD0/PyGRQaSGFhWP2HQlRUNCxAy4ejiWOuRHwkQiFQlRXV/PRRx/xzrtLqK/bhbLa8WeVE8gbjk5INR1RiP5F647Dy/Y7FliFg/v+0lIhPyrs7zjxI+jFEvJhC3nRfvcBZ+OlpqVRVFhEYWFHwe5/6Fm8XHm63yzIrrVm/fr1/OUvf+Hdd98lHIkQzCjBX/gDuby8EF3VeejX3oJUIV9Hae69RYIQCqAiASyREJa9ZRsJQTj0reN2D0UpRVJyCpmZGWRlZu47Cy47O5u8vDwKCjrWp3C54v+w0n5TwPtraGjgjTfe4LXXXycUiuDLH0EgfxRYY27mRYjepXXHKcf+to7Dtzr/3LcE3diCnoOORPdKcCWSlJRIUlIyyclJJCUmHrAWxHfXg0hOTiYxsWNHV0pKyr71JKzW/nGiVL8s4L3q6+t58smnWLr0X5CQgnvwqUSSc03HEiL6wgEs3j1YfC2db/dgD7SCr3Xf4u7QsehOVnYOBfkdJy7sv9MqIyNj306r5OTkuDrTLFb06wLea/Xq1TzwwIPUNzTgGziGYP4IOaZY9A3hUEfJepqwepuxeFuw+/d8a6eVxWqloKCA4kGDKOo8jXfvTqt4O4U33sTNURDRdOyxx/L888/xq1//mg8/+ABb2y68ZZPAajcdTYiuC/mwuhuxenZjcTdh9zWBd8++T9vtDgYNGkRp6XBKSkoYNGgQxcXFDBgwQEo2xvSrEfBeWmv+8pe/8Lvf/Y6QKwtP+ZQ+u36EiHM60jGqba/D2l6P3dPYMX3QKSc3j4qh5ZSVlVFWVkZpaSkFBQX9Zm41XsgIeD9KKaZPn05eXh733HMPlq//Tnv5GXKUhDBPR7C4G7G17cTathN7e/2+Iwoys7IZMW40lZWVVFZWUl5eTkqKXFIrnvXLEfD+1qxZwx133Ik3YqG94my5RpzofSEftpZabHtqcbTtQAd9ABQNGsSY449n5MiRjBw5ktxc2XEcr2Qn3GFUV1dzy7x5eCJ22irPAVvfPZ1ZxAblb8fWvAV7yxas7XWgNSmpaZw4/gTGjRvHcccdR1ZWlumYoodIAX+P1atXc9ttPyGQkIG74iyw9MvZGRFFKuDG1vQNjubNWNrrASguKeGUiROZMGECQ4cOlQtg9lEyB/w9jj32WH7607u59777cG18D++Q0w66xrAQ3RIOYGvegqNxI9bWHQCUlQ3htMsv4NRTT6WwsNBwQGGSFPB+Jk2axNymJh577DEcOz4nMPB405FEPNK6Ywfa7mocLVvQ4RD5BQWcOf0/mDJlCkVFRaYTihghBfwdF110ERs2bOAf//gH4eQ8wmkDTUcScUIF3Nh3V+NsrAZfG67ERKacczZnnXUWw4cPj/mVuUTvkwI+iHnz5rFu/Xq2bn6ftuEXdFyFQ4iD0RrrnlocDV9j27MNtGb0cccx7dxzmThxYo9fwkb0LVLAB5GQkMDP77uP2bOvI3HTe7iHng2yc0TsRwV92HdvwLn7a/C1kZqWzrTLL+ecc86ReV3RZVLAh1BcXMz8+T/h/vvvx7FrDYEBo01HEjHA4t6No34djqZN6EiYUaOO5cILL2DixInY7XJKu+geKeDDmDJlCh9//DFL33+fUEYxEVeG6UjCBB3B1rwVZ/2XWNrqcDoTOOu8aVx44YWHvGqLEF0hBfw9brnlFqpWriTyzUe4K8+VQ9P6k3AQ++5qEuq/Al8rubl5XPLDGzn77LNJTpbT1sXRkwL+Hunp6dw6bx4///nPse/6kmDBSNORRJSpoBd73Vck7F6PDvoZPvwYLrtsBhMmTJBFbkSPkgLugsmTJ7NkyRL+vewTQhmD0AlppiOJKFD+dhx1X+DcvQEdCXPSSRO4/PLLGDFihOlooo+SAu4CpRQ//vGPWTVzJqGtn+ApP0MWcu9DlL8dx841OBo3YAGmTp3KFVdcQXFxselooo+TAu6irKwsrrn6ap544gmsLdsIZwwyHUkcJRVw49ixGkfjBqxKce60aVx++eUUFBSYjib6CSngbpg+fTpvvb2YbbXLaUsbIAv2xKuQD+fOtTjr12FRmnOnTePKK68kLy/PdDLRz0iDdIPNZmPeLXO57bbbcOz6Qo4NjjeREI66r0jYtQYdDjJ16lSuvvpqGfEKY6SAu2nMmDGccsopfPjxvwlmDZGraMQDrbE1bca1YyX42jhh/Hhmz55NaWmp6WSin5MCPgI33HADy5Ytw7l9Jb7SU03HEYdh8TTi2voJlrY6SsvKuOnG+zj+eFnlTsQGKeAjkJ+fz8UXX8yf/vQnAvkjiCTKlQtiTsiPc/sqHA3rSUlJ4fr58znrrLPkOF4RU+S0riN0xRVXkJSUTELtStNRxP46pxtSv3wTZ8N6LrzgAl5ZuJBzzz1XylfEHCngI5SSksIPfzgT657afVc6EGapgBtXzRJcG5dSNmggTz/9NPPmzZMrB4uYJQV8FC688EKyc3JIqK2CblxbT/QwrbHtriblyzdJ9NQxZ84cnnrqSYYOHWo6mRCHJQV8FJxOJz+aNQuLeze2ps2m4/RLKuDBVfMurs0fMmJ4JX/4wwvMmDEDm012b4jYJwV8lKZMmUJxSQmunZ+BjpiO069YW7aS8tVfcLl3ceONN/LbRx9lwIABpmMJ0WVSwEfJarVy7TXXgHcPtsaNpuP0D5EQzi3LSKx+l8GDBvLcc89xySWXyCXdRdyRv9N6wMSJEykbMoSNtatpyyyTyxdFkfK1krRpKcrdyMUXX8zs2bNxOBymYwlxRKQpeoBSilnXXgu+VuyN1abj9Fm25i2krHuLZPw89NBD3HTTTVK+Iq5JAfeQ8ePHU1FZScLO1RAJm47Tt+gIjm0rcNUsYUhpCc899ywnnnii6VRCHDUp4B6ilOJHs2aBvx17w9em4/Qd4QCu6iU4d61l2rRpPPH447J4jugzZA64B40ZM4YRI0fyxYa1BHOGynKVR0n5WknauASrbw9z583jwgsvNB1JiB4lI+AepJTimquvBr8be8MG03HimqW9npT1i0lWQX7zm99I+Yo+SQq4hx133HGMGDmShLq1EAmZjhOXbM1bSP76H+RlZ/D000/J6mWiz5IC7mEyCj469vp1uGqWMLR8CE89+SSFhYWmIwkRNVLAUSCj4COgNY4dn5OwZRnjx4/n0Uf/i/T0dNOphIgqKeAokFFwN2mNo7YK5/ZVTJ06lfvvvx+Xy2U6lRBRJwUcJ
TIK7iKtcW79BOeutZx//vncddddspCO6DekgKNERsFdoDXOrctw1K9jxowZ3HrrrbKeg+hX5Kc9imQUfBha49z6KY769Vx22WVcf/31KKVMpxKiV0kBR5GMgg9Ba5zbVuCo/4qLL76Y6667TspX9EtSwFF23HHHMXLkKBkF78exczWOui+YPn06N954o5Sv6LekgKNMKcU118goeC97/Tqc21dxxhlncPPNN0v5in5NCrgXjB49mlGjjiVh1xoI999RsK1p077jfG+//XbZ4Sb6PfkN6AVKKWbNuhYCHuz160zHMcLauhPX5g84ZsQI7rvvPjnUTAikgHvNqFGjGPuDH+CqWwvhoOk4vcribSZp478oKizk4Ycewul0mo4kREyQAu5Fs669Fh304aj70nSUXqOCHpJq3iUtOZFf/+pXpKSkmI4kRMyQAu5FlZWVTJgwgYS6LyHkNx0n+sIhEmuW4IgEePjhhz/xPGQAAAsZSURBVMjPzzedSIiYIgXcy6699lp0OIBj11rTUaJLaxI2f4DFvZt77vkZlZWVphMJEXOkgHtZaWkpp02eTEL9OlTAYzpO1Dh2fI69+Ruuv+46JkyYYDqOEDFJCtiAWbNmYSGCY/sq01Giwta0GeeOzzjzzDOZMWOG6ThCxCwpYAMGDBjARRddhKOxGoun2XScHmXxNJL4zYcMGz6c2267TU60EOIwpIANmTlzJomJiThrV5iO0mNU0EtSzRIy09N54P77cTgcpiMJEdOkgA1JTU3lhzNnYttTi7V1h+k4Ry8SwbXpPWxhPw88cD+ZmZmmEwkR86SADZo+fTo5ubm4aleAjpiOc1Sctcuxtu5k/vyfyBEPQnSRFLBBTqeTG+bMQbkbsTd8bTrOEbPtrsZR17G05Jlnnmk6jhBxQwrYsEmTJnHcccfj2r4KFfSajtNtlvYGErf8m9GjR3P99debjiNEXJECNkwpxbx5t2DRIZy1VabjdIsKeknatJTs7CzuvfdeWWBHiG6SAo4BxcXFXHrppdh3V2NpqzMdp2siERI3LsWuAzz4wANyCXkhjoAUcIyYOXMmWVnZJG5dBpHY3yHn3PoJlrZd3HH77ZSXl5uOI0RckgKOEYmJicybdwvK04Rj5+em4xyWvX4djob1zJgxgylTppiOI0TckgKOIRMnTmTq1Kk4d67G4t5tOs5BWVt3krD1E8adcAKzZ882HUeIuCYFHGPmzp1LRkYmid98GHMX8VS+VpI2LaWosIh7fvYzrFar6UhCxDUp4BiTkpLCnXfcjvI044ylxXpCPpJr3iXRYeOhhx4kKSnJdCIh4p4UcAw64YQTmDZtGo5dX2Bt2WY6DkRCJNUswRps56GHHqSwsNB0IiH6BCngGHXTTTdRWlZG0uYPUL5Wc0G0JmHTB1ja6vjp3XczatQoc1mE6GOkgGNUQkICD9x/P4kJDpI2/svMhTy1xrn1U+zN3zBnzhwmT57c+xmE6MOkgGNYQUEB9917DxZvMwmbPwKte+/Ftca5bQWO+q+49NJLufTSS3vvtYXoJ6SAY9zYsWO57rrrsDdvxrn1k94pYa1x1FbhqPuC6dOnM2fOHFlYXYgokJP348CMGTNobm5m0aJFoCz4i8ZBtAqxs3ydu9Zy3nnnMXfuXClfIaJECjgOKKW4/vrrCQaDvPHGG2hlJVA4pudLOBIiYfOH2Js2c95553HrrbdK+QoRRVLAcUIpxc0330woFOKtt97CEnTjK54A1p75Fqqgl8SaJVja65k9ezaXX365lK8QUSYFHEc6lq6cR3Z2Nv/9wgvYfC24SyejE1KP6nmtLdtI3LoMe8TP3ffey6RJk3omsBDisGQnXJyxWCz88Ic/5OGHHiIJHynr38Zev+7IVlAL+UnY9AGJ1e9QlJfF448/LuUrRC+SAo5T48eP59lnnmFEZQUJW5aR8tWb2Jq+6dK15VTAjWNbFalfvIazeRNXXXUVzz/3LBUVFdEPLoTYR+luHNY0duxYXVUVX1dt6Ou01ixbtownn3yKbdu2ouwuAqkDCaUVEnEmg9WOttiw+FqxenZjaW/Avmcbio7V12bOnCnr+QoRZUqplVrrsd+9X+aA45xSipNOOolx48bx0Ucf8dFHH7Fs2Se4G2sO+vi8/HxOPeMSpk+fTkFBQS+nFULsTwq4j7DZbEyaNIlJkyYRCoWorq6mpaUFr9eLz+cjLy+PoUOHkpKSYjqqEKKTFHAfZLPZGDZsmOkYQojvITvhhBDCEClgIYQwRApYCCEMkQIWQghDpICFEMIQKWAhhDBEClgIIQyRAhZCCEOkgIUQwhApYCGEMEQKWAghDJECFkIIQ6SAhRDCEClgIYQwRApYCCEMkQIWQghDpICFEMIQKWAhhDBEClgIIQzp1mXplVINwJboxTmkbGC3gdeNBtmW2CTbEpv6yrYUa61zvntntwrYFKVUldZ6rOkcPUG2JTbJtsSmvrQtByNTEEIIYYgUsBBCGBIvBfyM6QA9SLYlNsm2xKa+tC0HiIs5YCGE6IviZQQshBB9jhSwEEIYEtMFrJQ6Syn1tVKqRil1p+k83aGU+m+lVL1S6ov97stUSr2jlKrufJthMmNXKaWKlFJLlVLrlFJfKqVu6bw/7rZHKZWglFqulFrduS33dd4/WCn1aee2LFJKOUxn7SqllFUp9ZlSanHnx3G5LUqpb5RSa5VSnyulqjrvi7ufse6I2QJWSlmBJ4CzgeHA5Uqp4WZTdcsfgLO+c9+dwBKtdTmwpPPjeBACbtNaDwPGAzd2fi/icXv8wGla62OB0cBZSqnxwC+B/+rclmbgWoMZu+sWYN1+H8fztkzWWo/e79jfePwZ67KYLWBgHFCjtd6ktQ4ArwIXGM7UZVrrD4Cm79x9AfBi5/svAhf2aqgjpLXeqbVe1fl+Gx2/7AOJw+3RHdo7P7R33jRwGvBa5/1xsS0ASqlC4Fzguc6PFXG6LYcQdz9j3RHLBTwQ2Lbfx7Wd98WzPK31TugoNSDXcJ5uU0qVAMcBnxKn29P5J/vnQD3wDrARaNFahzofEk8/a48CtwORzo+ziN9t0cD/KKVWKqVmd94Xlz9jXWUzHeAw1EHuk2PmDFJKJQOvA/O01q0dg634o7UOA6OVUunAm8Cwgz2sd1N1n1JqGlCvtV6plJq09+6DPDTmt6XTBK31DqVULvCOUmq96UDRFssj4FqgaL+PC4EdhrL0lDqlVAFA59t6w3m6TCllp6N8F2qt3+i8O263B0Br3QK8R8e8drpSau+AJF5+1iYA5yulvqFjiu40OkbE8bgtaK13dL6tp+M/xnHE+c/Y94nlAl4BlHfu0XUAlwFvGc50tN4C/qPz/f8A/mowS5d1zis+D6zTWj+y36fibnuUUjmdI1+UUi5gCh1z2kuBizsfFhfborW+S2tdqLUuoeP3419a6yuJw21RSiUppVL2vg+cAXxBHP6MdYvWOmZvwDnABjrm6O42naeb2f8E7ASCdIzmr6Vjfm4JUN35NtN0zi5uy8l0/Bm7Bvi883ZOPG4PMAr4rHNbvgB+
1nl/KbAcqAH+DDhNZ+3mdk0CFsfrtnRmXt15+3Lv73s8/ox15yanIgshhCGxPAUhhBB9mhSwEEIYIgUshBCGSAELIYQhUsBCCGGIFLAQQhgiBSx6nVIqXSl1w/c8pkQpdUUXnqtk/yU/D/L5/6uUevwQn/v3d59DKTVp77KOQkSbFLAwIR04bAEDJcD3FvDR0FqfFM3nF+L7SAELEx4GyjoX3v515+2LzsW4Z+z3mImdj7m1c5T6oVJqVeetO+VZpJT6R+fi/vfsvVMp1X64LxIi2mJ5NTTRd90JjNBaj1ZK/R/geuBYIBtYoZT6oPMxP9FaTwNQSiUCU7XWPqVUOR2neo89+NMfYBwwAvB0Pv/ftNZVPbtJQnSfFLAw7WTgT7pjicg6pdT7wA+A1u88zg48rpQaDYSBod14jXe01o0ASqk3Ol9TClgYJwUsTOvqosK3AnV0jJQtgK8br/HdBU9kARQRE2QOWJjQBqR0vv8BMKPzKhU5wCl0rOS1/2MA0oCdWusIMBOwduP1pnZe3NFFxyVtPj7aDRCiJ8gIWPQ6rXWjUurjzkO//k7H0pCr6RiZ3q613qWUagRCSqnVdFzg9PfA60qpS+hY79bdjZf8CHgJGAK8IvO/IlbIcpRCCGGITEEIIYQhMgUh+gSl1JnAL79z92at9XQTeYToCpmCEEIIQ2QKQgghDJECFkIIQ6SAhRDCEClgIYQw5P8D3fsnmWfixhEAAAAASUVORK5CYII=\n\"></div>\n\n</details>", "_____no_output_____" ] ], [ [ "# Câu 2: Vẽ swarmplot cho cột total_bill theo sex\n# Bạn nhận xét gì về biểu đồ vừa tạo\n", "_____no_output_____" ] ], [ [ "<details>\n <summary>Nhấn vào đây để xem kết quả !</summary>\n \n<div class=\"output_subarea output_png\"><img src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAX8AAAEGCAYAAACNaZVuAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOydd3hUZfbHP3cmvfeekEYLLUDoTUARBOxgwV6wYVsbP7vrrq7urrq6sooFGyBVBRSl95pQQkJCCCE9pPc+M/f3xzuZyZCoCCSRzPt5njzMfee9d84EOPe95z3nexRVVZFIJBKJdaHpagMkEolE0vlI5y+RSCRWiHT+EolEYoVI5y+RSCRWiHT+EolEYoXYdLUB54qPj48aHh7e1WZIJBLJJUVCQkKJqqq+Z49fMs4/PDyc+Pj4rjZDIpFILikURclqb1yGfSQSicQKkc5fIpFIrBDp/CUSicQKkc5fIpFIrBDp/CUSicQKkc7fyqht1JGUV0mTztDVpkgkki7kkkn1lFw4G48X8uSyI9Q06vBxseezO+MYFOrR1WZJJJIuQK78rQSDQeXF749R06gDoKSmkdfXHe9iqyQSSVfR4St/RVEygWpAD+hUVY1TFMULWAaEA5nAbFVVyzvaFmumQaensKrRYiyrrK6LrJFIJF1NZ638J6qqGquqapzxeD6wWVXVnsBm47GkA3Gys2F8L8sK72n9A7rIGolE0tV0Vcz/GuAy4+svgW3Ac11ki9Xw/s2xvLsxjaT8KkZHeTNvUnRXmySRSLqIznD+KrBBURQV+FhV1YWAv6qqBQCqqhYoiuLXCXZYPR5Odrx2Tf+uNkMikfwJ6AznP0ZV1Xyjg9+oKErquZ6oKMpcYC5AWFhYR9knkUgkVkeHx/xVVc03/lkEfAcMBwoVRQkEMP5Z9CvnLlRVNU5V1Thf3zaKpBKJRCI5TzrU+SuK4qwoimvLa2AKkASsAe40TrsT+KEj7ZBIJBKJJR0d9vEHvlMUpeWzlqiq+rOiKAeB5Yqi3AtkA7M62A7J71DfpMfBVoPx70oikXRzOtT5q6qaAQxqZ7wUmNyRn23tpBRU8d6mNIqrG7l+SAi3jewBQHJ+JUl5lYyI8Cbcx5miqgYe+/Yw+zLKCPNy4u0bBzIy0ruLrZdIJB2NlHfohtQ16Zjz6X7KapsAOJRdgYu9DUXVDbzxk9hv12oUPrhlML8kn2FfRhkA2WV1PLb0MHvmT8JGK4u/JZLujHT+3ZD4zHKT429hfVIBu9NLTcd6g8p/Np2k2WAp8FZU3UhRdSNBHo6dYqtEIuka5PKuG9LD24mzQ/c9vJ1p1OktxmqbdIw6K8QT4eNMoLtDR5sokUi6GOn8uyE9vJ15ekpv7IyhmyFhHjx8WRSz4kIt5t0xqgf/d1VfbhgSgpezHSMivPjotqFy01cisQIUVVW72oZzIi4uTo2Pj+9qMy4pymubqKxvJtzHGRChnlWHcknOq2RUlA9TpbaPRNLtURQloZWumgkZ8+/GeDrb4elsZzrWahRmx4XCWU8AEonE+pBhH4lEIrFCpPOXSCQSK0Q6f4lEIrFCpPOXSCQSK0Q6f4lEIrFCZLaPFVFY1cDff0whKb+S0VHezJ/WFxd7+U9AIrFG5P98K+LRJYc5kCl0fDKKa2lsNvDPWW109yQSiRUgwz5WQm2jzuT4W9h6oriLrJFIJF2NdP5WgpOdlhBPS7G2Xv4uXWSNRCLpaqTztxIUReHtGwfi52oPCAG3V6/u18VWSSRGTu+ADS/C4W9A39zV1lgFMuZvRYyO8mHP/EkUVjcS5O4gBdwkfw6OLIXvHzQfn9oKN37WdfZYCXLlb2XYaDUEezhKxy/583DgY8vjpFVQW9I1tlgR0vlLJJKuxeas/hEaG/Ej6VCk85dIJF3LuKdAY2s+HvkgOHp0nT1WgnT+Eomka+l5Bcw7AMPvh4jxoGuC8qyutqrbI5+tJBJJ11OeBQc+BVSR+ZO6Dh49BLaypWhHIVf+Eomk60lcBrTqKliVJ24Ckg5DOn+JRNL1uPid25jkoiGdv0Qi6XpGPgxekebj2NsgKLbr7LECZMxfIpF0Pa4B8MgByNoDzj7gL6vPOxrp/CUSyZ8DrS1ETuhqK6wGGfaRSCQSK0Su/CUArEzI5Zt9WTjZaXl0Uk9GRXl3tUkSiaQDkc5fwva0Yp5ecdR0nJBVzvZnJhLgLnOsJZLuigz7WCnVDc0k5lbQqNOzJaXQ4r1GnYGdJ2WjF4mkOyNX/lbIz0lneGr5EWqb9Pi42HH9kJA2c6L9ZKMXiaQ70ykrf0VRtIqiHFYUZZ3xOEJRlP2KopxUFGWZoih2nWGHtVFZ18w7G9N4ctkRfkk+A4DeoPLSD0nUNukBKKlpIj6zjCti/AGw0Sg8MCGSwWGeXWa3RCLpeDpr5f84kAK4GY/fAt5VVfVbRVE+Au4F/tdJtlgNdyw6
wNGcCgC+O5zHuzcNYkpMAMXVjRbz8irqWf3wGAqrGrC30eDhJO/FEkl3p8NX/oqihADTgU+NxwowCVhpnPIlcG1H22FtpBVWmxx/Cyvic3G2t2FCL1+L8asGBALg7+YgHb9EYiV0xsr/PeBZwNV47A1UqKqqMx7nAsGdYIdV4e5oi0YBQyutLA8nO5bH5xDgbs+kPr7UNOgZHe3Nw5dFd52hEomkS+jQlb+iKDOAIlVVE1oPtzNVbWcMRVHmKooSryhKfHGxzD75I/i7OTB3fJTp2MPJFoOq8uzKRJYdzGVLajGT+/rxxOW9sLORSV8SibWhqGq7fvfiXFxR3gRuB3SAAyLm/x1wJRCgqqpOUZRRwKuqql75W9eKi4tT4+PjO8zW7sqJM9XklNURG+bBqDc306w3/32HeDqy67lJXWidRCLpaBRFSVBVNe7s8Q5d8qmq+n+qqoaoqhoO3AxsUVV1DrAVuNE47U7gh460w5rpHeDK5TH+uDvaYm+jtXjPyU7LpzszuOnjvfzf6mMUVjV0kZUSiaSz6arn/eeAvyiKko7YA/isi+ywGmy1Gh6f3NN0rNUoxAS68bcfU9h/uoylB7K554uDXWihRCLpTDqtyEtV1W3ANuPrDGB4Z322taLTG2jWqzjaiRX//eMjGR3tTXJeFSMivZi35LDF/OT8KjJLagn3ce4KcyUSSSciK3y7KcsOZvPm+lSq6puZNiCQf88aRE2jjq/2ZHEsr5K0wmqCPBw4lldpOsfRVou3i0z1lEisAen8uyH5FfU8/10SemOe54+JBcQEurE7vYQ9p0oBOF5QxYyBgQR7OJJXUY+djYYXZ/TF1cG2K02XSCSdhHT+3ZDUM1Umx9/C0ZwKk+NvYf/pMvbOn0RKQTXBno54OctVv0RiLcgE727I4FBPHG0tM3vG9fQh6CyJ5ggfZ2y0GgaEuEvHL5FYGdL5d0M8ne345I44Boa4E+TuwKOTopkzogdvXD8ATycR1gn2cOSVmTFdbKlEIukqOrTI62Iii7wuDg3NevIq6gn3dkaraa/YWiKRdCd+rchLxvytDAdbLVG+UqtfIrF2ZNhHIpFIrBDp/CUSicQKkc5fIpF0PPUV8N2D8E4MLL0VKnK62iKrR8b8JRJJx/PT03BshXhdlQe1xXDfxq61ycqRzl8ikXQ8p7ZaHucegKZasPsdHans/ZC6FjzDIXYO2Dp2mInWhnT+Eomk4wkYABmtbgBekWDr9NvnpP4E396KqddT6o9w+3cdZqK1IWP+EgCqGpq72gRJd+aqf4F/f/HaIwxChsEnE2HNY1BbIsZTfxT7AWsehdJTcGAhFk3+Tm2BkvRON727Ilf+3ZRjuZX833eJpJ2pYUJvX96+YSCe7Ug4JOVV8ti3h8korqVvoBv/vXWwrAOQXHx8ouGh3VBXBlv+DvGfivH8w1B+GkY/ZlzlG0n7BQJjz7qIAjb2nWZyd0eu/LshBoPKw0sSSMqroklvYOPxQv72YwpNOgNf7D7NU8uPsiohF1VVeWr5UTKKawFIKaji+dXHuth6SbfGyQtO/GQ5dnoHHFlqOVZTCOFjwaZVjH/wHPAI7XgbrQS58u+GFFU3klNWbzGWkFXG/NWJrD6UB8CqQ7lkl9VxorDaYt7x/KpOs1NipXhHQXW++dg1sH2nHjUR+t8A6RvFhm/EhE4z0RqQK/9uiJ+rPSGellkRg0I8+OFIvsXYyoRchod7WYyNjvbucPskVs6Vb4BbiHjt4A4z3oVR88C3j3nOsPvEJrF7MAy9CyIvA0VqUV1M5Mq/G6LRKCyYM4T5q46RVljNZb19eWF6X7aeKKKqQWea5+Zoy3s3x/LyD8kcza1gRIQXr13drwstl1gFgQPh8aNQmi42f+2MWT8P7YW8eHDyFk8Hkg5FqnpaEV/vzeTlNcmoKthpNXx0+xAm9fHvarMkEkkHIlU9Jdw+KpyxPX1Jzq9kWLgX/m4Ov3+SRCLplkjnb2VE+DgT4dN+VWVpTSNf78uipKaR6wYHM7SHFzlldby6Jpmk/ErGRPnwysx+uDvJPr8SyaWOdP5WREpBFc+uTCQpv5JRkd68MzuWAGNrR53ewKyP95rSPpceyGHxfSN4c30qR3MqAFh9OA+DqvLezYO77DtIJJKLg8z26SYczi5nzqf7mPLudj7afor29nIe//Ywx/IqUVXYc6qUF79PMr23/3SZyfED6A0qi/dlmRx/C7vSLZvASySSSxO58u8G1DTquOPzA1QbM3n+sT4VTydbbhoWZjEnrbDG4rwjOeWm124ObUM5Xs52hHs7kVlaZxqLCXK72OZLJJIuQK78uwEJWeUmx9/C1tRiABp1egBc7G3oG2jpuON6eJFbXsdbP6fyw5E8xvf0Mb3n42LPvWMj+ffsWMK8RCpevyA3Xr9GpoJKJN0BufLvBkT7uaBRwNAq0uPnZs81H+7maE4FfQPdePemQXxwSyzPrTpGUl4lo6O8eWpKL675725Ka5sAcLLT8v7NsdhoNUzo5YuzvQ1h3k5sf+Yyqup1cqNXIulGyJV/NyDYw5GXZ8TgZKcFYFxPH1LPVJni9SkFVfxl2VGi/Vz5/M5hLLl/BAvmDCU+q9zk+AHqmvRklNRy1YBAnO3N6wJFUaTjl0i6GXLl3024a0wEs4eFUtuox9fVnv6v/GLx/vGCKtYcyefZVUdpaDbg4WTLPWPC21zHxV7+k5BIrAG58u9GONnZ4OsqJG9HRFhq9gzt4clf1x2nodkAQEVdM5tTi4gN9TDNifR1ZtZQqZookVgDcpnXTXnz+gE8/10SB06XMijUg5dnxDDlvR0WcwoqGtgzfxI7T5bQpDdwWW9f7G20XWSxRCLpTH7T+SuKshaLVjqWqKp69UW3SHJR8HNz4NM7LeU8JvfxY1NKkel45qAgqhp0HMwsI6e8HoNBZdqAQIqrG3np+yQOZpYxOMyD16/tT6C77J0qkXQnflPYTVGU3xTQVlV1+29eXFEcgB2APeJGs1JV1VcURYkAvgW8gEPA7aqqNv36laSw2/mQXlRDUXUDw8K9sNVqqG5o5r9b0knKr2R0lA/3j4vgugV7SG6l4f/uTYNYd7SAzanmm8ToKG+W3D+yK76CxNopOApHloC9Gwy7F1wDutqiS47zEnb7Ped+DjQCk1RVrVEUxRbYpSjKeuAvwLuqqn6rKMpHwL3A/y7wsySteHVNMl/syQQgxNORZQ+MwtXBBhUwGES3r9Qz1RaOH4TGf0JWucXYnlOlqKqKIvXUJReL4jQ4vR0CB0HocPP4mWPg6CV0/POPwGdXgN64Ljz6LTyy3ywBLbkgfi/sc4zfDvsM/K3zVfFY0VJWamv8UYFJQEvDzi+BV5HO/6KRXlRjcvwAueX1LNx+itzyetOKfm9GKbnl9W3qA7yd7RkQ7M7BTPMNoF+Qm3T8kotH8new8h5QRfIBE18QzVu+vg4KjoCigVGPgK7J7PgBKrMhfRPEyGjzxeD3NnxnXOgHKIqiBRKAaOBD4BRQoapqS0lqLhD8K+fOBeYChIWFtTdF0g7F1Y1
txgoqG9hyoshibOuJIh6cEMWCbacA8HGx49FJ0SgKPLr0CCkFVfT0c+GfNw7qFLslVsKOf5kdP8Cud6G5Tjh+EO/t+QCG3NX2XAcpL3Kx+L2wT9aFfoCqqnogVlEUD+A7oG97037l3IXAQhAx/wu1pbtT3dCMs50NceGehHo5WvTxvX5ICIdzKixuDCGejjw7tQ/XDwkht7yOERHeOBoLxdY/Po6aRp3M+5dcfJot+0ujb4aK7LbzQoeLlX5VrjiOnCj7+F5Efi/ss0tV1bGKolQjHLTS+k9VVc/5NqyqaoWiKNuAkYCHoig2xtV/CJD/mydLfpPCqgbmLTnEwcxygj0ceeuGgSybO4qFOzIoqm7g2thgpvQLwKCqPLX8KPXNejydbHlhegwg5CGi/VxM10svquafv5wgr6KeGQODmDsuEo1Ghn0kF4kRD8L6Z8zHQ26H6CsgaZV5zMkbYq6BftfByQ2i12/EBNnH9yLSoW0cFUXxBZqNjt8R2AC8BdwJrGq14ZuoquqC37qWzPb5dR7/9rBFc3YfF3v2/t8ktIpCSU0jvq72pph9ZX0zp4priAl0w8G2bU5/s97A+Le3UlDZYBp7ZWYMd4+J6PgvIrEe0jfBqa1iw7f/jaDRwLGVcPgbcPaBcU+BX3tBAskf5YLbOCqKMgQYi1j571JV9fA5nBYIfGmM+2uA5aqqrlMU5TjwraIofwMOA5+dqx2StpydsVNS08iW1EJeX5dCbnk9kT7OLLhtCH0C3HB3tGVImKdprsGgsvRgNgdOlxEb6kH/YHcLxw+wKaVQOn/JxSX6cvHTmgE3ih9Jp3BOzl9RlJeBWcBq49AXiqKsUFX1b791nqqqiUCbtk+qqmYAw9ueITkfxkR5k15k1uqP8HHmnz+fILdcxFYzSmp54bskVj00mj3pJRzLq2RUlDcDQzx46+dUPt6RAcAPR/K5bnAQdloNTXrzhlykjwsSSYdTlApJK8HJB2JvlZu7Hcy5rvxvAQarqtoAoCjKPxDFWb/p/CWdw3PT+tCoM7AltYie/i68ND2Gqf/ZaTHnxJlq3tlwgve3pJvG/jVrEMvjcyzm/Zh4hhen9+XN9anUN+sZEOzOo5OjO+V7SKyY/MPw2ZWgNyYkHP4aHtgBGik30lGcq/PPBByAlniAPSJlU/InwMnOhn/cYFlyMTbah13pJRbHn+w8bTFnwbZ0PJ3sKK9rNo25O9lyx+hwrhsSTFltEz2822/2LpFcVOIXmR0/QGESZO6EyMu6yqJuz2+qeiqK8oGiKO8jKnWTFUX5QlGURUAS5uItyZ+Qd24axMxBQYR6OXLDkBBeuyYGw1mb+waDynPT+mCnFf8MtBqF+VP7AODqYCsdv6TzsLFvO6ZtZ0xy0fi9lX9Lek0CIke/hW0dYo3kvKhr0vH6uhS2GsM+r8yMIdrPlUcnRbPqUC6u9jZoNRruGNXDYvV/37hIruwXwK7nJnIou4IBIe4Ee0gBN0kXMPwBSFwODaIBERHjIayVnpSqwon1UJgMURMhpE3yiuQPclFSPRVFWaWq6g0XwZ5fRaZ6/jqtdXxAbPgumDOY6xbsMen3h3k58csT49h5soSk/CpGR3kzMtIbEOmfSXmV9A10w8vZriu+gsQaSPgCtv9TSDaMfAjG/UWM15VBbbHQ9EldJ1I9e00Dbau16U/PwoGPjQcKXL8QBs7u7G9wSXLBqZ6/Q+RFuo7kPGgd2wc4XVLLol2ZJscPkF1Wx/a0YqbEBDAs3AtPo5PfnlbMQ98kUNekx95Gw3s3xTJtQGCn2i+xAgqOwtrHzcebXwP/flB8Ara8Lm4IAQNgzipw9bc8t7Ea4j9vNaDC7vel879ALlYnLym90IX0C7JMifNxscPbpW28NLe8ngn/2srg1zcy7T87OV1Sy99/PE5dkx6ARp2Bv6473ik2S6yMrL1tx9J+gU2vmsXbzhyDnf82v3/mGCR/D3Xlbc+Vhb4XjBRu6QY8f1Vf8srric8qJ9DdgbdvHEi0nwurD+dSWCUyKIaFe/L13kyT3k9KQRUv/5DEmbMKuoqrG9EbVLRSzkFyMQkZ1nbMLQhUveVYmag5YdNrsOsd8drOFfrOhOSWMiMFxjzRYaZaCxfL+UtP0YX4uzmw8qHRZJXWUl7XRP8gd2y0Gjb9ZQKbU4pwsbdhWLgng/660eK8lIIqrhsczJd7zfp9MwcFSccvufiEDIUr34DtbwshtxEPwOjHRDinKs88r+8MqC2BPe+bx5qqoakWbl0BhccgahIEtakdlfxBLpbzf+4iXUdynizen8Vra47TpDcQ7OHIV/cOJ8rXhWsHm9WyB4d5cDi7wnQ8rqcvL86IIcjDkf1GeYe54+X2jaSDGPWI+FFVs0Db7d/DtjehIgs8wowKnzlg0Fme21AJvaaIH8lF4ffaOP5aM5cWVc/fbOZyMZHZPr9OdUMzw/++mfpm8yP0VQMCWDBnqMW83PI6Xl1znOR8Ie/wysx+uDvaWszJKK7hjZ9SOFVcy+Q+fjwztbds6i7pWOrL4ePxZllntxDwCIXsVvsE1yyAwXO6xr5LnPPN9rngZi6Sjqe0psnC8QPklNWzIfkMi/dn42yv5cEJUQwM8bBo6q6qKjvSisksrWVibz9CPB2578t4MkpqAfh012lstBrmT+vTqd9HYmUkrbbU86/KheFzoecUKD0FfaZDn6vE6n/Xu3AmCaInizlS/uG86fBmLpKOJ9zHmZhAN44XmNU9BwS788A3CbQ82G0/Ucz2Zyfi0yoL6P9WH+Pbg0Lbx06bwls3DDQ5/ha2nSiSzl/SwfxK9MHORej4OxpVaFfcDac2i9fpG0V9wKQXOsfEbsg5pXoqijJSUZSDiqLUKIrSpCiKXlGUqt8/U3KhVDU088ORPLanFWMw/HqIbtHdw7h1RBgjI714aUYMtlqF1hG92iY9204Um44LqxpY1krUrUlvYNWh3DZhoN4Brhfvy0gk7dHvenAPNR+7BkLaetHwZd+HsGgaHF1mdvwtHFvRuXZ2M851w/e/wM3ACiAOuAPRk1fSgeSU1XHdgt2U1Ig86Mt6+/LF3e0rYfu7OfDGdQNMx5/uzGgzR6uBv/94HI1GYVJvP87e7tEbVN6+cSDPrz5GaW0TA4LdeW6qedW/PD6Hn44VEOLpyCMTowl0l1IQkvMk+XvRy1ffKDJ/HtghnLlqgOA4+Ky11r8KRxaLp4CGSvOwW7utvyXnyDln+6iqmq4oitbYk3eRoih7OtAuCbBod6bJ8QNsO1FMfGYZBhWTE75leBjO7fTZvWV4GBuOF3LgdBmKAjMGBPLy98lUN4osimUHc5jQy5ftaeJpQFHgrjHhXNkvgIm9/aioa8LPzcF0vW8PZDN/9THT8d5TpWx8coJs7yj54xSnwcq7zU3cf3wKvKLETQBEto+pY6wRe1eRKrr2CTA0g4MHXPFaZ1verThX51+nKIodcERRlLeBAkBKPnYwdU26NmO700t4b/NJ06r9l+QzrHhwNJX1zXx3KJeaRh3XxAYT6uXE8gdGkVZYjaOtlrWJ+a
xNLDBdp6KumfG9fJg+IJCTRdUEezoSEygqhe1sNKQV1vDSD0k42mq5f3wka45atlk+VVzL8YIq+ge7d9wvQNI9Ob3d7PhbOLVFpHse/kY0c+lzFaT+KN6zdRJFXaHDxCZwSRoEDQE7p863vRtxrs7/dsT+wDzgSSAUuL6jjJIIbhoWyqpDuTTrhaeP9HXmUHa5RbjmYGY5R3Mq+MvyI5wqFpu1H2/P4Id5Y4j0dcHHxZ7qhmZcHWzbXN/D0Y7+we68/UsqJTVNvLb2OM9c2ZvRUT7cuegAeuMew6aUIib09rU410aj4OcmJXcl50HAgLZj+kZL7R8Hd7hlOdQWQc8rwDVAjLv4iR/JBXOuzv9aVVX/g2jm8hqAoiiPA//pKMMkMDjMk+8eHsN3h/PwcrbjluFhvPRDUpt5ibmVJscPUN2oY9nBHOxttSzYmo7OoDK0hydRvs6mef2D3Zg+MJAnlx0xhZZUFd7beJKCigaT4weoadTRP9CNozkV5JbXo1HgySt64efqgETyhwkbCROeE+JshmYYfDvUllnOaagU0g9Dbu8aG62Ac3X+d9LW0d/VzpjkItM/2J3+we6mENBDE6LYfqKYGmPsfnZcCIHubZ1wVUMzS3eYN30Tssp5fHJPvF3sWJ90BjutwtbUIoqqGy3Oa9IbcHVs+8+if4g728ZfxtHcCoI8HOVmr+TCmPg8jH0SDHqwd4Ftb7Wd4y1zSjqS33T+iqLcAtwKRCiKsqbVW25AaUcaJjHzt3XH+cqov3PbyB5sfXoCK+Jz2XGymPSiGnr61dI/yI2kfJF96+Vsx8Bgd5Zi2Z83o6SGxftLTSv97Wkl3D4yjIQss2rioBB3Hrksmn2nSjlklIKYMTCQsdE+KIrC0B5enfGVJdaAbasFxMgHxV5A1m7Q2ML4p8G3V9fZZgX83sp/D2Jz1wdopbVKNZDYUUZJzGxNLeLTXebuW5/vPs3wCE++3pdFgVGR81B2Ba9eHcN9jnZU1TczpZ8/TvY2vLE+leoG86ZxiIcTJTUFFtevbdTz71mD+CX5DBE+zjwwIYrUM1XMigvl4cuiCfdxItrPldWHctmRVkzvADfuGh2Oo52srJRcAHodZGwVcs7Rl4sY/90/CVVPBw9wkouMjuZcKnyzgFGKovgDLbqsKaqqtk1FkVx0WlfttrA9rdjk+FvYklrMjAGBvLf5JH9dd5xrBwfz5d3DWbDtFGcq6xkW7sXkvn78b/spi/OCPBy5YWgINwwNAeCfv6Ty4VYxx1arsPCOODanFPHm+lTjGfkczi5n4R2yjZ7kPNE1wqKrIM+o1eXTC+7dCI4e4NWOsKDBAJqL1XpE0sK5VvjOAg4As4DZwH5FUW7sSMMkAhFusRy7oq8/Nmfl1/u42DF/dSJltU3oDCorE3I5nFPBiAgvkguqWLQnk7u/OMj0AYGm6/UJcGXOyDASssooqWmkuqGZT3aYnzKa9Sr/3ZLO8njL8NHGlEIq6pqQSM6L1HVmxw8idfPIkrbzakvgmxvhrycZJZYAACAASURBVF7w3+GQvb/zbLQCznXD90VgmKqqRQCKovgCm4CVHWWYRDAo1IN3Zg/i4+0ZqCrMHR/JpL7+zJ/Wh7d/OUGTzkBMoBsjIrxYfSjP4txD2eVsTik0pYZWN+ioamhmxzMTKa9rQqsoXP3f3RRXN2Kn1fDs1N40Gyzzr+ub9Hg521lkEznZanGwlWEfyXnS0I4yTF0pbHwFMndB8BCxIbzhRaHhA1ByQhSGPXFMirldJM7V+WtaHL+RUi5eC0jJ73Dd4BCuGxxiMTYrLpRZQ0Mprmkk2s+FouoG7LQamvRm5z0oxJ0fEy1j/CU1TYR6ORHq5cTdiw5QbMz2adIbeG/TSaYPCGRdq3PuHN2DMC9n7vniIPXNehRjmqd0/pLzpu/VsOVvUGfsPW3rDKXpcPx7cZwXL6p8S09anleVB1X5Qu5ZcsGcq/NfryjKL8BS4/FNwE8dY5Lkt0jOr+SxpYc5VVxLb39X/nur6Gjk5+rA/24bwts/n6CsronZcSHcNzaSHWklFg3erxoQwP6MUvoHu5NfYblvUNOoY/7UPozr6cOJMzVM7OPLuJ6iuOuTO4byxZ5Mov1cmD1M/ueTXADO3nD/FkhYBLomGHonfD7Vck7azxA7R9wUWvDoIfV8LiK/2czFNElR3gL2A2MRohs7gJGqqnZaBy/ZzEUw/f2dJOebH5uH9vBk1UOjf3V+dUMzn+48TXpxDZ5OtiyPz6VJZ8DNwYZpAwJZdtAczx8W7smKB9tea19GKXM+3W8q/OoX5MbaeWOlro/k4vHJJMhLMB+7h8GDO2DdX+DkBvDtAzPegcBBXWfjJcr5NnNp4Qqjo2/poIyiKK8h2zd2Kqqqtsn+Sc6vJCGrnH9vOEFJTSM3DAnhgQlRpvddHWx58ope6A0qI97YRJNOhIWqGnSkFlTx8owYtp4ooqefK/MmmYtqMktqqaxvZmCIO0v2Z1tU/CbnV5GQXc6w8HNLxyuqbiC7tI4BIe6yK5ikfaa9DUtvEXIODu7C0Tt6wqxFXW1Zt+X3irweAh4GIhVFaZ3X7wrs7kjDrJEW5+7pZEeQR9sKWkVRGB3lze50c33diAhv7vr8gEmt8831qXg62zE7zjI006QzUFprmaFzpqqBe8ZGcM/YCIvxF747xuL9orNS30A3+rSj6e94jjH/r/Zm8te1x9EZVHxd7fn63uH0CXA7p3MlVkRInGjMcvAzEdpx8T/3c3e9ZxSE84ZJL0LEuI6zsxvxeyv/JcB64E1gfqvxalVVy9o/RXI+lNU2cdun+zleUIWiwP3jInn+qr4AVNQ1odUouDrY8s7sWF7+IYnD2RUMC/diSoy/SZa5hW0nxN78h1vT0elV7hsXwd1jIri8rz8bjxea5l09KIikvEoWbEunql7HzcNDCfNyMjl+gJSCKuJ6eOLuaEtlfTMAV/bz/1U1z/LaJo7lVdIvyA17Wy1v/pSKzvjUUFzdyL9+SbNoJSmRAHD8B7Ow25lE0b/3iWPg0Gqh0FQL658Tap8+PWHaW1B8Aja9It4vPQlLZsOTybJI7Bz4vSKvSqASuKVzzLFePtuVYQrpqCos3JHBtbFBfLYrk+8O56LVKNw9JoLnr+rLx7fHcSy3kuqGZoI8HNAo0LrJl7ujLc+uND+ovbb2OD39XHn3plg+3JLOsbxKxvb04cahwUz813ZTFfCu9BIendRWT6VRp2fb05exObUIP1d7xvX04cDpMl5dk0xueR3TBwbyysx+7E4v4eHFh2jUGbCz0fDyjJg2vYULKus74Lcn+dOTc1BU84aNMhds1ZZAZQ4EDISUdZbzGyqE1EPvaeaxrW/A4a+N19sPS2+FHmftUTXXQdYe6Cvbj/8e59zM5XxQFCUU+AoIAAzAQlVV/6MoihewDAgHMoHZqqqW/9p1rIHc8rZOcfWhPFYdygXAoFdZuCODib19+XpfFj8dOwNAtJ8LT03pzYdb06lr0jO+ly8hnm1DRnszSkytGyvqmnB3ssXP1d5C/gGEc/Zyt
qOsVYho+oBADueUU9ekI9LXi0adgblfx1NRJ54Elh7IwdfFnrWJBTQa9xSadAb+t+0UsaEeHMmpMF1r5qCgC/xNSS4p9DqxGm9pwRgYC3etgyNLYcML4obg0QN6Xdn23LOrfTN3Wh5X54Pr2eEhBfxjLpr53ZkOdf6ADnhKVdVDiqK4AgmKomxEKIJuVlX1H4qizEeElKx683j6gEB+OGJumOLjYtduW+tfkgtNjh8gvagGnV5l9/yJbEwuwsvZDif7tvH4MC8nnluVaArB/JhYgEc7Gv+Rvi7cOrwHC3dkoDMYmB0XyqpDeaZmLm/8lMJrV/czOf4W9p0uo6jKMnW0uKaRNfPG8OHWU6QX13BFXz9uG9njnH8nkm5A2s+WvXcLjsDBT2Hrm8Lxg2jiUn0GQkeIFb3GRih++vYW0g6ZO8GgEzeOgqPmazl6wbinoTxLhILsnGHiC+1LREja0KHOX1XVAoQwHKqqViuKkgIEA9cAlxmnfQlsw8qd/5R+Abx/y2BWxOfg42LPIxOjKKlp4rNWom42GoUwr7bdi/Ir65n10T7Si2oAodX/xOSefLrrNDqDgTtGhuPv5mBy/C0U1zRy1+hwvtqbiUGFuB6e1DfpuGnhXpr1ogdAoLujRRevhmYDG5MLcbG3MclKA8SGetDL34Vv9pn3C66LDcbbxZ6XZ8qVmNVSW9x2rDxLNG9pTXWBqOrd/7HY7B1yp9AA+vJqyNkn5vj2hcjLIGMbeITBjPdEbP/mxVBXJlRCbaXU+LnS0St/E4qihAODEfUC/sYbA6qqFiiK0m5rHkVR5gJzAcLCwjrH0C7k6kFBXN0qLBLtB/+aNYhFu09jZ6PhkcuiGRnlzYJt6SZZZq1GwdXexuT4AZLyqrh3bAQPjI/k4+2n+GZ/FioqTnZa6prMMfhQLyfSi2roE+DG1P7+XD0omIn/3maSg0jIKmfpAbMzb8EAvH9LLC99n0xBZT1X9gvgsck9sbfREOblxMHMcoaEeXLP2PAO+T1JLiH6TBeyDY3GxutaOxh+v1jhFx03zwsYAF9fj6lvb/pmkbnT4vgBilNg2L1wyzKwscdC9Epu8P5hOsX5K4riAqwCnlBVtUo5W6nsV1BVdSGwEESRV8dZ+OflxqEh3GhU3GzU6dmZVsIL0/tyKKuCmkYdNw0LtdDjb+FYbiWf7840HX+y8zRPT+nFL8mFFFU3cFX/QJbsz6K+WcTojxdUoTfA2TV/lfXNjOvpw86TokpYq1G4c3Q4E3r5Mmm+Pzq9ARutWelj7vgo5o6/yL8EyaWLix/cuwH2fyTCPHH3gH8/mLMStr8lJJz7zoS8w1g0bK/Khdx2ijrry8FWdpC7GHS481cUxRbh+BerqtpSJFaoKEqgcdUfCBT9+hUkIFJBr1+wm8zSOgDGRHvz1T0j0GoUgj0c+WjbKVOuv5ezHS72bf9qm/Qqax8dC8C6xHwW7cm0eD+nrA5PJ1vKW8Xzr4jxZ2r/AL4/nEdOeR3T+gdapHm2dvwSSbv49YGZ71mOuQfD1e+bjze81Pa8PtMgaYVo6QiikXv/G8Q+QHU+uAZKkbcLoKOzfRTgM4T+/zut3lqDaA35D+OfP3SkHd2BJfuzTI4fYHd6KTtOFjOxtx+hXk78MG8M3x7MQatRuHV4GCU1jby/Jd3iGm4ONsz9SqymLo9pW0TTO8CV+8ZF8t6mNEpqGrl+SAgeTrbc8fkBmvUG7h4T8av5/RLJHyL/MBxfI0TaBt0Cox6BlLVQbtzjGnSraPJy3xaxQWxoFk8NTbXwwWAozwS3EFEBHDq8S7/Kpco5afuc98UVZSywEziGCBUDPI+I+y8HwoBsYNbvFY1dito+tY06GnUGvJztLvhaf//xOJ/sPG0x9s7sQfi5OlDTqOOy3r5tlDYX789iwdZT6A0q1w0J4tOdp2nWi79vO62G2cNCWHYwh2a9yriePjw4IYpVCbnY2Wi4Z2wEtloNV7yz3WKjeOWDo4g7R1mHX6Oyrhl3J8tMo+LqRlwdbKRaqDWQvhkWzxIN2gEixsOda0HfLCSdnX3EHkB7fDLZsheAbx94ROr8/xYXqu1zXqiqugshBNcekzvys7uaBdvSeX/zSRp1BqbE+POfmweTX1HPK2uSOXGmmvG9fHn16n7thmfa4/ohIXy5N8ukzePras+K+Bz2Zoh7ZoinI6sfHo2fqzkeOnNQEDcMCcHBVsuHW9NNjh+EhHOIpxMHnr+cumY91Q3NXP3BbpMk9I+JBcydENkmQ2hzatF5O//UM1XMW3KY9KIaov1c+OCWwQS6O/DgNwnsyyjD1d6Gl2bESNXQ7s6BT8yOH+D0Dig8LiSbW/T8/fuLPYIfn4JjK0SIZ+qbUJRiea3iE7LT13nSadk+1kRaYTVv/3zCdPxLciHf7MtieXwOaYUiK2dlQi62Wg1/u7Y/KxNySMytZGSkt6kIymBQSSuqJtDdEXdHW/oGurH6odEsO5iDk72WPgGuPLnMnPOcW17P4n3ZPHlFLxp1ep5ekciPifk42Gp5fHLPdgu/QjwdcXGwwdPZjn/+kmXRC6C6UWdR6NVCtK+LqQeAr6s9AM16A+uTznDGmPnTw9sZg0Fl9eE8DmWXMzzci2tig3hu1TFTVlJ6UQ3PrUpkWLgX+4w3sOpGHS9+n8Tkvn54u9hf0N+B5E+MTTtPwonLYHerfYHRjwqBt5aK3vLTsOJuiJoEJ340z4ueLB3/eSKdfweQVljdZuxoToXJ8bewO72EV9YkmXLjF+/P5nRJLdfGBnPnogOcLqnF3kbDq1f345bhYcQEunH9kGA8newsZJ1bqGowVtzuz2atMTe/rknPm+tT+emxsUzu48fmVLG3PjrKm4+3n2LeksMMCnE36fa3ZnSUD6oKX+/LwqCqzBgQyJ5TJTy98igKcMOQEN66YSD3fxXPthMin/vfG9JYOnckPyUWmBrPL9mfTVphNcfzKy2ufzy/CndHy/BPk97A6ZJa6fy7M6Mfg5MbhRQDQP8bxeq+NQc+bSvQpqsXGv9OXkLCISQOrnyjc2zuhkjn3wGMjPTGwVZDQ7N5JT2lXwDxWeUWjdf7Brqy/GCuxbmL92eRXlTD6RLRNrFRZ+Cva48zOsqbB75OIPWMuLHcPCyEQHcH0/XstBquGywaXaQUtL35nCyq4bO7hpFeVA0oPLvyKMfyxA3kaG4lGkVhYIg7ibmVxu/gxZbUQoqrm3jj2gFM7OvLoaxyHvzmECCS8lYk5NI7wNXk+FvsXbQ7k02tBOTE98pmTLSPxdwx0T6M7+VrSiMFUdksN5W7EQY97P2vcPa+vWHCfOG058VD0irwioDe0+GDIZbnaW0hZLjQ8m/BxlFo+ZxJBF2DqBPIPww9r+jc79RNkM6/A/Bxsefzu4bx3qaTVNY1c/PwUGYOCsLX1Z6nVxwlt7yeIWEevDQ9hn0Zu2iqN98knO1tyCqrs7hefbOehTsyTI4f4NuDuXxx9zAOZpZRXttMVlkt1364myAPR4tCMQA7Gw1JeVX8a8MJvJ3tefbK3hzNtVyFJ+VXcuL1
aSRkl6NR4PFvj5jCMZtSClkwZ4jphtSa7LNsBTCoKs72Nhaibi72Nrx940BeW3Oc+Kwyhvbw5NWr++HjbE9Ng441R/MI8nDkual95Kbvn4GaIpFaae9yYdfZ/jZs/4d4nbkTzhyD27+H9c8KSQZbJ1HZO/4Z+OERTLn+4/4CIx8Wwm/HVoBrAFz5pmj+3nK9qjz4dg48mSTqCSR/COn8O4jRUT6MjvKxGBsZ6c2OZyZS26TD1air89SUXrz8QzIgCqieuqI3WWW1HG0lhtbL34X6Jkt1TIBmvcozV/bhjZ9STBr/ueX1fLU3i2eu7M2qQ7m4OdgyJMyDT3ZmAJBTVs+9X8YzJMyDg5nm4rAREd5UGJu3HM6uaCM098ORPB6f3It/bzhhUhDVahRuHhbG6ZJa0+rdTqvh7tHhjIv24f++O4aqgkYR39PP1YEP55y1wgMev7wnj1/e8w/9fiUdRGMNrLhLNE63dRJaOaPnnf/1WvrytpCzH3b8SzhxgOZa0ah9XjzM/griF4knhLh7RBVv7K1C96e5ToR9Tu+wvJ6+EbL3QczV52ZP6o9wZAk4+8LYJ8Az/Py/2yWOdP6djMaoy9/CHaPCGRnpTWJuJcPDvQjzdsJgUNEqChuOFxLu7cxfpvQivaiG1YfzTOf5uNgxJtobgCPZFRafUdOoY2y0D49MFPLM85Ycsni/vlnPjUNCcLKz4VBWOQND3alv1jHk9Y24O9ry8GVRnI2/mwM55XXcNCyUlIIq7LRa7h8fSUyQG5/eGcePiQUUVDYwtX8AUb4uxIV7ERfuxZGcCob28CTCx9nieoVVDXg42crOXn829v1POH4QDnfDi9DnqvMXS/MIg+JU87G9G1RknjVJhRM/iabu+kbI2CKeEm5dBl9dK5w+iLFh91meqmh+PS30bE78DN/eaj4+uQEePWS1FcPS+f8J6OXvSi9/c7csjUbhgQlRFu0Ygz0cWXj7UJbH56IooDeoPLL4ELcMD2NEpBcHMs1lEh5OtvTyNz+uxwS5sS6xwHx9Bcb09OGm4UIv6fV1x00CcpX1zbyzMY07RvXg631ZqCqEezuRUVzDV3uzAHCy07L8gVGm2Ly9jZbrh4S0+V7Rfi5E+1mGDYqqGpj7dQJHcipwd7Tlb9f2lzLPfyZa6+0AoEJBIiR/B2eSRHZN7Byhq5N/2BzL7zPTnHXT3CCUObU2cPmrItRTXQA2DjD1H+K9pFXmj7B1gsIkS7G3wiRxI9KdLXWugYE3i1CQgxtMfkXsGzRUClucfSBigrCvKh8SvhTXjb2t7aZyVZ7oGRDdrbPOfxXp/C8hpvQLYESEN+Pe3kKVUYd/64livrhrGLcMD2VdYgGhnk6EejkS+9eNuDrY8tzU3twzJoJjuZX8nHwGV3sbJvXxY86n+1FVuH98JKlnLDOHGnUGrh0czH1jIymqbsDeRsvM/+4yvV/XpOfzXad556bYP/wd/rXhhEnfv7K+mfmrEpnYx++c6x0kHUzPKZC82nxs5yrCJCd/EcfJq0UYxjtKpF62xOgH3wYz/gPrnoCjS43yyi/CiLlwzwbY8bYQdQuKFdo+NYXG1oteIrSUsqatLR7hbcc8w6EyWzyJBPQX9paegs+mQJ0xcaD3dLj2Q1g4EWqM8ucHPoF+17W9nmvgef6iLn3k/7hO4nRJLa+uSSb1TBXjevryyswYXB1s2Z9Ryra0Ynr7uzJzUBBazW+L3m0/WWxy/C1sSCnkzesH8ub1A1m8P4sXvksCoLGmkedWJTIiwpv/3TaU6oZmUgqqmP2xWSnxpe+TuG1kmEVfYB8XO+qb9Ly//yRuDraMimpb1NW6JuC3OJZbycc7TtHQbOD2UT3apLvWNunJK6+ndzt9giVdQOwtUFcKRxaLVfTox2HxjZZzjiwRzdVbC7EdWQI+vcx5+Q2VsP4ZCB0mNmWrjCHLI4vh/q3i6aGhAipyoKkGhj8AicvFGIjV+/D7oOQExH8GqkHIPZScgENfijmlJ6EyF4IGmx0/iDqAvTFmxw/iM+zdwDsaSo2yJ8PnWnXjF+n8O4mHvjGnaa5MyMVGozAs3IunVpgLtXall/CvWYPIKq1lQ3IhAe4OTO0fgG0r8bTgdhq7ty7gOnxW/N+gwpHcCsK8nXB1sCW+HQVQb2d75k2MZm1iPsEejlw7OIg7Pj+AvqXxy7F8hoR5cMh4bVutwmW9/fho+ymifF2Y3McPRYF3Nqbxzb4sXBxseHpKb0ZFenPTwr0mGektqYXMjgu16OwV4unYJjQk6WJGzzNv8jY3gJ0LNLVKH3b2aSv/iiJCNWdzeLHZ8YNI0TyyGDK2Q+ExMXZsOdy4CB5NEBvBTj7Qa6oI3fjHiDaPjp4ihLT0rI6yuQfFvsK54OoPjxyAnANiw9enbctSa0I6/06guLrRIk0ThKM/lmeZbrn6UC4zBgbywNcJpnaIk/r48fldw6hr0rHtRDGuDjbcMiyUpQdzABgQ7MZV/c2PrsPDvViZYK4d0GoUiqsbuH7BbpztbRjfTjFXL38XssrqGBPtw1X9A1mfVGBy/ACFVY28cFVfbhiqo7CyAW8XO55blWiac1NcKKOivPnAKCRXXtfMk8uO8Jcreln0DzCoIuXz4cui+Dn5DBHezsyf1ud3n3YkXYitA1z+ikjNVA1g6wyTXhIr++W3izGAoXdCjzFi9d6CxgaC2mZ30VRrdvwtHFkM/a8Xuf2OnmK/IHEFrHvSPOera8SNoDLHPOYaBHH3igbwBuMTsU9vGPMkpG8S+xIgwkWxtwkV0B6jLvjX0h2Qzr8T8HK2I8jdgfxWBV79gtworLLsZqTVKCw5kG1y/ABbUovYnV7CMyuOms4fG+3D9qcvY+HODFYk5DLx39u4MiaA926O5cahIWSU1PLtwWzcHW2ZOTCQ19eZ9VAOnC7j3jERLD2YjUFVuXtMBJ/tOm1a1S/Zn81VAwLafIdAD0euNmr6zP54r8XNYUVCDs0GyzCQQYWqesvwFIgGMneODufZqX3O+fcn6WKG3y8KqYpSRKvFlsYpD+4ybvj2ET14FQXKTkPCF0KaYdILYgV/ZDFkGfeM3MNgyB1iTuuwkb2biNHnHxIZPKMfMyt8tlBXKuL2lbki5OPsC9d8AOFjRM+AxOXiqSTuXrB3hns3ioweXQP0mgZ2bbvgWTPS+XcCWo3COzfFmgq8BoV68PLMfhzPr+LBbxJMjvSu0eHkVbRt5L72aL7FjWNXegk700tYvN/cZevn5DMsPZDN5D7+1DbqGNfTl1lDQ9icYllp26gz0CfQlaOvTAHgVHEN/9t2ymJOWW0z4d5OFr0DPtmRwd/WHefawcGcrQSrAv2D3Fh9yPx4rygwKy6EwuoGU2/i4RFezIprmxUkuQTwDG+bE+/fT/y0ZsIz4qc1d66BU1tF3L3nFOGERz4M+z4U7zv7itV+y0azahA6P0PuPMsIRUg+DLlDrP5dAsw6QaWnRA2AahDjQ24XVcJ9pl+EL989kc6/k2gp8Kpu1Jn0bII9HNnw5Hh2phXTK8CV0VE+HMwsY9PxItO
G6riePu1WvJ4qqmkzdrygiv9tO0WRUXhtXWI+d4xq2zC9tlHHw4sPoVUUrhkchKJYhnB9XOz46p7h7M0oxdFWw8OLD5naRh7NreSOkT1IyCo3FXtdNziYu0ZHkFNez9ID2Tjb2fD0lb3p6e/Kf24ezOOTe9LQbCAmyO2CfoeSSxBdI/w8X6R2uoeKp4aI8TD1DRg8R2z4RoyDtU+0PTd8nEg9zT0IGluY8JzI8qnMhaPLRAgndo5II109F9OTxJp54NMTwkZ26le91JDOvxPRaJQ2QmZRvi5E+Zo3PIeFe/HT4+P4OamAIA9Hpg8M5GRhDUv2Z5tuCMEejtw9JrxNiMjXxd7k+EE49NoGPZf39WNTShE2GoWrBwXxtx9TTFLNW08UcW1sMN8ZC8hcHWwYFeXN+5tPMjDEHVuNYnL8LeRXNrBm3li2pBYR5evC1P4BaDQKr8zsx8szYji7TWekr9zQtVp2/wfiPxevGyrh29vgL8dFb94jS8Tmrn8MxFwjNn5bcPSC3tNg4CyxqnfwAGdvqMyDj8ZBvbGu5cBC4xPCWRvQp7ZK5/87SOf/JyTaz4V5k8xyB/2D3Vn98GhWJuTi5mDDbSN74OfmwKK7hvHe5pNUN+i4dUQY/YPcWHBWCMfLxY5IP2ccbbVc0c+fjOJaC43+Rp2B/sHu3DGqBznl9eSW1ZlSRQHmjAhDo0BrWf8oX2f6B7u3K8B2rv2ZJVZC1m7L48ZKkar5ywuYHHbqOlFpe91C0bXLxU/o/di7iD2FXe+JzdyRD4mev/Wt+j5VF5jTQ1tzrlW/Vox0/n8Cahp1rIjPobCqkRkDA9t1qu052xGR3jwB2NtqGNpDbMJdPyTYFHuP9HXmYGaZKf1zbWIBtw5v2yglxNORwWGeDA7zZMw/tli8t+pQLs9O7cM7G9No0hkYHObBgxOiSC+q4a2fU8kqrWVqvwAem9xT9vOVtCV4KGRsMx/bOhsbs7du1p4HaT+LTeDcAyLE499f/Ln0ZnMWT85+GPFg288IHARjHod9H4nrxt0jY/3ngHT+XYyqqsz5ZJ9JZfPTnRksuX8kUb7OvLspjZSCasb19OHhy6KxszE716qGZm76eB8pBaI6d1xPHxbdNYx3Zscyd3wk5bXNeLvYMeVdSyGsU8W1jO/ly440Ia08Jsqb7w/nsmj3aWbHhWKjtVy522g03Dc2gluGh1FR10QPb2f0BpWrP9xFTpnYnE4rTMfORmPxtCKRADDuKZEBdPwHcAuCaW9D9t628zK2QfYe8drQLJQ79Y1mxw+AKuL8HmFQYUx28O0jQkZ2twq5aFRRXSz5XaTz72IOZVdYyCvrDCrf7MuiqLrBJKmckFVObaOOF6abqxFXxOeaHD/AzpMlbD1RTISPE+uOFuDhZMuE3r5tQjYeTrZ8fHscaYXVNDTruf2zA1TWiyYw+zLKuGNUD5OGD8CDEyKx0Wpwd9SY9itOFlWbHH8Lm1OLeGRiNOlFNfi62uPhdOF9iyVdTFMtaO1Fzv35Yucsmqwb9MJxg2jT2LpZ++DbxGedjaadf0OBg8TGb+IyY2XwXLOzP99UztJT4hqubVOcuzPS+XcxDrZtQyUaBZPjb2F90hkL519W23j2aSTmVjBvySHTJvCygzncNTqcz3dnAqLAKtLXhf9bncioKB8cbDQmx99CTYOOdY+OZV9GKQOC3RkRKZRDv9h9mg+3iWbwt40Mw9FWa6HXH+LpyLT/7CT1TDV2NhqevbI39407TyVISdfSVAvfPShi8Y6eMOXvQvYBoK5MOHGHP9hwR9Mqy5xmuAAAGihJREFUY801AOYdFN24nH1Eumjy95Ybvg7uMOoRqCsW4SBVhQE3Qr/rxZ7BhheN1cJL4baV51blm7kLUn8SukSxc0Qf4W9vFU8dikbcSKa99ce+1yWMdP5dTL8gd66I8WejsfOVq70N942LZOfJEkpb9dAN9XTiqeVHWXs0nwB3B+4dG4GdVmPKAHJ3tKWgst4i++dkUQ3PT+/LdYNDyCytZVNKoSmnf+kBcWM4mzBvJ9wdbSmva2LHyWKCPBwpqWnk1bVmtcf3N6czd3wESw/kUN2gY0CwO3ZajamKuUln4B/rU7k6NsiiobzkEmHPB2ahtbpSWPOoSM/c8rpYcStaGPEAXPl3MUffLFb2f0QaWWsLkRPMx/2uhYb3hdibs49Y3Tu6Q/8bwMkbQoaJQrL6cvjleeH4QWj9bPsHXLvA8vp5h4QEdGCs+JykVbDyHvP7JzeKFNOW/QjVAPs/Ei0lQ4f9oV/XpYp0/n8CPr5tKNvTiimsamBklDfBHo68fm1/nllxlNomPQFuDkT4OrPEWNSVXVbHWz+nsujuYaxKyKVJb2DexGhWtJJ1aMHBRsuAEHd6+rvw5LIjFu9tPF7IgxOi+GRnBnqDyuAwD6YPCGTGB7tMTwRL9me3Wytgp9Vy8IXLKa1tItjDkds/22/xvs6gklteL53/pUjBUctjQ7NIqTy6VByrBtGasdeVkJcgmrPom0Tx1bR/ivdT10J5pqis9WunmrumSFTfuoeYJZiH3il+Wtj5b9j8V/Px9H9D6Eiz42+h1DLDjUNfi1z/FibMb9sEJm39/7d35+FRVecDx79nsm8khARIgIQQCFuQxQQIiKCAAqKoqIBWwQWRWrSt1Wr5WRW1bq0LbkWpVkXrriBCUXaQfTOBsCVACHvIQkISsp7fH2cyk5sEsC1hYOb9PE8e5965d+aEZ3zn5r3nvK8pCV1XXqYEf3H+2GyK3nHhPPjpFh77Jo2wAB+mXtOFtVOHkJVbTEKLEO7+YIPlnJLyKrZkF/Bj+lGKyipZuzePp0d15etNB8gvMYE7KdZUXiwoKSfIz5sAXy+KalUEDfH3ZvLAeLyU4khhKXf2j2PpzhxLKii/pKJeFVGAXrFhfL3poKOMROeoEEsv3lZhAXSTXrwXp7iBprlKDd8Qs1irrt0/mL8Saqyfaco/7Pje2cFr8TPwq6+g3SDncYdT4f0RzmJx3W6B0e+ax0VHTP7dN9hM8axtxSvw2zQIjzdBukbC1SadExRpAvfKl63nrXodYvpY9ymbWUeQ+plzn08QxF95un8VtyPB/wIxY1kmC+2lGPJLKnjs61QGJkTSNTqUiqpqkmKbOmboAAT5evHBqn0UlZnAnFNUxswVe1n00CB+TD/CoYJTzFyxh3HvrsXfx8arY3ry+6EJPGVP33jbFA8O7sDNM1Y5yizP3nKIuy5rW29snVqG8H/XdObNJRlUVWvuGdCO8spq/vSNszjXxn15PHx1RxbvOEZ0WAC/G9LBUo1UXER6TzT19lM/g+AWMHSauTJf+7bzGJu3CZZ17Vlmbd1YXQmr37QG/1XTrVVC0z43+f1F0yBzkWn6MvARZ9G4GrrKzPPvdrNZJFZebFYBr53hLN/c7RaTgrKcV22mgmatcv7VkHyPqRNUXWVaR/qFmJlJHtQLWIL/BWLbIWtDlYoqzerM48xas591+/JIaBHMVV1asDLjOC1D/fnjsI5M+sjanjErr4TwIF/GJMcw7NXlFN
srap6qqGbad9tY9dhggv28Wbn7OKN6RuPv7WWpr19ZrTlaWEaH5sHstpeP6NQyhJGXmD4DTQN9OVp0ihGJUbyxJMPy3iUV1cRFBPHV5H6N8c8jziebl6nkOeQJ6/7r/26+ALz8TKAMb2eatNQO0m16w5ZZ1vPqln8uL6n/nps/MoEfTIBe9LQJ0OvfdR7T4Sp4q49z+mefyeYLoHbd/rTPTUXPn15x7utzr/nymbIRMhaZmv5t+5vnut1kfjyQBP8LRP/2ESzaccyxHeLvzVcbDzjaM+46epKqak36tGFUVWtyT5YxMCGSZbX+Ghie6JyqdviENS96tKiMv/2w01F2eU7qIaZcUb+eeWiAD99NuYwlO46hlOKKTpH4eXsxZsZq1u41Y3lt4W7GJNdfLBYvZRzcW49xzlk/NUbPhGUvQkWpmS1z6XhTSrnmhrHN29TZmTHQXF0PfASS7zI595ovjTZ9TOkHCw3xV5hqogc3Qmw/c2O39rz/9e82vJgrfhB0GGLy/FE9TA9iMPcXat9T8HAS/C8Q4/u1Jbe4jG83H6JFEz8eHd6ZX3+80XJMZk4xi7cf40/fpHGk8BTxkUFc3yOa/XklhAX6cCi/lJcW7ODey+MZ1SPaMl9/eGJLZq5wlsjVGr5PO8zgTs0dXzrhQb7c2T+OnKIyFm4/RnZeCYdPlNKjTZgj8IMpCVFQXO5YLObrZWPyoHjpxuWJEkebn9pueh+2zzaLu/zDYN5DzucOrIcHfzatHdO/NQG55+2wc761x65viOkPEBBmcvoAC5+yvk91FXQdbdYM1HyRhMeb87x8oO1l5/73dSMS/C8QXjbFw1d34uGrnTMjeseFMy/N+Sdt1+gmPD57K0cKzVV9Zk4xrZoGMjwximfnmZr9C3ccY2NWPh/e1YeWof6s3ZNHjzZh3Nm/LT+kL7K8Z2W15t07kvh2y0H25JxkfL84mgX5MvjlZew9bhbdrNuXx8QBcfXG6+1t48O7enOooJQgX29CA33qHSM8lJe38wuhdjMWMCmdzCVQUWzaKdq8zGyiS24200o3zzIF3K6YagL/9rnOm8oxfU29/xrdboauoyDoO/j5U5Ov73OfCfzirCT4X8CmjUqkqlqzKiOXrq2a8NiIzox6w1ooa+eRQnKKrDMx1uzJI7e4jP7xEbRuGsiA9hGEBfpyR99YZq50Xv3fM6AdbyzJ4NWFu6jW8NWmgzx1XVdH4K+ReuAE/eKbsSrT9Pn197FxZz/zhRDdQFtJIRwiG5jmeXCDmRkEpqbPwU1wx2wzNbQgy6zcLToMR9Ks3cIOrIPr3oS8DPO6ifZcfdvL5Cr/vyDB/wIWEezHjNuTLPt6tAmz9MAdmBDJkcIyth92HuPvY+PNxRnMsq8LCPbz5pOJfZh6TWdimwXyU0Yu13WPIqltOCnPL3aUfzh84hSzNx+0LB4DiAkP5NkbujF/62GOFp5ieGIUbcKlK5L4BXqNN7n3HXPBy9d06Ko9jRTMYqyVrzhnE5UVwpd3Q78p1pvJutrc3B3y5PkavVuT4H+RefO2XjwzN51thwrp3z6Cqdd0Zm9OMakHCigoqcCm4L7L45m+eLfjnJNllby1JJP+HSL485xtaG3aQ/5pRCdLO0aAvJIKHhvRiefm7aC8qpp2kUE8OKQDvt42RvVodcaxZeeVkJ1XQq/Ypg02oBFupLwElr9kirS1TjYrcv0auOHv4w9jP4aio+DtZ1I5hzabJi01fIPhSKr1vOqKBprEA5EdGx7P0XSTMvINNFU9m0RD1mpY+hcoyTedvfpM+u9/Xzek6rbku1AlJSXpDRs2nP1AN7RhXx7z0o7QumkAY3u3IdDXG601G7Ly8bIpesU0paS8ko1Z+bSLDKa0vIohLy+zvEZKu2akHy60LODq3DKEag07jzrnXL8wuhtjkmPIKy7naOEpOrYIwVanwfqxolN8ui6bkvIqbrq0Ne2bBzN90W5eWbgLraF5iB+fTOxL++Yy+8dtfTMZfv7EuZ04Gm56z1zlZy42JZm73mBy+sczzM3cgDDocauZq//RjaYuv80bhj1vrurnP+J8PZu3uTG87AUT1MGce+3rJi20+wezqCvucsjZCe8Mgkp7scGQaLhrAbzV19xbqHHT+6ZJvIdRSm3UWifV3d+oV/5KqfeAkcAxrXWifV848BnQFtgH3KK1zm/McVzMFqYfZeJHGxwXQQu2HeGfd/bm1plrHHX6+7dvxvsTejOgQ6TjvN5x4ayrNUPn5qTWPPyl9erqRGkF397fn7eXZZKdV8qIbi25sZfpsRse5Et4kC/5xeXMTT2El83GyO5R2JTi+jd+cvQU/nD1Pj64szfTF+12jPFYURnTF+1m+riejfSvIlwufXb97Y0fwHcPOPftWwHJE2HmEGdg3vgB3LfCNH9f9TqEtDA3bn2DTRDf8rGp5TN0mpkJ1O1mky5q1t7M+y/Ign8MhWL7FOdOI02htspaVWaLDsGat6yBH8wXhgcG/9Np7LTPP4E3gA9r7XsUWKS1fl4p9ah9+4+NPI6L1qy1WZa/ftfuzWPG8kxH4Af4KSOXH9KPcFWXlqzfl0eLJn68NyGZWWuyyMotYXhiSy5PiGTl7uN8vdnZZH1McgzNm/jzxLVdKa+s5pnv03l6bjrRYQE8PrIL7ZsHM3L6SsfsohnLM5k8MN7STL6kvIovNmZbuoMBHC2sU39FuJemsdbUTVgMrHnbeszmWSZ1Uzsw52w3XxQ/PgGF9lpUG96HScth5Mvmp8aWT+Dbyc7tAxvMF0Oxc20LO+aaNE9dzeqvYWnw5rMHa9Tgr7VerpRqW2f3KGCQ/fEHwFIk+J9WoG/93HlRA7V2dh89yXPzlnKwwPyPNqFfW568ritg0jRzUw8x8fI4EluFknbwBCdKy/lqUzbr9uXy2PDOLNlxzLEuIL+kgns/3MCkgfGOwA+QlVtC+uHCeu/dOiyALlFNLM/d0PPM9wfERW74i/DZr0wLRb9QGPFXWPik9RjlZVYD17V/tTPwg7ma3/Y1xKSY9FBghGnuvnaG9bytX5lUUl1xA83K3QL7upaYfnDpBNMyctmLZnpph6tN2Qrh4Iobvi201ocBtNaHlVKnLaahlLoXuBcgJuYX1Ot2Q/cNjGfZzhxHqYZbklpzW58YZq3JcpRvDvL1Yn9esSPwA/xz1T7uSInlUMEp7v5gvePYqSM6k9AihBf+vQOA/XmlTHh/PQktrPn5wlOVHDlR/+o9oUUIPWPCHH95tAoLYGyfGG7tG8vfl2ayP6+EEd2iuF6Cv3uLGwAP7YBj281NWN8gk4v/YoJzhk7K/eZG69YvTClmMAG+af11I+RnwfxHTfcuMOkf3zr3jGzeJu+fPtvU+QGISDCpnw5XQcaP9uJsV5h7DQMeMmmnilKTXhIWjX7D137lP7dWzr9Aax1W6/l8rXXTs72OJ9/wzSkqY8nOY7RpGkhKvGmu8nN2AR+uzsLbppjQvy2vLtzFgm1HLed9PimF5+Zvt6SIAn29SGwVarkfADC6V2u+2uS8Ggvw8WLuA5cx9p01j
nUErcICmP/bAQT4eLF0Zw4l5ZUM6dyCID+ZNCbscnaaRVwtE51z74uPm1W4AWHQ8RpTymHGADOXH6BJa3Ns6qfW17rycXtJB/skhX5T4Kpn4MBGU3QuKBKS74bA8PP3+12EXHLD9zSOKqWi7Ff9UcCxs57h4SJD/LglyVpLp3ubMP7WxvEdyg09W1uCf9tmgfSKCavXqau0oor2kcGW4O/vY2PSwDhyi8tYviuHFk38efK6rsRHBjPvgQHM3nIQb5vi+p6taOJvVk8O7SJXUqIBkR3rT8cMioCkO53bwZEwaYVJ8SgbXHILLH2u/mvF9offrDOzhyI7O4uxtb7U/Ij/iSuC/xxgPPC8/b+zz3y4+CWGJbbk3TuS+GJDNmWV1YxNboO3l41xyTGO0g8Aw7q25NY+Mew9XszqPbmEBfpwZcfmXPfGT5yqqOaS1qG8Nz6JCHsTlsgQP2nHKM694EhI+bVzu/ckSP3c3EMAk8ePSDDF4JLvcc0Y3VxjT/X8F+bmboRS6gDwBCbof66UuhvYD9zcmGPwJLHNAlm7N48TpRUs25XDjT1b8fKYHrQM9WfZrhz8fWzMTzvC/K0radsskM8npRAd5s+gl5Y6ZuukHjjB28v28PjILmd5NyHOkSNpptb+jTOh6KCp57/lE3ipnekhPOx56D7W1aN0O40922fcaZ4a3Jjv66lmLNtjSfN8vfkgv76iPdd2j2bkJVFc9sISR1/gfbklvPLjLn43NKHeNM3MnJMIcV5s/hhm3w/YP4NDnjJTOffaFymW5sOcB6D9UFPwTZwz0mrJjZwsq2hgn5kWWlJeZZkNBKbBe/c2oUSGWKfjDeks+Xxxnix/EUfgB9O39+hW6zFVZZC7G3FuSfB3I7f2iaV2JYZurULp3tr00Q3y86Z3nHVWxJX2Ri0z70iib1w4XaObMHVEZ0b3asVjX6fSc9oPjHrzJzbvlwXYopFUVdTfbneFdV9gM4jqfv7G5CEk+LuRgQmRfDYphfEpsYxNbkPRqQo6TJ3P/Z9souhUBW+M68n1PaJp3zyYO1JieeLarqzOzGX8++tYszePA/mldIluwvTFGfxrXTb5JRX8nF3AxA83Ul5ZffYBCPGf6jvZut37HjOlc8BD0LSt6dF72xfgI6XDzzWZoO1mktuG061VKCnPLSK/xFxVfZ96mBYh/tyREsvxk+UcP1nG8ZNllFVW88ScrRTYjztRWsHjs7cSGmBthnH8ZBkZx07SJbrJef99hJvrN8WUXdi73HT++vkzyFhs+gcP/rOrR+fWJPi7ob3Hix2Bv8am/fms35dH2kHTK3Ve2hFsSrE/z9pMOzuvhAG9YywLw5r4exMXEdT4AxeeqcNQKDwIq6ab7eJj8Nnt8LttZkqoaBSS9nFD7SKDCA/ytezr3jrUEfhrrNmTy/DEKMu+YYlR/H5oRwZ3ao5SZlXva+N6EtBAjSEhzpm9K6zbVWWQvdY1Y/EQcuXvhvy8vXj7tl48MWcb+3KLGda1JY8M68SKjOPsyXGWue3WKpRnb0ikeRM/Nu7Lp1dsUx4c3IEgP2/+MSGZssoqfL1sKKXO8G5CnAPRPWHrl85t5SU3eRuZNHPxIKkHCvj95z+TcewkvWLCeG1sT2nHKC4MlWXw3YOm5IN/qJnv3+t2V4/KLZyuto8Efw90qqJK2iyKC1NlGdh8wCYZ6XPlQirsJlxMAr+4IG36yNnJa+AjkvZpZBL8hRCulz4b5vzGub1vJfw2DfxlenFjkb+thBCut32udftUAWT95JqxeAgJ/kII1wtvoGx4Q/vEOSPBXwjhen0nQ5u+5rHNGy5/pH5TGHFOSc5fCOF6AWFw9wI4nmEeB0W4ekRuT4K/EOLCEdHe1SPwGJL2EUIIDyTBXwghPJAEfyGE8EAS/IUQwgNJ8BdCXBjWvQvvDIJZN8GBja4ejduT2T5CCNdL+xLm/cG5nb3WlHcICHPdmNycXPkLIVxv17+t22WFkLXKNWPxEBL8hRCuF5HQwL4O538cHkSCvxDC9fpOhriB5rGXH1z5fxL8G5nk/IUQrucXAuPnQEG2eSy5/kYnwV8IceEIa+PqEXgMSfsIIYQHkuAvhBAeSIK/EEJ4IAn+QgjhgST4CyGEB5LgL4QQHkiCvxBCeCAJ/kII4YGU1trVY/hFlFI5QJarx+EmIoDjrh6EEKchn89zK1ZrHVl350UT/MW5o5TaoLVOcvU4hGiIfD7PD0n7CCGEB5LgL4QQHkiCv2d6x9UDEOIM5PN5HkjOXwghPJBc+QshhAeS4C+EEB5Igr8bUUpppdRHtba9lVI5Sqm5Zzlv0NmOEeKXUEpVKaW21Ppp24jvNUEp9UZjvb67k05e7qUYSFRKBWitS4GhwEEXj0l4llKtdQ9XD0KcnVz5u5/5wDX2x+OAf9U8oZTqrZRapZTabP9vx7onK6WClFLvKaXW248bdZ7GLdyUUspLKfWS/TOVqpSaZN8/SCm1TCn1uVJql1LqeaXUbUqpdUqpNKVUvP24a5VSa+2fx4VKqRYNvEekUuor+3usV0r1P9+/58VGgr/7+RQYq5TyBy4B1tZ6bgdwuda6J/Bn4C8NnD8VWKy1TgauAF5SSgU18piF+wiolfL5xr7vbuCE/TOVDExUSsXZn+sOPAh0A24HErTWvYGZwBT7MSuBvvbP7afAIw2872vAK/b3GG0/X5yBpH3cjNY61Z5nHQfMq/N0KPCBUqoDoAGfBl7iKuA6pdQf7Nv+QAywvVEGLNxNQ2mfq4BLlFI32bdDgQ5AObBea30YQCmVCfxgPyYNc/EB0Br4TCkVBfgCext43yFAF6VUzXYTpVSI1rroHPxObkmCv3uaA/wVGAQ0q7X/aWCJ1voG+xfE0gbOVcBorfXOxh2i8CAKmKK1XmDZqdQgoKzWrupa29U449PrwMta6zn2c55s4D1sQIr9Xpf4BSTt457eA6ZprdPq7A/FeQN4wmnOXQBMUfZLKKVUz0YZofAkC4DJSikfAKVUwn+YSqz9uR1/mmN+AH5Ts6GUkpvOZyHB3w1prQ9orV9r4KkXgeeUUj8BXqc5/WlMOihVKbXVvi3E/2ImkA5ssn+mZvCfZR2eBL5QSq3g9KWeHwCS7DeU04H7/ofxegQp7yCEEB5IrvyFEMIDSfAXQggPJMFfCCE8kAR/IYTwQBL8hRDCA0nwF0IIDyTBXwghPJAEfyHOwl7p9Hul1M9Kqa1KqTFKqUvtFSk3KqUWKKWi7P0T1ttLEKCUek4p9ayLhy9Eg6S2jxBnNww4pLW+BkApFYopnT1Ka52jlBoDPKu1vkspNQH4Uin1gP28Pq4atBBnIsFfiLNLA/6qlHoBmAvkA4nAj/YSSF7AYQCt9TZ7N7XvMIXGyl0zZCHOTIK/EGehtd6llLoUGAE8B/wIbNNap5zmlG5AAVCv6YgQFwrJ+QtxFkqpaKBEaz0LUyq7DxCplEqxP++jlOpqf3wjpoz25cB0pVSYi4YtxBlJYTchzkIpdTXwEqbGfAUwGagEpmPKDXsDrwLfAKuAwVrrbHve/1Kt9enK
EAvhMhL8hRDCA0naRwghPJAEfyGE8EAS/IUQwgNJ8BdCCA8kwV8IITyQBH8hhPBAEvyFEMID/T+cmrrvkMUk9gAAAABJRU5ErkJggg==\n\"></div>\n\n</details>", "_____no_output_____" ] ], [ [ "# Câu 3: Vẽ boxplot cho cột total_bill\n# Bạn nhận xét gì về biểu đồ vừa tạo\n", "_____no_output_____" ] ], [ [ "<details>\n <summary>Nhấn vào đây để xem kết quả !</summary>\n \n<div class=\"output_subarea output_png\"><img src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAWAAAAEHCAYAAACQkJyuAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAMPElEQVR4nO3df6zdd13H8dd77QxF0bluLKbDXfFixBCZySRE0CDptFPijyjB+IOaGIjRlLpICJolRFMTiYkCJf4BatYZRYmALro1FkRBTMAONjdTzK56MS2wjU4cuIK0+/jHPcX2em/bs9173peexyO56f1+z/fH57Od++y339t+b40xAsDsXdE9AIB5JcAATQQYoIkAAzQRYIAm26fZ+JprrhkLCwubNBSAy9M999zzmTHGtavXTxXghYWFHD16dONGBTAHquoTa613CwKgiQADNBFggCYCDNBEgAGaCDBAEwEGaCLAAE0EGKCJAAM0EWCAJgIM0ESAAZoIMEATAQZoIsAATQQYoIkAAzQRYIAmU/1MOJ68gwcPZmlpaabnPHHiRJJk165dMz3vNBYXF7Nv377uYUALAZ6RpaWl3PvAsZx5+tUzO+e2x/8rSfLpL27N/83bHn+0ewjQamt+ZV6mzjz96pz61h+Y2fl2fPyuJJnpOadxdnwwr9wDBmgiwABNBBigiQADNBFggCYCDNBEgAGaCDBAEwEGaCLAAE0EGKCJAAM0EWCAJgIM0ESAAZoIMEATAQZoIsAATQQYoIkAAzQRYIAmAgzQRIABmggwQBMBBmgiwABNBBigiQADNBFggCYCDNBEgAGaCDBAEwEGaCLAAE0EGKCJAAM0EWCAJgIM0ESAAZoIMEATAQZoMpMAHzx4MAcPHpzFqYCnyNfr7GyfxUmWlpZmcRpgA/h6nR23IACaCDBAEwEGaCLAAE0EGKCJAAM0EWCAJgIM0ESAAZoIMEATAQZoIsAATQQYoIkAAzQRYIAmAgzQRIABmggwQBMBBmgiwABNBBigiQADNBFggCYCDNBEgAGaCDBAEwEGaCLAAE0EGKCJAAM0EWCAJgIM0ESAAZoIMEATAQZoIsAATQQYoIkAAzQRYGBdJ0+ezGte85qcPHlyzeVp97/U16c9z4XO/6pXvSq33HJLlpaWnvQxNmIsaxFgYF2HDh3K/fffnzvuuGPN5Wn3v9TXpz3Phc7/4IMP5tSpUzlw4MCTPsZGjGUtAgys6eTJkzl8+HDGGDl8+HCWlpbOW77YFeHq/de6yl3r9YvtN83477777i8vLy8vT30VvFFjWc/2DT3aOk6cOJFTp05l//79szjdlrS0tJQr/md0D2NLueILj2Vp6XNz/b7YipaWlrJjx44cOnQoTzzxRJLkzJkzOXDgwHnLd9xxR2699dZ1j7N6/9Xbr/f6xfa7VIcOHcqXvvSl89YdOHAgt99++1TH2IixrOeiV8BV9eqqOlpVRx955JENOzGwtb33ve/N6dOnkySnT5/O8vLyectHjhyZav/V26/3+sX2m2b8qy0vL099jI0Yy3ouegU8xnhbkrclyU033fSkLuF27dqVJHnzm9/8ZHa/LOzfvz/3/NtD3cPYUp542tdm8dnXzfX7Yis6+yeSG264IXfddVdOnz6d7du35/rrr8/x48e/vHzzzTdf8Di7d+8+b//V26/3+sX2u1S7d+/OnXfeed66hYWFqY+xEWNZj3vAwJr27t2bK65YScS2bdty2223nbf8yle+cqr9V2+/3usX22+a8V955ZXnrbvtttumPsZGjGU9AgysaefOndmzZ0+qKnv27Mni4uJ5yzt37pxq/9Xbr/f6xfabZvy33HLLl5cXFhayuLg49TE2Yizrmck34YCvTHv37s3y8vJ5V6fnLk+7/6W+Pu15LnT+Y8eO5fjx41Nf/W70WNYiwMC6du7cmbe85S3rLk+7/6W+Pu15LnT8t7/97U/5GBsxlrW4BQHQRIABmggwQBMBBmgiwABNBBigiQADNBFggCYCDNBEgAGaCDBAEwEGaCLAAE0EGKCJAAM0EWCAJgIM0ESAAZoIMEATAQZoIsAATQQYoIkAAzQRYIAmAgzQRIABmggwQBMBBmgiwABNBBigiQADNBFggCYCDNBEgAGaCDBAEwEGaCLAAE0EGKDJ9lmcZHFxcRanATaAr9fZmUmA9+3bN4vTABvA1+vsuAUB0ESAAZoIMEATAQZoIsAATQQYoIkAAzQRYIAmAgzQRIABmggwQBMBBmgiwABNBBigiQADNBFggCYCDNBEgAGaCDBAEwEGaCLAAE0EGKCJAAM0EWCAJgIM0ESAAZoIMEATAQZoIsAATQQYoIkAAzQRYIAmAgzQRIABmggwQBMBBmgiwABNBBigiQADNBFggCbbuwcwT7Y9/mh2fPyuGZ7vZJLM9JzT2Pb4o0mu6x4GtBHgGVlcXJz5OU+cOJ0k2bVrq0buupb/LrBVCPCM7Nu3r3sIwBbjHjBAEwEGaCLAAE0EGKCJAAM0EWCAJgIM0ESAAZoIMEATAQZoIsAATQQYoIkAAzQRYIAmAgzQRIABmggwQBMBBmgiwABNBBigSY0xLn3jqkeSfGLzhrOlXJPkM92DaGT+8zv/eZ57sjnzv2GMce3qlVMFeJ5U1dExxk3d4+hi/vM7/3meezLb+bsFAdBEgAGaCPD63tY9gGbmP7/mee7JDOfvHjBAE1fAAE0EGKCJACepqj+oqoer6oFz1l1dVUeq6sHJr1/fOcbNUlXPqqr3V9Wxqvrnqto/WT8v839aVX2kqu6bzP/XJuu/qao+PJn/n1bVV3WPdTNV1baq+lhV/eVkeW7mX1XLVXV/Vd1bVUcn62by/hfgFbcn2bNq3euTvG+M8Zwk75ssX45OJ/nlMcZzk7wwyS9W1bdlfub/xSQvHWM8P8mNSfZU1QuTvDHJ70zm/59Jfq5xjLOwP8mxc5bnbf7fO8a48Zy//zuT978AJxljfCDJo6tW/3CSQ5PPDyX5kZkOakbGGJ8aY3x08vnnsvJFuCvzM/8xxvj8ZPHKycdI8tIkfzZZf9nOP0mq6vokP5jk9ybLlTma/zpm8v4X4PVdN8b4VLISqSTPbB7PpquqhSTfkeT
DmaP5T/74fW+Sh5McSfKvST47xjg92eR4Vn5Tuly9KcnrkjwxWd6Z+Zr/SPLXVXVPVb16sm4m7//tm3FQvvJU1dckeVeSXxpjPLZyETQfxhhnktxYVVcleU+S56612WxHNRtV9bIkD48x7qmql5xdvcaml+X8J140xvhkVT0zyZGq+visTuwKeH0PVdU3JMnk14ebx7NpqurKrMT3j8YY756snpv5nzXG+GySv83KvfCrqursBcr1ST7ZNa5N9qIkP1RVy0n+JCu3Ht6U+Zl/xhifnPz6cFZ+A35BZvT+F+D13Zlk7+TzvUn+onEsm2Zyv+/3kxwbY/z2OS/Ny/yvnVz5pqp2JNmdlfvg70/y45PNLtv5jzF+ZYxx/RhjIclPJPmbMcZPZU7mX1VfXVXPOPt5ku9L8kBm9P73L+GSVNU7krwkK4+heyjJG5L8eZJ3JvnGJP+R5OVjjNXfqPuKV1UvTvLBJPfn/+4B/mpW7gPPw/y/PSvfZNmWlQuSd44xfr2qnp2VK8Krk3wsyU+PMb7YN9LNN7kF8doxxsvmZf6Teb5nsrg9yR+PMX6jqnZmBu9/AQZo4hYEQBMBBmgiwABNBBigiQADNBFggCYCzMxV1VVV9QsX2Wahqn7yEo61cO5jRNd4/Wer6q3rvPYPq49RVS85+0hG2GwCTIerklwwwEkWklw0wE/FGOO7NvP4cDECTIffTPLNkwdg/9bk44HJQ7Ffcc423z3Z5tbJVeoHq+qjk49p4vmsqjpcVf9SVW84u7KqPn+hnWCzeRoaHV6f5HljjBur6seS/HyS52fln4L/Y1V9YLLNa8cYL0uSqnp6kpvHGF+oquckeUeSm9Y+/P/zgiTPS/L45Ph/NcY4urFTgukJMN1enOQdk0dCPlRVf5fkO5M8tmq7K5O8tapuTHImybdMcY4jY4yTSVJV756cU4BpJ8B0u9QHD9+alQclPT8rt86+MMU5Vj/wxANQ2BLcA6bD55I8Y/L5B5K8YvJTKa5N8j1JPrJqmyT5uiSfGmM8keRnsvL0skt18+SHLO7Iyo+W+dBTnQBsBFfAzNwY42RVfWjyV7/uTvJPSe7LypXp68YYn66qk0lOV9V9Wfmhqb+b5F1V9fKsPKv2v6c45d8n+cMki1l53KDbD2wJHkcJ0MQtCIAmbkFwWaiq70/yxlWr/32M8aMd44FL4RYEQBO3IACaCDBAEwEGaCLAAE3+F/tD7moBSXBvAAAAAElFTkSuQmCC\n\"></div>\n\n</details>", "_____no_output_____" ] ], [ [ "# Câu 4: Tạo FacetGrid với 'time' và chỉ định thứ tự của các hàng bằng row_order, ánh xạ (map) của 'total_bill' lên lưới\n# Bạn nhận xét gì về biểu đồ vừa tạo\n", "_____no_output_____" ] ], [ [ "<details>\n <summary>Nhấn vào đây để xem kết quả !</summary>\n \n<img src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABHgAAAEYCAYAAAAnPkG+AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAVHUlEQVR4nO3df7Bmd10f8PcnuwsEQWMWTNuFdoXFUYoSdGUs2A44SV1aptBRhg5W1hmppalJTEsd2n+0nTKjZUaEtfkjtR02DtJafmlo2CHBtCAUMYGEhITWa0ktAROyCAQTSDb59o971t2sd7P3Zp+7z/ncfb1mntnnnHvu93zu+T733E/ezzlPaowRAAAAAPo6Z9kFAAAAAHB6BDwAAAAAzQl4AAAAAJoT8AAAAAA0J+ABAAAAaE7AAwAAANCcgAfOUlV1XlVdctzyX6mqdy2zpqmOO6vqaZs09kuq6v2bMTYAMA8z73FunR63V9W/raonzqlGoDcBD5y9zkvy583PGOMLY4wfX2I9AACLMOce56VjjO9N8sIkz0pyVXJmaqyq7Zs5PrB8Ah44e/1SkmdX1c1V9eaq2l1VtyVJVf1UVb2vqq6pqs9V1c9W1T+rqk9V1cer6vxpu2dX1aGquqmqPlJV370ZhVbVL1bVG45bvm2qd3dV3VFV/6GqPlNVH6yqc6dt9lTV9VV1S1V9sqqePX37U6rqXVX12ap6R1XVZtQMACzN7HucMcbXk7w+ySur6vw1anzPtP8/rKp/d/T7qurrVfWmqb/5eFVdMK1/elW9u6r+YHq8eFr/i1V1VVV9MMnVi/wZgPkR8MDZ641J/miMceEY41+s8fXnJXlNVt9helOS+8cYL0jyP5O8dtrmqiSXjjF+IMkbklx54iBV9dKpwTrx8bEF/RzPSfLvxxh/PclXkvzYtP4d0/rnJ3lRki9O61+Q5OeSPDer75y9eEF1AADz0KLHGWN8LcnnstrLnOjCJK9O8r1JXl1Vz5zWf0uSj0/9zYeT/KNp/VuTvGWM8YNZ7YV+/bixfiDJK8YYr1lPXUBfLtMDTuaGMcZ9Se6rqq8muWZaf2uS76uqp2Q1OPmvx10E88QTBxlj3JDVJmWzfG6McfP0/KYku6vqqUl2jTHeO9XwjSSZ6vzEGOPz0/LNSXYn+b1NrA8AmJc59Tgnu5L4Q2OMryZJVd2e5K8l+X9JHkxy9PMEb0py8fT8oiTPPa7eb536oST5nTHGA6dZJ9CAgAc4mW8e9/yR45Yfyeq545wkXxljPGZjU1UvTfKWNb50/xjjReus5UgefcXhk05S58NJzs3Jm6W1tnceBICzyyx6nCmA2Z3kfyf5tseo8fh+5aExxlhj/TlJ/saJQc4U+PzZqWoBtga3aMHZ674kTz3lVidx9LLiqnpVktSq56+x3Q3TJdInPtYb7iTJnUm+f9rP9yf5znXU9vmqeuX0PU+sqidvYH8AQF+z73Gmq4SuTPK+McafPt5aj/PBJD973PibefU0MFMCHjhLjTEOJ/no9IHFb36cw/xEkp+uqluSfCbJKxZU3qer6vPT41eSvDvJ+dMtVf8kq+90ncpPJrmsqj6d5GNJ/tKCagMAZmzmPc4N04cpfyLJHyf5xwsa97Ike6vq09MtXa9f0LhAI3XsCj8AAAAAOnIFDwAAAEBzAh4AAACA5gQ8AAAAAM0JeAAAAACa276Rjfft2zcOHTq0WbUAACxCPZ5v0ucAAE2s2ets6Aqee++9dzGlAADMjD4HAOjMLVoAAAAAzQl4AAAAAJoT8AAAAAA0J+ABAAAAaE7AAwAAANCcgAcAAACgOQEPAAAAQHMCHgAAAIDmBDwAAAAAzQl4AAAAAJoT8AAAAAA0J+ABAAAAaE7AAwAAANCcgAcAAACgOQEPAAAAQHMCHgAAAIDmBDwAAAAAz
[remainder of the previous answer's embedded figure omitted: base64 PNG image data removed; its text output was "<Figure size 432x288 with 0 Axes>"]
</details>

# Question 5: Create a factor plot (catplot in newer seaborn versions) containing a point plot of the 'total_bill' values
# What do you observe about the plot you just created?
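One possible solution is sketched below. It is a minimal example, not the notebook's own answer: it assumes seaborn's built-in tips dataset (loaded here explicitly as tips) and puts 'day' on the x-axis, a choice the question leaves open.

import seaborn as sns
import matplotlib.pyplot as plt

tips = sns.load_dataset('tips')                                 # standard seaborn example data
# catplot() is the newer name for factorplot(); kind='point' draws a point plot
sns.catplot(x='day', y='total_bill', data=tips, kind='point')
plt.show()

A point plot summarizes the mean of total_bill per category with a confidence interval, so the usual observation is how the average bill shifts between categories and how wide the error bars are.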
<details>
  <summary>Click here to see the result!</summary>
  <div class="output_subarea output_png">[expected output: a point plot (catplot) of total_bill; embedded base64 PNG image removed]</div>
</details>

# Question 6: Create a PairGrid with a scatter plot of "total_bill" and "tip"
# What do you observe about the plot you just created?
<details>
  <summary>Click here to see the result!</summary>
  <div class="output_subarea output_png">[expected output: a PairGrid scatter plot of total_bill versus tip; embedded base64 PNG image removed]</div>
</details>

# Question 7: Create a pairplot with a scatter plot of "total_bill" and "tip", using palette color = 'day'
# What do you observe about the plot you just created?

<details>
  <summary>Click here to see the result!</summary>
  <div class="output_subarea output_png">[expected output: a pairplot of total_bill and tip with points colored by day]
WFUsqfYY4V/u0AzgsBN0opZwJHAVcLIWYBtwLLpJRTgWWR7ZFHUylIA5c/mxxrI26LoNGuM6XUjxQgtcFdsRT5y3jT9T3Cmk765K4qmUPXeRCG5OO5doTFQuDdZYRKywb1uRWjj5ZAK87NVYw76UosFTVxHUWekcrctIM4b8s1nLn5Ci7ddiv/aV2J30rcJHrY5Yr+3Kl+XBWoRUdjmj2inKwquxT9MFDHokkpu88nbRzIuVLKGinlF5Gf24HNwDhgHrAgctgC4MwBWzxUSAkNWwlgx2MkkWOtjybup3bOYBlkigKlVPqKqKwfS/rUAJ0Tn51ug9mbPHw014ZMSkL6/PiefCYhNihGLoYBtU2mNEptEyQ1ueGcyzAqKjGamvuct5JjzexVPfYkK8hY+HxsEn3h84RnziSwdQXl79/PZfan+dyziUJbAZXVDhpaI295Vdml6IeBOpZ3hBD/EUJcJIS4CHgLeHt3nkgIMRE4FFgN5Espa8B0PkBeH+dcLoRYK4RYWz/Uw648jeBroTWcCRJyrA1UpVix+STjqgODnrgHGBesxGF4WVb9bSxOg+SxXWmsoz7rYGeuRtlEKwjwPvsiRtv+oQM60hiO6zIUgu21UFoNX5bAL+4BzdcVzvLM/ztpT/wrxlFoC5/gkdC75FqzelWPHRecQvsf7ib13vlkrlhqKhn/4W6kL0R1RjrXdDwYdSr3F/6VW+7JjOp8qcouRX8MVCtMAo8Ax2K2VzyKGdoaEEIIJ/AacJ2Usk0MsPdDSvlo5LmYO3eu7OfwwaWxBIwQrqD5hsy11rMlxcKECj/W0ODnVwA0JJP9xSwOnMXPff8mfWoAz05TG+zwtW6euEjy0eE2JpclYTQ24Xv6WZKvvWrwDVHskqG+Ljsl6M/+TZcE/WM3Q3Wbg9SI3ldw9Rrcd/yetH/dizH9AIqpwVlQwB8tNxE0gjw7+f8oaJTYQhCwgFXqtC5+k9bFb8Y8V8p9f2GcM43XpjyJ1/CjSyt/fiST2kZT5wtUZZeifwZ6JZwspVwopbxBSnm9lPJ14H8GcqIQworpVJ6TUi6M7K4TQoyJ/H4M4Orr/GEj4lgqA+a3slxrPTucFjNxH5KDohEWjyn+TZTYZtO6zYZzfDCaxE/rMJi5xcOHh9vAagUpcd/3L4xGlTDd13G1dDkVMO8feN1AK7KT9p/XyXjrNTJXLCXld7finVTAed67Ob3xFi4ouYFN3m1oBkzYEcK2qQxRXYdtUxkWNKzzvkf6whfIXLGU9IUvYJ33PdaFSvlB8ZU0iTrCfhtbq+Di08xZKnndRq90VnaZumH5yqkoYuivQfIXQoj1wPQeo4nLGUAfizCXJk9glif/vduv3qCrm/9CYOR1/TWWALDdOxEJ5NvqqHRaox33MjF+hSn+zXi0FLaWTwAB6VO6EqJHrXZTnadTXqgjUlKQLheev+8f0wv2Z/yBLqcCcORsg+uvKuV/6/5Ch7uJtquup/n4U2m76nrCXg8A30iexZNpv+abLWMoaNWQbk/MY4bDBqm/vYX262+h+fhTab/+FlJ+ezOvsIqqQC2XldyGltLKncFLSZtQyqyJhhrQpRgw/V0qzwNnYDqCM7rdviGl/MkAHv+bmAKWJwohvorcTgPuBk4WQpQAJ0e2RxYRx7LTX4BTd5Oke6h0Wphc6kNIBk3KpSdTfJsB2BA8GE+NhfRpAcwnhCM+dyOk5MPDbQiLBYnA8/jThDZvSYgtipGB3WaGvzq59bJmfu26mSssp2CcfUlM2a9x9iX8JfkSngpcSt6Jl+L50c9BSjRNxDggPRym5ZwLYvtYzvkJP4xEuKsCtehCpypQy6WlN9MUVnPsFQNnl45FStkqpdwupTxfSlnR7dY0kAeXUn4spRRSyoOklIdEbm9LKRullCdJKadG7gf0eENKvSmBUevPI89ahytJJyRh0nZ/QhL3nUwKlKARpsQxk5YtVizJBs5CM4mf3hZmxhYvHx1uQwLCmYLscNN21fXIgCr13Jforh6cG6jj3b8ZUeeSl2tqhOUZqXHLfifJHIyzL0EvyCflT3cSXre+lxMxGhrinyvyeT3rf1mV+U8Kms2VT/c59grFQFCL23j4O6B9J2gWanx55FldVKVYGFsVwO43CA14PNruY5MBJvpLKbHPwl1lIeTWSJ/hj/7+qM862JGvs32cjtA0hMNBaM0XuP96b+KMUgwp8dSDJzRtZM0jBuUvw/gsUyPM1YdopM3iwKioJPmWG2i75CpESkpvJ+JqiHtuJilM+s612KYdj+eYU3nMfxGnph2HTWJWESgUA0A5lng0bYNwCDSdnb4C8mx17HBamVjmxxIioSsWgFm+L9mUdCghaaGl2EpyQQhbutkRfcQaNyD5aG5El8luRwKevz9AYMVHCbVLMTT0pR6c4aunKB/GJJkaYe/rm0lf+FyvXpSA0+wz0bIy++xx8Sx4joxX/h177iv/pu3G23t17/8j5XJyFv4CXOuVc1EMCOVY4tFYAjJEEDuuQDYFtjoqnBamlPrQDDmoqsbxOMTzGW7NSal9Jm0lVmQYMqaboa6MbuGwTkSqE+nz0nb5NYRr6xJrnCLx9NHZ3hkek5VVTG9L5Ya0HyHDkrR/3UvmiqVmqXHYoMFbj/HaI0i3G21CUdwel9Q7b8Pz8usxfSzSaiHYo/zYqKjE0dqItnkRPDcPPCOvgFMx8lCOJR6N28AwqA/mI6Ugz1pHWaqVydsiHfcJStx3cpBnDQBfJR9B2K/Rvt1K2uRAVE7/yM86qBijs32sHjFHIJxOwjuqab/iGmRo39cH3aeJ09lunfc9ZF19t/DYieiVO3H/6a+0nH4OzcefSsvp59D2w5+QE0riMvvTbJuTQdrC5wjX1kV7XDK3fknz8qcJFhXgOO2UmKowLSk5bnhMeGvMjZYKCPlRKPpDOZZ4NBaDENQETAXWXFsd21OsTC71J3y1ApATrqcwsJ2vk00x6ZYtNoRFknaAuWo5co1ZHbbiiG6rFosF4XDgX7YCzz33J95IRcKI19meds9faDkrVmSy5azzSb7wgphzjYpKrIZACsEptdfxQ9sDJH+6lOCz9+GbNYlWW4h8kYHP344Ykx+z2gkHA6QveDQ2PPbCg2ifR4o2MyaAxT50fwjFqCWBaehRTK3ZorPTZ5bh2NLqyHAFSfIZCc+vdHKI9zOWp55GGA1/I/jqdTJmBGgptpHZGmbmZg8rjnBw4SIvnRaJJAcyGMD913uxfuub2I4+ckhsVQwu8Trb+wqPaYVjSV/4QsxclBq9jd+M/yV/3PEAdcFG2rJsaDVtBM4x59kHJhSR9sS/8C59H8dVP0cLScIl23Bf+ksoyCdj2VuEhAC7hrb+AUT1atOpXLAYkuOqLykUMSjH0hNfG7RUmhVh/jwzMZ7dxMTSMJaQHPQZLH1xiGc1b6afyxbHgcz2raOl2EbBsV6SC0J4aq0cs6qDx3+eQmmRzpTKLqlzkZqKbG2l46bbyVyxFGG1Dom9isGl58yScG2dqTjczbloE4rQMjNp/eFPu+aivPoc3qQU7
PU1vJpyB4bNimiD9nOuQC/IjxniZT/tFNpO+B6pny3HetghpL20AJ9VcmXbgyxtW0mhrYCnjrubGcdch6ZpplNRXZKKAaCukp40bAEjBJqFWl8+Ds1HfXqQA0rNGSxDEQoDOMK9EosMsSL1NADat1sI+wQZM8xw2BFr3WiGZPmRsaEJIQQiKYnQuvV4H31yaIxVJBwtL5eM11/sXcV1U2wVV8sPLiC/XWfcSVfSMulAOo45Bb26DttJx5PypzujOZW2q65HT3WiF+Rj8frRC/JpGpPCSa7rWNq2EjCbJC8uuZUGhx2cBcqpKAaMulJ64toUdSw1/jzyrPVsT7Uyucyf0I77njiNdg53r+TD1FMxEGAIWkuspBQGsaQYpHYYHLjezYpIs2R3hN2ONCTuu/+O4VJVPPsCQtPQD5xF5rK3yPz4PbOKKzkpbhWX0dSMXpBP+sIXSF/wKEZNLSl3/Jq2S67q4YR+QsrvbkXoQEctARnoJa+vmiMVe4JyLD2pNyVVEBpV3jEU2GrYnmJKuQzVaqWTE9vfpsGSx8akQwFoLTaT9RnTzcqco1d3UJetsXly74imcKZgNDbhefDRoTNYMagY0sAVaKTKX4sr0AgCtNQAenoQ7aBpaE5n3CouLS211+pEtrT1GgdsVFSiT5mE9ua58MhR2ELBXvL6hbYCbEKFUxW7h3IsPXFtACQIQaV3HGPsVXg7INkzdIn7To52L8cmA7yX9n0AQh6Njh0W0qYEELpk7uce9JARUx3WibBYQBN4H32KcJ3qbRlthIwQO/w1VPirWe8u5tby+WzxlcL7t6K/9G1sT88gbNF69aekPfEvcDjA7yd9waOkL3wBvSCflrPPJ+V3sYNazVLiSjM531JBzuJf8dTUu6POpdBWEB1HrFDsDsqxdMcwoG49CA1v2EFDMIvUnFIKt/vRQ3LIHUuy4eaktiW8m3YWLbr55m7ZYkO3S1InBknxGhy6zs2Hh9sIx/lPiuQUjNY2vPc/NKR2K/YOQxps8ZZx7pZfcubmK7ir8j5+XnAuf6t6goYjL4kep4UN3P98OKbJ0f3PhyEUigpOtl9/Cyl/utOUtp96QGyO5qVH0P77m67H27yIGeFUlsx6nNUHv86SWY8zIykyjlih2A3UFdOd1kpTJ0yzsMM7FiRoeZUcUOpHN0jIcK/+OK/5CQLCxqIMs1/BW6sTaNEi4TDJ0as6aEzXWD81TjjMopurlgXPY7S2DrHlij2lIdjMJSW3RPMdVYFabiz/Ez/MPZ1Aalc4S+oaKddcGdPkmHrHr3vJsrRdchUpv7uV7ZZmUj55l4zyDWR++h7t461Un3kfrouWYBQeCRkT0DSNPFs2hfYC8mzZyqko9ghVbtwd10YwgmBxUOkbB4Ant55D3zelXBiGN1lRoJxjOj7g9Yyf8IPmBTiNdlqKbeQd6cORE+awrzzY/AYfHmHnkOLeHfciKRmjuRnfcy+RfNXlQ26/YvcJyGDcJPoYWy4iOYOqaz7D5neTJsATWbF09rH0JcuiT52M12mlyjeWA8bDVn8pF2+dT1Wg1gx5nfMPZpCBpvpUFIOA+jrSnZovwQh3rViAhsx2ppT6EjbYayBc1PhPOvRUnsq5FoC2UitGUJAxPYDDLznsSzMcFoyzohJWC0iJ9+EnlNTLKMEmrHGT6BmWNM7ecjVHbr+WMxr/QVV6EPm7X8WsWERqatyEvnQ4GGsbz5RxGk3hZi7eenPMiujiqrtpSMtRJcWKQUFdRd2p+dIsJxaCKt9YrMKPzxMgxT30ifvuTPFv4fstL7I443zKbNOQIUFbqRXnxAC6w+C4T9tpTYHPDopfvSOSkghvryDwzrtDbLliT8ixmurF3ZPoT0y9m3/Xvs5dRdfx6ox/cVfRdfy56iHqp+WifbwYZ9lXWD95k9oCKxk9FY9fexYtK5ncdA1N63tFpMqKFYNFQh2LEOJJIYRLCLGh274sIcR7QoiSyP3IKDkxwlD9ebRPpdI7jrz8DRSW+bEkcMb9QLm44X5Sw638Zcx8gsJKyxYrQoP0qQEO/tpDZkuIpcc54p9ssyFDITwPPzG0Riv2CE1ozEiaHE2ivzLjQbL0DM7P/z42zcoO9w5Oap/Io5YrmdJoYwlfMK3+p3y3/kaa6MB3QCaZHy0le9vXZK54G+EMooV80cfva0WkyooVg0WiVyxPA6f22HcrsExKORVYFtkefhq3gb8NNPPNtcM7Fue49UzZar4hh7qHpSdpRiu/rr2DUvsMnsz+FcE2Hc9Oc3SxLiXHftLO6oOsNKX1doBCCITNRvDTVQS/+noYrFfsLpowk+hJwk5LsAVXdTGWHbUc4s3nbN9BULWT0NcbafvVzVxQN41bCy6jKlDL5SW343UkI2sqaD7pezROmkPzdy8mVF6PjMxSibciUmXFisEkoR+XUsqPgJ5jh+cBCyI/LwDOTKQNA6b2KzNxr5uOpdI7Dm1MKdO2+tBDcsg67nfFMe4VfK/1JV7O+jlfJR3RNbp4fIhvr2wnLCTLju5DfdbhQPoDeJ98ZmiNVuwVYSPEmK3NjDvpStJ+divOLdU0n3AazceebJYSX3Ml7f87nyvkSYAZ0kpt8tNy3hWxXfZnno/hqgd6r4i6lxV3H4kcrq2LOiOFYncYju/h+VLKGoDI/cgoQ6n5EqQBQqct6KQ1mIqWuoOiygBSG36n0skvXPMZG6zkL2PmU1uTRajDLD0u3BlkSqmPd46z95J4AVMSBF3D/8rrGA0NQ263Ys9wNvngnMu6Rg1ffGWvUuLkCy9AC5lCpIW2AvRAKK4SMv5AdLtzRdS9rDjeSOTQ+o3KuSh2mxGdvBdCXC6EWCuEWFtfX5/YJ6teA9JcmZR7i0ALk91chx4e/vxKd5Kklztqfk2TnsP8/LtpLraSVBDClhHmOx+0UTlG54tZfSfxjbZ2vM++NMRW71sM5XVpCYSjTqJz1HB3jIpKtLwcsFh4K3s+L078E8JqjVsZhr23QkPMY/UxErlzpaNQDJThcCx1QogxAJH7PlUSpZSPSinnSinn5ubmJs4ifwfUbQDNbOspcU+C7J1MKetAD0lCI6zbZ4ZvPZc3/I1PnSfyTP1FyDBkzvTzzf+2k9YW4tXvxk/iC4v5QnxPPoMMqgqgPWXIrktAszuiTiLe7HptQhFafj7uP9zNuJOupLA8gMjN7zUoLGPxS2h5/djax8yX7isdhWIgDIdjeQO4MPLzhcDiYbAhlqrVEPKBbn6jK3VPQozfyszNbqTGiMiv9OQHzQs4tuN97s+4g20VRaROCmLXDU55v5U1B1qpGBtfJkA47IQrKgksfW+ILVbsCa1JeWgvvNQ1u/6ph2MdxsLn8b74Kr4nn4lMlfwxoqkpOigsZ/tmslYtx3LgbDMcuivijEQeyEpHoehJosuNXwD+C0wXQlQJIS4B7gZOFkKUACdHtoeXyk9MqfxI4r7EPYn0ws+YtdmLJRQvYzH8CODm2tsZG9jBtW0PY1h00qcGOHlZK5aAwWun7Kr0OIzniQXxf68YUXT4NL6/YDa25R+Q9OLj
VExx4vrgcQJbV5Dx4Tu0X3U9nt/+IXp85wqjc1CYPmG8qRM2gMbHeCORB7TSUSh6kNAgj5Ty/D5+dVIin3e3qfjYvI+sTLa6JzLV/xAOnzGi8is9cRrtzK++jF/qL/JB64kcM/Nj0jaFOf6jNv5zQjo/esvL2PrYxKsQAiwWgh99SmjTZiyzZg6T9YpdYUiDhmAzpAf57TVWWnKsnFt6U0xj41vZ8xlXG6tcvTcrjHgjkbW83F06JWkYZg7G7zdXPP0cr9g/UFeAt8WsCNPM0FHQsLDdYWN2qSuSXxm5jgVgTLCKv+24iNerzsTvTGHLrCM4c3ELWljy1NnJ8U9KciD9PryPq1XLSMRUNy7ljE2Xcsz6s7gzeClhi6dXt/zvvAtIez22y1574SVaHHu+wtidlY6qIlP0hXIsVasg5I/mV7Z7xxOeuJmDNrQjBSOq1LgvJgW28dMv/8bOtgLGzmnht2nPcti7qSw/wha3QkxoGggN3wsvE95ZMwwWK3ZFQ7C3lle5f0evbvm6UBPt0wpJWb4UZ/EGdj7/Ad9fMBu3f2je1qqKTNEXyrFsXxkZRdyVX7EXfsmMrV6so0izMSfcQNZn25hjW8/BYzbwxroF+JvHMv/nqbQnx+nGT0nGaOvA++Ajw2CtYlfE0/K6r/pJHpscO4TrnoK/UlyeTepFRaRdNpHDbiugplnDNlTKLKqKTNEH+7djkRKKIxLjkfzKFl8Rs3xrsPvDBEeZdJK70oJoDvDHglu51HU/7QtvpCQrh9uvyuulfCx0HQR4n3oWw9VnxbdiGIin5eUKNpKt5fF76+O8lPc6v7c+Tl5wMrnpGhMih04ogEV/hryMITJUVZEp+mD/diy168zhXpYuGZSP01M4+sv6ESE8ufsIGr6w40gL8Yv8h7h31e+wvHEun8xx8purs/H1eL+L5GSM1lbc9z44POYq4pKjp/PUlL/EanlN+Qv59lQOGZNNUVIBh4zJZso4jSnj4NOHoPxl837OpKFTvldVZIq+GGGtf0PM1rchHAS7M7pr09gWrnmlGT08PIO99hZ3lRV3VZCcg30cWP4l89+AG2w/Y9lpL1B7V5gbFnRwcGQgmLBYkELD+8QCkn52PpaZM4bZegWA5m1gxhu3seTISwikZGNzN5Lzxm1o33+IgqyCXscXZA2DkexZFZli/2D/vQKkhC1vAF0OxBVOJU+sI88VJDzqVitd1H/mQGiSMd/0MMv/FTctXoH/mVvYacvixlvS+P3VTraNN2NjwpmCdHvouOMPSDkye3b2O0J+tM2LyHv6DAofPIa8p89A27zILDIZYexJv4xi32f/vQrqt0DD1mhTJMC/kw/gmK9qsAYlQevodSzBDg3XZw5SxobIPdTHSe1L+Pa6SuofeYjjllhYM1PnyrvS+c21ToonWxFWK4Fly/E98/xwm64AMzSbMSF2X8aEmJCtQjGS2X8dy9fPQdgPlq4O9TcK7Hzzvx3ohhwVZca7oq3ERmuxjaw5frIP9PGrurvI8HawevXfuOf6Ws5Z2MjXUzWu/k0ad9yWS4vDoOP2Owlt3DTcpiuS8+CCxV3OJWOCua3m0StGCfunYwl44OsXzJ8jYbBtyRbC/gYmVXhBDw+jcYOHa42d9nIruYf6mHNKDfdbfo4jI8Q7+Rfxs9UuFjxQwiWf1lI8Gy55aCxVlmZ2/ugHhCu2D7fp+zeaBnkHwhWr4Mbt5n3egWoevWLUsH8m7zcvAnc9WJOiu54ck86Ji9vQgxqBpNG9WoliCGpXOvDU6mQf7OdbR33Jm9r3tkZsvAAAIABJREFU8GrJOMNh7NLHVe1ufvpOLYumZnP/rTlc/ZdaXCfPpeyGy5n87T9w2AzLLjU4wzKMhmZKxSgGDUNAg81KwGqWH+eI/fVboGI0sv85Finh88dAhqNNkeVJFpakp/Lge3V06CmkaJ5hNnIwEbSV2GjbZsWRbUAKvJD3Uz6Vx/H9Hc9yrOs/CCkosuaSm38aD59axxVLlzDlfx/i9eJFXDfnZ5z/7QOZlJWC2/DSEGyiOlDHdl81lf6dNIWasQoLBziKODbtcM7LPZ0xNhWy2Rs6JV06u+87Rwd3TnlUKEY6YrRUAs2dO1euXbt27x9o27vw4rlmQ6Q1CQlcOyMb9xor1/9fE9LhwWLZtzuHvSKJO8Y9zFfJRzA+UI5d+thmn4nA4JiO5ZzZ9jQzrP/P3nmHx1Fdffi9M9vUezGWJRcMuNCdgCEhlBBIgmPqF8pH6IQSSIAvtCQQUqghkEYIHYIpCQ49kAKYEIwBG4xx72pW16rv7rT7/TG7qivZVlvJuu/z7KPd0ezOGWs8v733nPs7n5HZYFCb4+XNY/JZ+cU0Ns8JEPG4zsoStwuhjoYEbNzpw1Qtmf/J+yZXTTqPdE/qQGEMlTExRBq267IbtUYDC9Ze3GP1fZGvkFdnP0K+L2dYj6UYEcbEtZlIJtaIxTZhyS/ANiCQAcA/c5J4KzOJu58OUe/Lo0DfyJ5+XSTJEHdXXMgbGafzfuqx2MLDhfW/4fiWlymwot5hUqJ7HGY6YaY9V07bohRMv822WQE2zUln06w0NsxJJ5jXValkS5sOJ8zjNX/lraal3F7yfxyefnCCznL8Es/SpcKoxpCqOZuiCyHET4E2KeWvEh1LbyaWsKx41HUy9gZACMoDOj/dO4v5bxmU7GhnbdYkCqw9W1RieLBZ0Pw8C5r7aVMsBGFHx+iQZH3VIUMz+HzjHGZ+spEDl5dh6wLTq7H2oAze+mYhHxydC16dND0FU1qUhiu5aNMNXLnXd/hu4VnoIn7jMUVfYpYuvUcsPjHOPIYUE5aJM2Fbuw7e+RlIB3Q/DV6NK2fl0mbC+Q92sDl5H6bbaxId5ZjDMQUN72m0Rzzsf8IGNp99CBV5eUT8GrotOejDINfctp77v7OC416rxmM6eIWHdD0VQ1rcX/k4l2/+CQ1mMNGnMm7I9Wbx+D5397R02educr1ZCY5MkWiEED8SQmwQQvwb2De67RIhxMdCiM+EEIuFEMlCiDQhxDYh3G8jQoh0IcT22OuRZmIIS3sdLD4XIk3gS6Uy4OHCuXmsSfFy1X1NZDTZfJY5hxTZnuhIxyZSEP7AoPSjbA7KXkXqwiSMXD/hJJ32VB3LI5iyvYMr7tzIr89bwZFv1aJJSNNT8Aidd5o/4H/WX8WKttWJPpNxgSY09kuawauzH+HDA1/k1dmPqMS9AiHEocCZwMHAqcAXor/6m5TyC1LKA4F1wEVSylZgCfDN6D5nAoulHJ351IRdqUKIE6PKu1kIceOIHSi4DZ45BerWIr0pvFaQxukHFbA61ce5z9TzpbdCPLXXhRwTem3EQthTSF1TzfK3Z6B7HfK+BUlzAA1Mn0Zbqo4ZFZhrblvPHZet5KAPGwkIH0lagLJIJRdsvJ6Hqp/FcFSuYGdoQiPfl0ORv5B8X44SFQXAl4EXpZQdUsoW4JXo9rlCiPeEEJ8D5wBzotsfAS6IPr8AeHy0Ak3I1SqE0IE/AF8HZgNnCSFmD+tBbMtdBPnYsciqT/koN5t
LDpjE/+2bQ7MUfO+P1Zz5WDtvZJ1MkqeRPKtm55+pYGbpKt5/cy7/aTqK1C9oFHzLJrXYAA0sn0Z7io4tBPusaeHm69fwk2s/Z95HLaTJJDqcEL+qeJhT1l3GP4PvYclx1PBGoRgbxCvjfQL4npRyf+A2IAAgpXwfmCqE+AqgSylHbcogUcn7LwKbpZRbAYQQzwELgaH5iVgG1K+HrW/Ruvo51hplfFTg5x8F+7Al2UtOjcnCvzWw4G9t5O7QeDbvItZm7sutVT8Y+hlNII6s/xfL/zWfm/f/ORdPe5x9j9lARodBZJtDqMpDuFGno0PDH5Ec8mGQA1c0Ub1XgA+PyuXTg9NYO309V4V+SrF/MgtyjuWItEOZkzKTJC2w84MrFBOX/wBPCCHuxL13LwD+BKQBVdH8yTlAZbf3PAU8C/x8NANNyDoWIcTpwIlSyoujr88FDpNSfq/XfpcClwIUFxcfWlpa2vk7KSW/3v5bIpteJ2yHiNghwnaYWp+g2qdz5N9bmVxm4u9wSGu1KS6LkFYbQNpeVqccyBOTL2a6tY5L6+7Bi/rmPBia9UwezfsB4ZIcvjXpVeZnLiVJhNBwCFt+ktra0MMRrDKN9pU+HENg6wLbI2jK9FBV6KM500N7uhfHqxPwp9Iwbzo7TjyIdD2VgObDIzxcWngWGZ603odPWPneQNelQsEIXptCiB8B3wFKgQrcL+PtwPXRbZ8DaVLK86P7FwLbgElSyqaRiqtPnAkSljOAE3oJyxellFcN8J463H+44SAXqB+mzxoqKpb47CyWeinliaMVTH8M4bocS//WQ0GdR1/GxLUJnV/iF0opzx3N4yZqKqwCmNLtdRGwY6A3SCmHrS2dEGK5lHLecH3eUFCxxGcsxTIQg70ux8v57Qx1HmMXIcTvcPPY3xjtYydKWD4GZgohpuHOB54JnJ2gWBQKhWKPY6AZoJEmIcIipbSEEN8D/gHowGNSSrU6UaFQKPYAEmbpIqX8O/D3BB3+oQQdNx4qlviMpVhGgj3l/NR5KPowbtyNFQqFQjE+UMt5FQqFQjGsTCx3Y4VCoZgACCFygLeiLwsBG6gDpgI7pJTD63TS+/hqKkyhUCj2XLr3bRFCTAVek1LOHeRneaTcuReTmgpTKBSKBBMxmF9Ww9ItlWwrq2FpxGD+CB5OF0I8LIRYI4T4pxAiCUAIsUQIMS/6PFcIsT36/HwhxF+FEK8C/9yVAyhhUSgUigQSMZi/ZjuvHH018/c5m6lHX+2+HkFxmQn8QUo5B2gCTtuF98wHzpNSHrsrB1DColAoFAmkJsi9p/+E3NJow9DSajj9J+TWBLl3hA65TUq5Mvp8BW7eZWf8S0rZuKsHUMKiUCgUCcS0mFRa3XNbabW7fYQOGen23KariMuiSxN6W43vVhdEJSwKhUKRQLweqkoKe24rKXS3j3Io24FDo89PH8oHKWFRKBSKBFKQxXUv/Jz6mLiUFMILP6e+IIvrRjmUXwGXCyGW4ro9DxpVbqxQKBQJJmIwvybIvabFJK+HqoIsrvP7+CDRcQ0WJSwKhUKhGFbUVJhCoVAohhUlLAqFQqEYVpSwKBQKhWJYUcKiUCgUimFl3AjLiSeeKAH1UI/YY0ygrkv1iPOY8IwbYamvr090CApFH9R1qRirCCFsIcTKbo+pcfbZSwjxwnAfW/VjUSgUij2TkJTyoP5+GbXA38EQV9nHY9yMWBQKhWKPxYrMp6l0KY1bttFUuhQrMiLOxr0t8IUQU4UQq4f7OGrEolAoFInEisyndvUrPHtaLk2lkFkylbMWv0L+3G/h8Q9l9X2SECLmYrxNSnlK9Pl84AApZWO86bHhYERHLEKIx4QQtd0VUQiRLYT4lxBiU/Rn1kjGoBhdHOlQazRQEamm1mjAkU6iQ1IoxjZt1fd2igpAUyk8e1oubdVDtc0PSSkPij5O6bZ9tyzwB8NIT4U9AZzYa9uNwFtSypm4PZlvHOEYFKOEIx3Wh7awYO3FHPbZKSxYezHrQ1uUuIwgtg3X/BaO/T7sUHUE4xPHmtQpKjGaSt3tI8NuWeAPhhEVFinlf4DeyrgQeDL6/Eng5JGMQTF61JtBLth4PRWG21yiwqjmgo3XU28GExzZnsubH8ETb8KyNXDXokRHoxgUmqeKzJKe2zJL3O3jlEQk7wuklFUA0Z/5/e0ohLhUCLFcCLG8rq5u1AJUDA5Dmp2iEqPCqMaQZoIiGhnG0nX52lIwLfDo8NJ70B5KaDiKwZBaeB1nLa7vFJfMEjhrcT2phaNtmz9sjOmqMCnlQ1LKeVLKeXl5eYkOR7ETfMJLka9nx6IiXyE+4U1QRCPDWLkuHQf+9TFoAgI+aG6HZWsTFo5isHj8H5A/91tc9O4HXLN5Oxe9+8EwJO6RUqbG2faElPJ73V5vl1LOHcpx4pEIYakRQkwCiP6sTUAMihEg15vF4/vc3SkuRb5CHt/nbnK9qj5jJCirgWAreD3uw7Jg+fpER6UYFB7/B2SWHEH2jGlklhwxVFFJNIkoN34FOA+4M/rz5QTEoBgBNKGxX9IMXp39CIY08Qkvud4sNDGmB8bjllVbwbLd0YoQgIAP1YhFMQYYUWERQjwLHA3kCiEqgFtxBeUvQoiLgDLgjJGMQTG6aEIj35eT6DAmBKu3gu24+RUATYPPNoOUUaFRKBLEiAqLlPKsfn513EgeV6GYCGzdAYIuEfHobp6lsg6K+i2JUShGHjVHoVCMUzZX9LTS9eju1Ni60n7folCMCkpYFIpxiOPAlh2gd/sf7NHdBP6GssTFpVCAEhaFYlxSE4RQBHS9a5smQGiwddwuq1MMN0KIHwkh1gghVkWt8w8bYN/zhRB7DcdxlQmlQjEOKa3umbiP4TiwpTIxMSnGFkKI+cBJwCFSyogQIhfwDfCW84HVwI6hHluNWBSKcciOBtcnTO/1P1jXYFN5YmJSDJ6IY8yviFQv3R6u2FYRqV4acYzhsM2fBNRLKSMAUsp6KeUOIcQtQoiPhRCrhRAPCZfTgXnAoujIJmkoB1bCohgSys04MVTVu6OTeMJS1wRtHYmJS7H7RBxj/obQ1ldOW3f5/CNXnTH1tHWXz98Q2vrKMIjLP4EpQoiNQogHhBBfiW7/vZTyC9EV90nASVLKF4DlwDlRN+QhmQMpYVEMGuVmnDgq690y497rVXQdLAe2V8d/n2LsUWc23nvJpptyu5u3XrLpptw6s3FItvlSyjbgUOBSoA54XghxPnCMEOJDIcTnwLHAnCGdQByUsCgGjXIzThw76nuWGsfQNTf3sk0l8McNlrQmxTNvtaQ9ZNt8KaUtpVwipbwV+B5wDvAAcLqUcn/gYSAw1OP0RgmLYtBMFDfjsUhZDXGVxaO7uZftSljGDR7hqYpn3uoR+pD+ikKIfYUQM7ttOgjYEH1eL4RIpWe/+1YgbSjHjKGERTFoJoqb8VikqsG1cOmNEO72bWoqbNyQ582+7uGZd9R3N299eOYd9Xne7KHa5qcCTwoh1gohVgGzgZ/ijlI+B14CPu62/xPAg8ORvBdSxhtQjz
3mzZsnly9fnugwFN2I5Vhi02ExN+P9kmaMhvHkmHDDSsR1adsw+VQIG5Aa579/sBWOnwcv3TGqYSm62O1rM+IY8+vMxnstaU/yCL0qz5t9nV/zjVuHY7WORTFolJtxYmhoca1btH5uX7rmrspXjB/8mu+DIn/hEYmOY7hQwqIYEsrNePSpb3ZLjeNNhYG7vaYRIgb4B1oOp1CMEOqrpUIxzqgNupVfA41YbAfKVQs9RYJQwqJQjDPqmsCR/Y9YdN0VljIlLIoEoYRFoRhn1DcNPBUWG7GUqsowRYJQwqJQjDPqmt2f/ZUeacLtIqmmwhSJQgmLQjHOqA3Gt3OJIYQrLmU1oxuXYuyxO7b5w4mqClMoxhm1QXdEMhCOVKvvJzqDsM0fNhI2YhFCXBNV0tVCiGeFEMPuV6NQ7IlUNfQ/Womha64R5ThZ/zzhkZHIfLu0fKm1Zes2u7R8qYxERtI2f3tUZBBCzBNCLIk+/6kQ4jEhxBIhxFYhxNWDPXBChEUIMRm4GpgXtW7WgTMTEYtCMd6oDfZfahxD16ClHZraRicmxeCRkch8a/XaVxq/csL8hr0PmNr4lRPmW6vXvjIM4tKfbf5A7AecAHwRuFWIwfkzJXIqzAMkCSFMIJlh6FqmUOzpmJYrFvEqwuTkz2DOP8H24K2agV03lb+UN9ORsQlLWpyc8zVmJk0d9ZgVA+NU197bdNo5uU5pmfu6tIym087JzX73H/fqJVMGvRpfStkmhDgU+DJwDK5t/o07edvr0RFORAhRCxQAFbt77IQIi5SyUgjxK6AMCAH/lFL+s/d+QohLcXsJUFxcPLpBKhT9kMjrsrEl/uJIOeVTOOVm8IZACvQDNDQjibuaw6QE2/DbFi9uf4pn9vsNU3MPHdWYFQMjLXNSTFRiOKVlSMscFtt8YAmwJNp/5TzAomu2qncKItLtuc0gNSJRU2FZwEJgGrAXkCKE+N/e+0kpH5JSzpNSzsvLyxvtMBWKuCTyumxo6buGRWoWfO1XrqiEUyGSimzPwHjxe9y6rI6/v7uSKzZVoBtt/HLFlWCo9pJjCeHxVmklPb+gaCXFCI93JGzzS4HtuA3AAE4byjH6I1HJ+68C26SUdVJKE/gbsMcYsI0nVGvh8UVjS3TVffcRy4z3Ib0aTD+x1S2a1+QOnuarjRXcNauY3+8zBZ8jWZ5kUbrs9oTEroiPVph/XebiRfUxcdFKislcvKheK8wfKdv824DfCCHewx2VDDuJyrGUAYcLIZJxp8KOw+23rBhFEmx7rxgEja1x7Fz2fx00G4wuD/3jGmpYkL6M33WcwdLcFQDU+304SF7Z8SJXha+HQPooR6+Ih/D7P/DMnf2t7Hf/ca+0zEnC463SCvOvE37/kGzzpZQriP+F/T1gnzj7/7TX67mDPXZC7h5Syg+BF4BPcBvOaMBDiYhlIqNaC48/GltAOl3lxtLfBkWfg6MTG634bZv/276B9ezFAzWXdr5Xx/2PtjRNwMbXRz12Rf8Iv/8DvWTKEZ4Z06fpJVOOGKqoJJqEfS2VUt4qpdxPSjlXSnlurNZaMXqo1sLjj2DvqbCilaAbYHVVhZ5ZXU5hJMz9/uMIt+yFdLr+m4c1jQ2pflrX/mWUI1dMJNR8xwRGtRYefzS29rJzKfnEnQZz3FntDNPgwsptfJidxsrsZKSjY7bmdr7fKyWGpvFp03JoU54vipFBCcsEJtebxeP73E33XtuP73M3ud6sBEem6I9gay/zycmfRZ+4W8/dUUaabfLwtEl4M9zRqNnU9eXBK8HQBJ+memDTP0YnaMWEQ3mFTWBUa+HxR6wqDKL5lawKsN3/xqmWyf9Ul/FebjqlKQG8pissRnMBKdH3C8AWgo2pfih9Dw7+zuifhGKPRwnLBEe1Fh5f1DZ1G7HkbwTd7hSWM6orSLMtnp1SAIA3rR4hHMymbuvspORr/2jiyOWthFJfJ3CShfCo24BieFFfTRWKcURDM3QOKAs2gmaBreNzbM6pKuXj7FQ2p7plx0K38aTXYjRHp8Kk5NLf13LNfdXMWB2m5fUW2m8c6lIJhaIvSlgUinFEY0u3irD8zdEnGsfX15BtGvy1qKcTgDejujPHcvwbzXzt9Sbqcj1c+sdpNB0KHY88g/nJytE7AcWEQAmLQjFOCEegI9JNWHK3QjTfckZNBZXJPj7NSO3xHm9GDUZzAXk1Juc+WoduS4QjkQhWHJ+CIELH7/80uiei2ONRwqJQjBOCba5PmNBA6gZkVoGjM7O9lQNam3h1Uk6fRi2+zGqstkwu+U0d6c02oSSNjFaH5A6HrUlJ+Pc1ibzyOnaNKj1WDB9KWBRDQnmNjR7B1m6LIzMrQTNB6pxWU4GpCf6V37dM3JtRxcIdr3Dwig4Mn8DRBQIoqDEpC/gJzDYhHMJ47c1RPx/FnosSlj0Yx4HqRiitdn86w3zPj3mNLVh7MYd9dgoL1l7M+tAWJS4jRGOL2xFSE0B2GWg2Hkvj6/XVvJebQau3b3VXoVPKNRt/Q7PIwvB1jWYKak0q0v3oWRLhtYgoYVEMI0pY9lAcB1ZvgyMuh+nfdn+u3ja84qK8xkaXxtZulvnZZaA5HN7YRJpl8lZ+Zp/9hSO5+rmP8TgW7+Ye1WOarLDGpDrTi2kLPHkO5gcfIsPhUTwbxZ6MEpYxhHQc7Ooa7NIy7OoaZBwV2NWpp9omOPlmd7QC7s+Tb3a3DxfKa2x0iS2OFALIKQUJ36ivptWr82lmap/9T1ncyGErgjw2+TI2Jc/u8buCGhNbF1QaPrwlNrKjA3P5J6N0Joo9HSUsYwTpOFifr6Hx8GOonzqbxsOPwfp8TQ9x2Z2pp4jRJSoxSqvBGMZ7vvIaG116GFDmbkWT8JVgLe/lpmP16lU8d1UHZz7dgJCSz9IPoNJX0uP3BTUGEiiTAfwzIkjTwvzvuDbUVYwhlLCMEZzaOpoWfpsefa8Xfhuntq5zn92ZevL7oKTnPZ+SQvAN4z1feY2NLo2tUVHxuBVhRe0GSbbN0pyMHvtlN5hcc08V/ohDKEmj2NxGmW9aj30m1bjfMMq8AbRkB+FxMP7z/midimIPR3k5jBUiEeL1vSZidL7enamn/Ex46fau6bCSQvd1nKn4QaO8xkaXTgPKzErQLOY1tWBogpXd1q7oluQH91STV2sSStJACEqMrbybdiKG8OGT7vWU2WTjMyTlAT/YoGeBufwTnLY2tNS+02oKxe6ghGWs4PejlRT3EBetpNgdekSJTT11F5f+pp40DeZOg6V/dKe/fF5XVLRhvucrr7HRo9OAMloR9tX6Oj7NSsXQu/6oC14KcuBn7Z2lxQDFxhYkggrvVKYbGwFXoApqTSoy/FAv8RRK7A0RrFWr8R1xeALOTrEnob5ajhG0/DwyX36eHn2vX34eLb/LomN3p540DQqzobjA/TncoqIYXepiBpRZFehYHBps4aOstM7fF5VFOGtRPcIB09f1xy42tgJQ6p/R4/MKakx2FPiwgxreKSbStLBWrhqNU1Hs4agRyxhBaBqe/eeQvewdd/rL70PLz
0N0UwM19TSxqY8ZUGbsIN0y8UnJJ5ldwnL2U/UkdTh0JPe8HoqM7QgkZb6+wrLqgGSMBo1AiQEEsD77fBTORLGnkzBhEUJkAo8Ac3Edjy6UUk7oshShaeiFBQPus7OpJ+k4bsI/EnGn13qJk2L80mlAmVVJUThEg89DZZI7VTqlNMJhy9qwdYHUetq6+GWEQrOyTwK/sMbE9ApqDB9T/SGEB8yPVozW6Sj2YBJ5x/kN8KaUcj/gQGBdAmMZd8Rbz7IrJcuK8Uk4AqGYAWVWObNa2/ksM6Vz0eOCl4L4DEnEL+K+v9jYSmnvEUutW3Jc6gkAoGeCXVaO09AwkqeimAAkRFiEEOnAUcCjAFJKQ0o5jEv3xj8D2bH0u56ltnanJcuK8UmwLbo4MqkNPdDMjPYOVkYXRSa32Ry1pAVbp89oJUaxsZUK3zQcetq6AFQG/CAlngIHTBNr1eqRPyHFHk2iRizTgTrgcSHEp0KIR4QQKb13EkJcKoRYLoRYXlc3cW6OO7Nj6W89ix0O77RkWTF0EnFdBqN2LnpWFR5hMDlksDrd/S9z6PJ2kkIOhq///87FxhYM4aPGO7lzW06DhZBQle3DaYkm8C0Lc6XKsyiGRqKExQMcAvxRSnkw0A7c2HsnKeVDUsp5Usp5eXl5vX+9x7IzO5b+1rPYfk9nVVmM3iXLiqGTiOsyVmqsZVbhFRaZhklFkh+Aw5e2uh2K9f7fH6sMK/NN79zmsV1xqSrwYjUIvLkmCIG1Rs1KK4ZGooSlAqiQUn4Yff0CrtBMeKTjkN5ew7+uLePdW2o4fJY7TOlux9KflUooO2WnJcuK8Ulja9TZOGsHHizaPDpSCJI6bA5d3o7loU8vlu4UR7YAUNpNWADya02q873YDRpaqonQJNann43kqSgmAAmpCpNSVgshyoUQ+0opNwDHAWsTEctYIpZ8b1/4bVJLy9i3pJgXnnqe05+ZQ1VQ67Rjia1nuWDj9RR4svlZ0nnsqxfhC0YQc2YNWLKsGJ80NLtTYZ6sCvIjBlujfe0PWNlBICQxvQP/jTOcZjLsIKW+vXtsz683+fyAZKxG9/1ausAuLcNpaUFLTx+Zk1Hs8eyysAghDgG+hFsa/L6UcqhWqFcBi4QQPmArcMEQP2/cE88vzPOdb3P/U+/gn1zQaccSW8/yxqzHSFlfRstxZ9FSWtY5QvHsP0eJyR5GU7TJV2rOeiaHIqxPSwZg1toQui0JJfU/WokxPbKBLf59e2zLrzVpzPTQ1qqTBnjyHIxtJtbqtWoFvmLQ7NLdRwhxC/AkkAPk4ibdfzyUA0spV0bnqQ+QUp4spVRNPPrxCztwisHcaT1XzmtCIyNo0LLwLFUFNgFobHX//lpmOXuFIqxPdYVl7qoOd4cBpsFi7B1Zxzb/vlh0JWPyay0AqlN8yBB49rKQloW1esJPICiGwK5+rT0L+IKU8lYp5a3A4cA5IxfWBCXqF9YdraQYPckX345lF4wrFXsGjS0gvCHspDYyTYtGv5eUVpup2yNYnp2LCsDe4fWYwkt5t4WS+XUmEthR4MNq0PAVRhP4SlgUQ2BXhWU7EOj22g9sGfZoJji74hfWg36ESFWB7XkEW0FkVOMVJmZ0dLLvhhAek10WlhkRt9pri39W57b86FqWmmgCX083QEqsz9RaFsXg2VVhiQBrhBBPCCEeB1YDbUKI3wohfjty4U0suvuF5W5fR/aydwbMl+yKEO1KV0rF2KehGZKyt6Dj0Ka7U1mx/Iq1i5nSYmMrXmmyKdAlLBktNn5DUpXvjljQJFoK2Bs2IiORkTgVxQRgV5P3L0YfMZYMfyhjj0T4bvX2C3OkQ53R0K/ppFaQR9bbryN0HZmcjJ6T3RljrMosVhCgkvvjl7omyJ/mLlxsjJYH7rMu2qN+F/IrAB5spkU2sLnbiEUAebUmVfleV1gAT47ErI5grd+I98D9h+8kFBOGXRIWKeWTIx3IWGMs3JRj1i2xVfYxm/z9kmYgJHHjIye76/39dKXMXvbOTs0uFWOLhhYoytmEx7SoTPKj2ZIZm8MDLoqMx96R9fw39asXFj28AAAgAElEQVRI6DR3ya9z17I4LQIs8EyyMcvdBL4SFsVgGPAOKYT4S/Tn50KIVb0foxNiYtiVVsFDpec0VTVWXX2PKav+rFuqQkGcmvi+YJHqHVR2NLCj3sHqiJ/ct0Pxpzi6+5PVNUF1Q3yvsvjxq2m2kSJmQOnJrqQgbFAd8FFQbZIccrD1XRutxJgRWU+Lnkm9p+uLRV69RXW+FykFVqPAO8lN6KsEvmKw7Oyr9/ejP9cBC7o9vgWsH8G4Es8IV1z1dSI+Fnv1Wpq/fV6nK7HlGHGtW8obTZyOUNz4nFAHp268mM3GFgzNFze5rzl2HxHo7k921m2wZhsccUV8r7L48Ssn5ZEiZkBpZDSSZllIIZi2NYJuyd0WlplhVyw2dZsOK6gx6UjSaEnTsRs0PNkRN4H/+ZphPQ/FxGFAYZFSVkWf7i2lLO322A7sN+LRJZIRrriKNyJqueAykm+4tnP0Eahvj2vdUlvnxZR6/Ph0nQqjmh/WXk9Dhp/UF57rkdxPf/QBWq67qc/Iq7s/2fVnw4V39u9V1l/8ag3NyNDYAo5m0J4cQY/q9rRtYTRnYH+weMRaE28OzO7cllfnVobF8izC6yD8EmvVaqRtD8s5KCYWO5sKu1wI8Tmwb69psG3AHj0Vttulv7tLPyMiLTur83k43Mq9037UoxXxQzPu5M6Hs9jUmEz64w/2FI3HH2SbVg+4I5u6UAQnN4+0++4ia8mbpN13F+0/ug3z5df6jLwiRpeQZKV1PY/R3atsoPjVGprhJ9gKaZkb0YSDFU3UT9scnc7cxcR9jGSnnSnGNtYHDujcFlvLEvMMA9CzQLa1YW/dNiznoJhY7Cx5/wzwBnAHPd2HW6WUjSMW1RhgV1oFD4noiKj7zVkrKcZpDHY+b9ZNshos/p1xF7bPw6bUVpoq8/lwjcbPXs7mmXMnkf7AfYiUFGR7O+0FqdzU8gfAFaGqGi8ppkXeNTf0OQ5+b1csjsMkfy3bH45Q0ejHkPmUFGo9xKWkkE6vsoHiV2tohp/GVpiUt4Iw0OrRQUpmbgrjDPJSnB1eyQcpx3Qm8GNrWarzvVgfaSDBW2Bj11lYa9bhmbn3gJ+nUPRmZ1NhzVLK7VLKs3pNhe3RohIjVvqrl0xBLywY1mqweCOi9McfpOOuX3eOjoqNdPKPvZiO6YdgHnkSc7ZKFr2SRkkh3HqhhmfvGXgOOQhtajHmQfvxQ+05VnSspchXyD35d/P44ix8k/oeh8UPU5YewZGOmzip/RzfY4cz5c9Tmf/+4RyY+jkv3OZQEp2FKymEl26n06usv/iVk/LI0NAMWbluvqPe7yWn3iK1zd7t/EqMOaFPadEzqfSWAJAUkaS12lTle8EW2M0C72QTadkqga8YFAnreT/R6Tsi8iJ1DxnPPwV+H1LXaf3CUT1yGO2nnM1v3nuH5otdQ0pN0yBaNpwsHe7IvYHb5LUI24sWyuKP12rkZ0JT
fhGVbz1Ipu2nVmvlF+HHqNnYyKuzHyHfMGHRQmgqdQNrKiXzlYXsf9Eylj5QiGG5IxX3eAPFr5yUR4pgK/izSkm2bRq8Xg6q6EC3wdzFFfe9mR1aCcCapIMoMt2/e36tRVWBO9q0GwSeSQaIgErgKwaFEpYE0nsxZHfs0rK4OQyfY1CY3Xd/ISGn0Yre5AVaPsTWUXYQ4ZsNN/R5jyFNsCJdohKjqRQvEQpzBh+/YvgItoLMqiXTsGgPeJhcYaDbknA//e13xlRjMylOG2sDB3FCy8sA5NWblJe4jcOsBh3fDBOhg7ViJVJKxG7mchQTG/X1cqyyG1VpOyv97a8xmE94weOHzJKeH5hZ4m5XjAkamyUdaa34on/PyeUGyP772+8MDcl+4VWsSerqrVdQa1KT58HS6Ezga9ngNDaqBL5it1HCMpI4DrRVuyOCtur4qwxxV9jXGg1URKqpNRpwpNOZw/AuPImMvz1L1n//RdZbryNy+w4jdlb6G2sM1r267PF97ibXmwXJ+XDOy13iklnivk7OH4F/EMVgMNqqaEgWbickoLh06B5eB3V8xFb/PgR1d/g7qdrA1gU1+V6seg2QeCc5YJpYH68Y8vEUEws1FTZSRJPinfmL2A07f/8eyYoBbVvm7EfarTfRdMpZPWxbRG9bmZ2U/sYag706+5G+nmMCN6bvLnOnxTx+V1RUrmTMoNkfYwsI6xpIScn2yKArwmIc2rGUR/kBnyTP57jW15lc6ZYclxb5mVxt4rRoeIsMwqt0zI8/IXDmGcNyLoqJgbp7jBQdtX2S4ixa6G6nyz6lsiO+bUvQbCZcu6NTVKDvSCT2GRFt59NmmtDI9+VQ5C8k35fTw8gSTYPUQlf8UguVqIw1vG4P+iavh8ygTUr77lu59GZmeA2pTgvLU44EYPIO90tI2WT3mrFqNby5BiAxP/x4SMdSTDzUHWSk6JUUd4oOo/bk31OhW9QaDWyudLj81w6GNLl/+i08svedHJIyB3DFJSQjBNvr+h2JdLdgOf6XeThPq9LfPRUtZRNSQLumRxP3DFlYdBwOaf+AFclHIIHkkENW0KK0KJrAr9PQkiy0JA1r/UacpqaBP1Ch6EZChUUIoQshPhVCvJbIOEaEbklxp+gw1p96HwtaH+Wwz89gwdqLaU8q56fXlrPd2gqAT/Pys+JrOCRlDkW+QmxpUyWa+h2JdLdgWbZO49Sn57D9qXfI2rrzPi6K8UM4AqRXotsaQggmV7oVYbtr5RKPQzuWUu8pYJtvHwCKKo0eIxYAT74Ew8D8cPnQD6iYMCT6zvN9XIPLccPOHH07E/FeSe1l/8E+8ocYJzxBWpOX36VeyaHJs6kwqjF9TbR76rl5+z2cvv4Kbt5+DzqCpzNvZknmvRQE4d/6OvS/P03m64vJWvImma8vJv0fLyL9NplUMym769jL1mkc/pMCqpN3bTGnciYeH9Q1SSLpQZIjGgIoKnenrOQwVP9+qe3fCBzeSf86AJN3mJRO9uEI3N4sNniLLaRtY7z97tAPqJgwJExYhBBFwDeBRxIVw+6ys7LeWCJ+wdqLOeyzU7mp/PeYhWfTevSp+PY5mmlfvZqHI+dzaPJscr1ZXLP1F1QY1RyaPJvHsm9k300RjCNPomXaAbTOP55rW45CFx5arriG4NEn0nLFNWitQfTnTyXw+OG8cs3nHD67SxD62K4M8jwUY4fmqh3UpoBmukOUKaWD8wiLR5bdyCEdy3g77ZtIYHKlQTigUZftAUdgNQi8eQYgMP7xb6SUQz6mYmKQyBHL/cD1wLi5m+2srLd3/5Tver5G68ln99hfO+27/DhwFg5Op6g8HDmfvVc30XLqOT32bV14FoFtVT2Pd/qFOIfeCE2l5Ly6kN9d4hYDxLNdGex5KMYOHTvWUpXswYwWcE4dhoqw7hzb8jpV3imsDxzA5EpXtMpieZZa10Jf+DzYZeXY6zcM34EVezQJERYhxElArZRywAJ5IcSlQojlQojldXVj4Ka3k7JeQ5o9+qfkO2lx9z/AM4OAFuCEjC/z48BZaKd9F5GSEndffdpUspa8ScbfnsV72BfcfXzRpfdNpRxQEmHbX2DpH2HutF0s6FLOxENiNK/LhuBKLA3avZDWYpPeMniPsHh8qe3f+GSEVzLPpGhHrOQ4lmfRQXfQ8zRkJILx1pJhO65izyZRI5YjgW8JIbYDzwHHCiGe7r2TlPIhKeU8KeW8vLwxUOG0k9XwvVe4t3vsuPuvsrZw2rrL+UHRhZSQ5zboagzG3dfetp3g0SfSes0NpPzyVrwLTwIj6gGaWYKj+SkugMLs+KISN5cywr1m9nRG87psiKzDQcP0R5hcYaDZu9+DZSDSnBa+0fwCb6UtIBLKJa3V7krg17kXlLfQdT8Ov/iqmg5T7BIJERYp5U1SyiIp5VTgTOBtKeX/JiKW3WFnjr7dV7gfmjybSeG+PVO0vz3KL8LPUmFUc8mmm0hPzkErKabjrl+T/ugDfdyO2392JxBtBHbRFaT/6udoK+6EzBKsM1/Gk9b/Cvn+cikiN0c5E48TmtiKJT14NavTI8wZpJVLf/xP4+M4QvDXrAvcBH50KsxpFcgQeHIiCK8X69PPsFatHtZjK/ZM1Mr73WBnjr7dV7inNYRom/812gsLyFj0KFpBPgSSwLF50rwOLdWD6RV4LUnWW6+BYeJISdYbL4LXCx6dljPP77E4zSktA92LfcbzOJofT1o+mqf/7wb95VJi5ci740wsHcfNwUQi7ohHORmPPI5N0FsPdg7CY1BUHkFIhjXHAlBoVXJ8yyu8lHkOX9+xmNWH2zgCNCkwazS8eWEIZCNb2wg//RzeA/cf3gAUexwJvzNIKZdIKU9KdBy7Sr89WqK+YFpzOfmGic9w3BzJnFkIv5+W//sR9oaNBI86AWPvL2IcfTIZVa20XP1DGvY+gODXT0FWVdNy0604ra3YwSZSbrmxR35FKylGJCXhySnBl1U4oKgAA+RSIrvVa0ZVkSWI5nJ2JIG3LaVHqfFwVIT15pK6e/HLEJ8HT6YjSWNHoVteaFXpaKkmeooDAsLPL8YJBof9+Io9i4QLy3imyzyyitpQBc4rl8O9U+FPh4NmoZUUk3zd1TSd/r8kn3cOLRdd0avCy90ee91y0RUkn3cObbfdgWbbnWXGrdfcQModt5H55os0Z/ncBl27gqbFz6Xs5khDVZEliPoNVCR7sEIZAEzdNrwVYd3Jthu4uO4+tjd8lbAIsHFaAACzOrZQMoJITsZpaKTj178bmSAUewxKWAZJ7zUrCzZ9n/XHX49TdBg0laK9ey2ZLz2L0PXOXvYD9bjv/jr5vHNoOv1/e9zIWy64jEiSh6+vu5D1oS27JC5S1/vmbR59AKnvZvZXVZElBLt+LTuSPbRFskhut8kKWsNaEdabBc3PMbu0jnaZyWd7u5WHdqOGNMCbH0Z4PCAEoYefwNqydcTiUIx/lLAMkt5rViqMai6ovJv6r/4YOfkwnH3PR+RmQ8Df2cs+3ugh1uO++2stPzfujbzFbOk0qaw3dz4
dITSN9t89SNp9d5G15E3S7ruL9t89uPu5EVVFlhB21K/GwINt+TsrwkZqxAJun5b/q7oVWV3MG7PnuhulwKrW8OSFAdyy+JZWWq+9SU2FKvpFCcsg6b1mBVxxIX0K1oF30HjGzTRMmUX77x8ic/EiOp5c1Gf0kPnC03Q8uajzdfqjD9Dx5CK03Ny4N/IKGjqPY0izT0y9S4tFbg5pt/2I1mtu6JxSS7vtR7td/aX62yeGbc0bsdDRpOysCBvJEQvAVGML+29oomZaiHcyjwLc6TA93UD4bYSuIfw+zLffJfTIEyMai2L8oqrCBklszUp3cTkx/ctkdWQgnQ7S7ruLjrt+TfhX9wOQ+pu70DSNrHffdKexNI1Wj4Hv/l+Q+uvb0XQPaILk886h/e77SH/0gc6cTKxMOcWn8X7m/TTpEXQpOqfD6s0gOA4ZG6poWXgmemEBKbfciD5zb8RehWQvexsi5qD70qv+9gnANik3q7HkXng0k6JyY0QqwuJxxsr3WXPsftw973y+/O/3sap1wMKbH8YoT4FAANncTNstv8B3zFF4Zu498kEpxhVKWAZJbM1KbDrsxPQv84fI2TSd+I2uplwvPI3d2kbk6eeQAmwjgq770Lw+HE3wL1aSnp5BpicdkBzcnk/rNTfglJZhr1lH2n13oeXnIoqLaG1tIP8o1x4mpaSY1JeeoWk/L9V2HRdsupHfpV7JtIVXu6Lyy1t7iFLmy8/3cTt2pEPQbCYkI9jSJk0kkxGMRIWjbzmx6m8/ygS3Ue4TOKYf3d86ohVhvZm7vpUUp40dM9t5bflCTq5fDBZ48iIY5SkIISAtDdnURMtl3yfrHy+7+ReFIor6yjlIuq9Z+fDAF/lDxvdpXXh2n6ov4fWSfMUltBz1dZr2Pojg0Sdib9yMrKji1OA+fNq8itPXX8EPtv6cinSTzBcXoZUUY374Ma3X3IBIScbRNexv9PzstpPPhvpGLth0IxVGdad9TPIN1/atPutVweVIh23hcjaEt3Lausu5avMt2KvX0nj4saqceKxQt57yJA9OUwHelGambosMi6PxrpDW5jCjrJWMoo94MP8qDOnDrNXw5oc79xG6jkhOxvzgQ0J/+NPoBKYYNyhhGQLduzJqETO+11d+Hk2nndOnwou6OpztpVwpTgAg35tDqVlF/T55pL3/BjlrlpD+zmvcsmwO9PPZfpPOqbhardWdMuun+qx7BVe9GWR7pLLTXfnHgbPgtEtUOfFYom4dZUle2hpLSBc15NRbw77ifiD22xDCP2UNVf5CFmd9G6tKQ880ED67cx8RCIAQtP3yHqw1a0ctNsXYRwnLMGH7PfErp2wr7o1epKQgUlIIWBqHpMzhhqLLuHn7PcxbcwZfqfsBayan8b2/FnP3sx621AXifnbYKzu9yX4RfhZn8Z+Q7e07reAypEmyltQpSv2ZZapy4sRh1a+lIsmP3ZrDlOq2YWvutavMWh/C8UeYlfl3Hs6/gtbKZEDiLQj32E+kpSHb2mm79fbRC04x5lHCMkxEstNIeWlRnzUjdsWOuDd62d6ObG8n7HG4YtK5XLftlz1Kly/ddiMLjm8G4JLH8kl/qWdVlrP4T9wZ/gv3Tf8xRb5CVnSs5eaUV7Hm7U/mi88OWMHlE146nFCnKMVGO71jVOXEiaMquIaI8EAkiSnlo1MR1p1Z60MAzMt7igY9lz/L89z1LIWhHvsJTUP4vBj/fqeH/ZBiYqOEZRiwbIdyo5arAs9S+/YjZG9cSfoD99H+o9sI//NtMhcv6lNmTEkx2tQS/iD/QaYnPW7pcnaWW1JcFdRomzqH7GVvk75tFZVvPcgl/if4T9tyCiwPr+b/kA/nPs8d028gKa8Qz4H7k73sHXK3x29TnOvNYqp/cqco/SL8LCx+WJUTjxUibZQbNVjSg7A9o1oRFiOr2WZypUH17Ea+1LqER3MvJ1Tlx1sY7rtzUhLSiNB+z29GL0DFmEaVcgyC3oaMjek+7q98mO96vkaKpbPRU0v2QSVkP/c4mqbRevX1boVXdhZOY5DWX9xN+m/voS7Hy+niJLyap0fp8qHJs/lZ0nnMMMMs+3kNWdPyyErX0DIL8UuHvcwMfm//Ep+EHNPBk1aACOR3WbUIBqzg0oTGtMAUMvV0Fs/6I4600UUyGUMsS1YME/UbKPdrmNKLLh2mlEUT96NQEdadQz9p5+/fyOQXLfdxcdrLvNl2IqeWvIyWauK0dbUqFUKA14fx9hKsdevxzNpvVONUjD3UnWM3iWfImFHTxIPeyzioJoXCNbVMuva3ZFZ30JqfjrBtzJdfo/nUswgefSLNp56F+fJrVEVq2GSVcczqs7l6y23cO+1HnXb7j0YuZPJxl9Gxz1ymfucYShrXIGKNNqVGdXkOR59fyOSvF7LvpXvxeW0hzm7+KTWhkePLoshfSHFgMtn+LPTCwrimlF2eaNXUGg277lWmGBx16ygNeDAjyXh97ZRsjyQkjHmftGPpgqbZFZwc/Cv3m9dhCU8/o5YAMhwm9MSi0Q9UMeZQwrKb9DZk1AsL0OqDNB13EsEvHe8aRl51GaGf3U1mVRNYzXHzFy26QZGvkGf3/Q03T7kSn/Dy4qwHeT7ntgErtGqb4OSboTQ6c1Za7b6ubRqh8+3hiXYKC9ZevMteZYpBUreO0iQfRtNeZGo7yGoc3q6Ru8reW8JkNtks+0Ia11TdSWtrClvN6chCq8++QtNA0wg/8zxOS8uox6oYWyhh2V16GTIm33AtTWf0MoyMuhQ71TU4qfl9mn2lP/4gXm8ACfxw2x2cvv4Krtr6U5rsFrxRu/3udK/QihhdohKjtBqMvg4vw0JcT7Rd9CpTDJK6dWxODmA17sX0tm2jXhEWQ5NwyKftfHRwCgGtjTvKruX9piNpmZxCSOtb2CGSkpDBJiIvvDT6wSrGFEpYdpdehoz9uhbn5+LU1iM7Omi/6daeRpA33coUO5MGM0i+NwfoumFbPn3ACi2/D0oKe/yakkLweRkR+vNEi+dVphgGpCRUu5LKJB92w2SmN1WOSNfIXWXeJ210JGmsnJvMoR0fM33zBrxeg3v3vpJ2kdRjX+H1Ih1J6M/PqhbGExwlLLtJb0PG/taNaLm5rsGk14tdXdMjx2JX17DVqeZPOxbx58yb+Dj7AV7M/jkFnmyCmT7SX+zf8DE/E166vUtcSgrd1/mZI3O+MU+07hT5CvGJEVKyiU5TKVtFCAsPdsNkptbXj3pFWHfmrg6R3OHw1lHpAMzZ/Clpdgtp+UEuL7yfVi21x/7C58Va+TnW52sSEa5ijKCEZTeJGTKmv/822ZtXoR88h8y/PdNTCP76Z9ofeRJ5y/e5136lz/qW5Jee5gnnbW5vX4B55AL0mV9m2lev5tHIhWyv9HDsn+aw/al3SN/ct1xY02DuNFj6R9j2F/fn3Gm73btrl4l5osXEpchXyOP73E2uN2sn71QMiupVbEnSMaQHpymP6aXRfMUoV4TF8FmSL73fyn8PS6M1RUOGBJ56gwu8j7LKN4tr8u/A7n4bCQSQRoTws3
9NSLyKsYESlkEgNI1qvZD1bck0Guu4J+s9kpa8QvaWVWS9+ybWpHzkDy7kAt8j/KHuGa4KPIv235cxNi4h8P7f+WnqG5zB4WinfbdHbobTLiG5NsKydRqH/6SAOTdPoc7Xt22wwCHPqGFyuIw8o6arYmwE6O2J9ursR9gvaQaaUJfOiFD9GVuTvZi2H5/pMGPzyHWN3FWOebcFwyt450vuqMWo0CnOLef6HT/jo8AhPJR5Qee+QtNACCJ/WYwMhfr7SMUeTkLWsQghpgBPAYWAAzwkpRyTq6scx624krZDVqQOjx1BeDXSsnyQaRNiFn9YdQvLkldyR/qlTNf3wmM6eG3BX9NvwcrW0EJhPFoqaQE/jmlxvnYMM7yTaY6Tm5meF2bFX6soTPOQEYygtZhYRhIiL5cGu7mHPb5TWoZ34Umk33sHji5w/F68+QXo+vD+WWOeaIpRoPoztqb4sZoKmS5WEQg7CakI68600ghTt0d447hMFvyjCaNMJ+kAi4WpL/NR4xE8lH0+J7b/m2lmKeB6iDn1DURef5PA6ackNHZFYkjUdyELuE5KOQs4HLhSCDE7QbH0i+PA6m1w5b0Ono1raP3SMTRMnU3jEV/DXruJqzbfwuZwGSemf5kntCvZu8aLtnEbTUd/nYa996flqK/j3VKOcft9OBs20fzlE2medgD5x16MXt2Ad+FJPY6nlRSzRSvjltofo6/dSNsRX6Np2v4EDz8Wc/Vqbtp6F9srPusSlcO+QMpVlxE87psEp82ldf7xmKtXY9t9y0EV4wDHgR0r2Jzqx6grYU5oJbotsTyJFRZwRy1bp/pZPzOAWeO2K04tDnF55W/x2WHuy76ia2evFxyppsMmMAkRFilllZTyk+jzVmAdMDkRsQxEbM3ILd+shXO+3Wfa6seBs7i/8jHuTbuMlO11UFdHywWX9div+dyLSb7u6j7bm874X9Lv+nmP3AuLH+aW0JNx3YZbF57Ndz1f62EYGc8iv3Xh2Zi1NaP676QYJpq202G2UB7wY9WWsG/rRjQncYn77hz13xZS2h2eOS0XpMCs0Egt6SDTCnJW9VMsSfoSn/r3B6Ir8TWB8Z/3sbdtT2zgioSQ8EtWCDEVOBj4MM7vLhVCLBdCLK+rG30L99iakb1zQ3FLivOdND5pX0NHqLnTrTiuk7Gux93uBIOdZciZ777BRf7HWNGxtl+34XwnrYdhZH+lziKiSoFHkhG7Lqs/Y1tAc/vcN0xmn7ry2AGH7xiDJCksOen1IB8dksK6mQEiZR48STaBPINT6v5CqtXKUxlnd70hEIBwWI1aJigJFRYhRCqwGPiBlLLPcl0p5UNSynlSynl5eaNviBhbMyK8PdeWBC78DjlrVzBZy2Nt/pNYXtHpVhzXydi24253qtwy5ObzLqVVN6mxGoH+3YZrtdZOe3ytpBinMRj/eH5VCjySjNh1WbWSLckeDDxQV8C+FXVjYrQS44R/NZHSZvP06bkY5TogSSkJEZARTqp/kbeTv0yZx514ELoOQhB+7gWkpaZmJxoJu2yFEF5cUVkkpfxbouIYiNiakbJwEpnP/RGtpJjAhd8h+fKLCX79FBpnHoBx9MlMag9gTi+CvLw+q+wz/vwIHff+Nu7q+467ft05BdaRk9xZ1hvPbTjt5Wf4k/XPTnv8pKVv4DlsHhm9LPLTXn4Gb75qITwuKV/G1mQ/tuNlekULKSFzTORXYiSFJQteb2L5wSl8NCsVq8adDgM4ue4FNMfm2fQzut7g9WKXV2C8/W6CIlYkCpGIFbJCCAE8CTRKKX+wK++ZN2+eXL58+cgGFgfHgWCrQ0ZkE6JiK7LwIOyVq9xpr8YgHXf9Gru6hswP3sLWBR5Luv4qjo30ekD3IEJhrIAHyzJoMVsIaiHw6Ozt5OP4vYSyU8j0ZQCuhYohDXzSQ1pjBM2w0AOBzqowQ5r4hJdcbxaa0LBtC7O2BhExkSNUFTZGGRN33GG7LiNtcN90vjsrl1fF/pzwy4O4ddXthJI0ZIJW3cfD9MANvyxG6oI/P7mB7INMNj81BavDw53FP2Fp9tH8u3whaU6bu/q+vQP/qd8i44kJ1b547PzBEkSiRixHAucCxwohVkYf30hQLAOiaZCVptHsm4k5bR6yupaWK64hePSJruHkL2913YA7QrTb2/H8cS9afDtoCTfjfL4WuWkL9oaNdDQ3cHbr7Xyh8Qq+Vn8dFzbeSX1+Ov6CIrL9rkh0tTqeRH4gj6S9ivBPnYpWkB9XVAB03UNg0mT8U6cSmDR5oojKnseOFUgrwuqMJKzaqRzc5jbNGkuiAuC14OLHaqkq8PLIgZMASJ3ujqOmWO0AABEsSURBVFpOq3uODhHgb2kLgGgSX0qMN/+FXaMKSiYSiaoK+6+UUkgpD5BSHhR9/D0RsewM03Ko7GigSa9Fdhg0nXpWH8PJlFtuBI+HpqQ0nKLDSBZ7kVLT0ilALVdcQ0pNC7/KuBJwV6/fk38337s9i9Xb3FFRfyh34QlC+TJ2eCHo1THKZnFQ3VrEGLXbmr0hzNHvtvCX43NYqaeQPqMdgJmhTcxt/Yzn0k7rWo2fFEC2tRP564sJjFgx2oyh1ODYw7Id1oW2cPrmizlq3SnUd1TFrcLSZ85gh6+d7TRT/9Uf44k4fcqLWy64jOlOLkv2e5FFUx/Brp1BTYO2U8t75S48QSj7L2vTApjo5K3PIr89iOkdW6OV7py7qJ7soMWtXyxBFBl4kt0E/al1f6HSM4l3k78EgPB4kNIh9MQilcSfQChhGYCaSJBLtnTd1Ft0I351l9/H9xp+RbKWhJGSA6YRV4Ck5XDCJYXsuzCHi+7Q+PnFUJg9sOW9cheeAISbofxD1mSkYDpeDt5SjdcZW4n73qSEHC7/Uw0V6X5+N2cv0qLTYUc0v0eeUcOi9P/p3Ff4A9gbNxN55fVEhasYZZSwDIBJz5t6kjclbnVXnd5GjdVIhxPC196ACP9/e/ceHVV1L3D8+5tn3gkhvEMSQBBE67MqqBWxonKt1qpLe9Wl6PIul4+q1bLQ26rLV33cKu31VuvbuqzFYtW2XrlY5Aq+uYIKgi+IQpCQEAiEPGYyc373jzmBAEMgMM/M78NikZlzOOd3Tvae35y9z9m7Pm4C+qopb4cJuq64H351Sc9D3vd2dGGb7TEL1b4FkQ6WlBUS2VrO8Q3vIzgZdatxPONXtDP1v5v5S00FK06KvecjylmNL7Eo73A+C7hTFAcDaDRC20MPoz21+5o+I8OLbnr52fFDfUAkP+7cKqVhPw+N/CXVgSF4Bx8O1aN3GFrfU11FySuzuOKpgTts/9t6GF3Z85D3vRld2PpjslTtfEI4fFKSh6wewbGb3sNDNCMejNyT819sYnh9mPtOHky4MnYVfcaGVyiKtPBIv8uBWCe+BINEPl1mVy05whJLDwYF+/H4qO0f6k4g/twqnmAePnzcUHs3U7+6hi98W/EcPI7y9+dT8U1s6PutNeNZt2nH0109GIryex7yvjejC1t/TBaKRuCrOSwvyaPD62HcglKKoi2EA
5mfVCA2rP5V/1lPc8DHMz8rAKDQaeXchj+xIH8inwbHx1YMBtFIhNY77kVDoTRGbFIh5xNL3KYjx4Gt9Xha6xnmL2P22Ed455DZtJcX7/LgIi89Tm1xG02RZm4ZfjW3V13Pf6x5nKboZryDB+GtHh6bHKyjkc/uWc37d67n2HHO9gm69mJak+23IQ9mYKD/boest/6YLLTmXWhZx6L+pYTxcMxHzYhmxsCTe6vqy04ufq+BhYcUsGBybKbTsxtnU9a5kXv630gEb+yqJT+f6Jdf0/6HJ9McsUm2nH7ooavpqOtbflcz09g2B+bO4PNTpjNt1f3blj1aOZPAiEo65z1KWTRIszdE+ZBqWiKN3L565rb1fjPi37c1P6njEFn6Gc1nxQaxrKmu4s2XZ7F1xHj6lXgSOkFXV39M9+Risz1muOUvg9PJwvIi2FLED9cuoN0XJE/a0h1Zr0yd1czSMQU8eU0JQ+ocRn/ZxtV1M7l7xB08V3oB0zY/jwQCOO0dtN77IIGpp+I7YFS6wzZJktNXLLttOmpdzYZjLmfa2vt3WHZl3fX0zyth4NDxeKoqqagcg9fr57pVd+yw3o21dxN1J99yGhq3JRVwbz0++3zKOhoTPuujzfaYZcJtsOIVmn1elpQEOep/iigPbyLfuzndkfVaeK2XO978liGdIX4zo4i1wzyc2DyPic0L+F2/K3k7/1gApKgQZ/NmWq6bbrcf92E5fcWy26ajYBEEi6lr2nVZxBuiMri9Q78utC7uNhT36bZQKO6tx4TCCTySmO79MfGe0jcZZtmL0LaBhUP7E/YIU+a00uorpMS3kewbFUSIfuRlZsXXXHboWG69t5QLn23j5wvvZHrg99ww9HauzLuD8iGLiYa9/HDufOb8YiprbzyPKf1O4JCCA2NP6ps+IacTy26bjkJbIRLaq2algAR6Xi8YjD3r0i25eKqrYkMnJ4HN9pglHAcWPQrq8NqgEgZ9q3y/diXfFA2nhLp0R7dPttT6qalv5YmmL7h5zEgev6qAp6+A/K03oeWl/M7rJaCDqdgUonBDB0f98UPmDl3PE5NnMS7/AC4bfB6nlZ2I35PTH0t9Qk5/ld1t01FhFRUfPMnTw6bvsVlpT81PnoEDdrn1uOzVWXgGpn4aAJNBlv8VGlfQUFDAO2V5XPRYBx2efMo9a9Md2b5zhKbFQUYEO3hwVi233Psdp8/dxJGfbuKiV2qZ8ocheB+4k7rfvshdW95gTWQ8P7+7ge992M7Sts+5adWvOXP5Fcze8DohJ/FX9CZ10jK68b5I1ujGjjruiMLdmo4UaGvAcRw2BDyEhR6bleJuo9t66jg4DY2x5q9gAM/AAUiiO1hyT0a0m+xTuQxthccmwKZVzBwznH+2Bvj1tc38s9/pnBqalZxAU0apPLWNYD+H2r8XE2ndsZyHJcDiggm8WzSZT/OO4taVtzI0vJo7z7+AL6Z9RrSgCcFDP18pk0qP4eSyiRxXciSF3oI0Hc8+yYiymU45n1hM1sqIytvrcqkKr10Li59mY0ER54wbxq9uqIfvqlhZPpTj2t5MXrAp4i92qDqjlVCzlzVzC9Fo/F+VAisDY+nX0kFJpIUHD/oZ8y/2UTrxH4Q97YAieCj05jOp9BimlJ3ApLJjyffk9bj/iEb4vG0lX7TXsr5zAxGN4MFD0BMgIAGCHj8RjdKpnYScTvziY1ReFeMLxzDAX56IU5ARZTOdLLGYbJURlbdX5VIV3n0Q5t+JotwyahiHPLGZg94L8ED1L7m58SY8ZEd93JOiqk6GnNhO6zofa/+3EI3s/tcljiIdQVR9zB1wGjMnTCNw9hwKRy8hSoSQholoFJ/4KPeVMrV8EmeVn8L3Csdu6/APO5183Lqcec3v8Pqmt2jobCKi0W23/QuACIIAiiqo+0cQfOIlIAGOKzmScypOY3LZBHyyz309GVE208kSi8lWGVF597pctm+CN2+DJX/EcSI8mz+IgbO2UvWl8MCQuziz9TkO6vgk+QGnUMmoMIMmdhDe4qX+vXzaG3r4oFYlL+SgkQBN/gqer7yY108Yj/9H88gbVgsonRohpCFA8IuPQf4KRuYNJ6RhVrStpNVpo9OJICIExI9ffHjwbEs+XZ91XXdsSrciFCFK2NmewKrzhnLhgB/zk4pTKfYW9vbQM6JsppMlFpOtMqLyxi2XnR2wtR7ammDj12jt2zhLXua7jS2sbsmjaZWfkctaafEWc0/FQ0xqf43Tt2Tk7Nz7LX9whMHHt+MrUNobfbSu9dHR5CXS6iEaFiJtQvdfpSeqBNuVqAZp8xTzUb8j+aR6JKsPd9g0voXWmhYi/bcQ8rbTSSeO+7yYT7z4xU9A/Pt127KqEtZOwhoGhDJfMSeVTmBiyREckFfD8OAQirwFeMXb02YyomymkyUWk60yovLGLZdLZ8Fr19L8fDuddQoR2OL3EPJ4cNTD+vIC3hgwhffDP+GnbY9waPiD9ASfIuJVBtSEKK8KU1ASBdjW4Lfo7wNxorveyOLvjBJs9RDuLKY41E4U37bfuCK0e/PRoMOjzxzIupHFSYm7wwnRHGkhrJ2oOvjEF5vpFcErXo4uPpRHD7gr7iEnJaAskjWJRUQagW8TtLkKYEOCtrW/LJb49hTLBlU9LVXB7M5+lMtMOtf7w45jVxlRNtMpaxJLIonI/6nqUemOAyyW3cmkWJKhrxyfHYeJxx6mMMYYk1CWWIwxxiRUriaWx9IdQDcWS3yZFEsy9JXjs+Mwu8jJPhZjjDHJk6tXLMYYY5LEEosxxpiEyqnEIiKnicgXIvK1iMxI8b6Hi8h8EVkhIp+JyHXu++Ui8oaIfOX+m7LpHkXEKyJLROQf7usRIvKBG8ssEUnOpDHxYykTkdki8rl7jiak89wkUzrLYSKJyDcislREPhaRrHl6WUSeEpEGEVnW7b0+WdbSJWcSi4h4gf8CTgcOAn4qIgelMIQIcKOqjgOOBa529z8DmKeqo4F57utUuQ5Y0e31fcBDbiybgMtTGMtvgTmqOhY41I0rnecmKTKgHCbaSap6WJY9A/IMsPMDjH2urKVTziQW4Gjga1Vdpaph4M/AWanauaquU9XF7s8txD44h7kxPOuu9izw41TEIyKVwL8AT7ivBZgMzE5DLCXAD4AnAVQ1rKrNpOncJFlay6EBVV0AbNzp7b5Y1tImlxLLMGBNt9d17nspJyI1wOHAB8AgVV0HseQDDExRGDOB6eCO4gf9gWZVjbivU3l+RgKNwNNu09wTIlJI+s5NMmVMOUwABeaKyEci8m/pDmY/9cWylja5lFjiDQyX8nutRaQIeAm4XlW3pHr/bgxnAA2q+lH3t+Osmqrz4wOOAB5R1cOBVvpuU0RGlMMEOU5VjyDWrHe1iPwg3QGZzJBLiaUOGN7tdSXwXSoDEBE/saTyvKp2jZO+XkSGuMuHAA0pCOU44EwR+YZYU8xkYlcwZSLbZjdK5fmpA+pUtWuY39nEEk06zk2ypb0cJoqqfuf+2wC8TKyZL1v1xbKWNrmUWBYBo907nwLABcDfUrVz
tw/jSWCFqj7YbdHfgEvcny8BXk12LKp6s6pWqmoNsfPwpqpeCMwHzk1lLG489cAaETnQfetkYDlpODcpkNZymCgiUigixV0/A1OAZT3/r4zWF8ta2uTUk/ciMpXYN3Mv8JSq3p3CfR8PLASWsr1f4xZi/SwvAlXAauA8Vd25YzGZcU0CblLVM0RkJLErmHJgCXCRqoZSFMdhxG4kCACrgGnEvvik7dwkSzrLYaK4ZeVl96UP+FO2HIeIvABMIjZU/nrgNuAV+mBZS5ecSizGGGOSL5eawowxxqSAJRZjjDEJZYnFGGNMQlliMcYYk1CWWIwxxiSUJRZjzF5zR6G+yv15qIjM3tP/MbnHEksSda+EPaxTIyL/uhfbquk+zHec5ZeKyMO7WfbuztsQkUldw+Ub0wtlwFUQe/JeVc/dw/omB1liSa5tlbAHNcAeE8v+UNWJydy+ySn3AqPcOVj+0u2LyqUi8qqIzHHnmrktzXGaNLLEklzdK+ED7t9l7uRI53db5wR3nRvcq4qFIrLY/dubpDA8XsUWka2JPCiT02YAK1X1MOAXOy07GrgQOAw4T0SyaY4Wk0C+Pa9i9sMM4GBVPUxEzgGuJDaJVQWwSEQWuOvcpKpnAIhIAXCKqnaIyGjgBWBvK+jRwMFAm7v911Q1a2b2M1nvDVVtAhCRvwLHA1b+cpAlltQ5HnhBVaPERlJ9C/g+sPPQ+X7gYXfsrCgwphf7sIpt0mnn8aFsvKgcZYkldeLNwxHPDcQGxjuUWFNlRy/2YRXbJFsLULybZaeISDnQTmwGxstSFpXJKNbHklzdK+EC4HwR8YrIAGJT8X7IrhW1FFinqg5wMbERcPfWKSJSLiL5xCr2O/t7AMZ0514Rv+N22j+w0+K3geeAj4GXrBk2d9kVSxKpapOIdFXC14FPgU+IXUlMV9V6EWkCIiLyCfAM8HvgJRE5j9j8KK292GVXxT6A2DDmVrFNwqnq7u5ibFDVa1IajMlINmy+MWa/icilwFGWWAxYYjHGGJNg1hSWZUTkVOC+nd6uVdWz0xGPMcbszK5YjDHGJJTdFWaMMSahLLEYY4xJKEssxhhjEsoSizHGmIT6f7PDmR4HS9nHAAAAAElFTkSuQmCC\n\"></div>\n\n</details>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0b9847ebccfef7808d06871bcf4e8f2827482cc
111,677
ipynb
Jupyter Notebook
Linear_Regression_Scratch.ipynb
Saurabh2509/Linear_Regression_Scratch
e7fd930446e02f94941363743e3b7a665ac22f7e
[ "Apache-2.0" ]
null
null
null
Linear_Regression_Scratch.ipynb
Saurabh2509/Linear_Regression_Scratch
e7fd930446e02f94941363743e3b7a665ac22f7e
[ "Apache-2.0" ]
null
null
null
Linear_Regression_Scratch.ipynb
Saurabh2509/Linear_Regression_Scratch
e7fd930446e02f94941363743e3b7a665ac22f7e
[ "Apache-2.0" ]
null
null
null
241.725108
37,224
0.925455
[ [ [ "# Linear_Reg", "_____no_output_____" ], [ "Author ~ Saurabh Kumar \nDate ~ 05-Dec-21", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "#simple_linera_regression\n\nclass Simple_linear_regression:\n \n def __init__(self,learning_rate=1e-3,n_steps=1000):\n self.learning_rate =learning_rate\n self.n_steps =n_steps\n \n def fit(self,X,y):\n \n # adding the bias term\n X_train = np.c_[np.ones(X.shape[0]),X]\n \n # random initialization of the model weights\n self.W =np.random.rand((X_train.shape[1]))\n\n # random initialization of the model weights\n for i in range(self.n_steps):\n self.W =self.W -self.learning_rate*self.cal_gradiant_descent(X_train,y)\n\n def cal_gradiant_descent(self,X,y):\n \n #calculating gradiant descent\n \n return 2/X.shape[0] * np.dot(X.T,np.dot(X,self.W)-y)\n \n def predict(self,X):\n \n #Predicting Y for the X\n \n #adding bias term\n \n X_pred =np.c_[np.ones(X.shape[0]),X]\n \n return np.dot(X_pred,self.W)\n", "_____no_output_____" ], [ "#creating dataset\nfrom sklearn.datasets import make_regression \nX , y = make_regression (n_samples=1000,n_features = 1,n_targets=1,bias =2.5,noise=40,random_state = 44)\nprint(\"X_shape =\",X.shape)\nprint(\"y_shape =\",y.shape)", "X_shape = (1000, 1)\ny_shape = (1000,)\n" ], [ "#train_test_split\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test =train_test_split(X,y,test_size=.33,random_state=12)", "_____no_output_____" ], [ "print(\"Shape X_train :\",X_train.shape)\nprint(\"Shape y_train :\",y_train.shape)\nprint(\"Shape X_test :\",X_test.shape)\nprint(\"Shape y_test :\",y_test.shape)", "Shape X_train : (670, 1)\nShape y_train : (670,)\nShape X_test : (330, 1)\nShape y_test : (330,)\n" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\nplt.xlabel('X_train')\nplt.ylabel('Y_train')\nplt.title('Relationship between X_train and Y_train variables')\nplt.scatter(X_train, y_train)", "_____no_output_____" ], [ "plt.xlabel('X_train')\nplt.ylabel('Y_train')\nplt.title('Relationship between X_train and Y_train variables')\nsns.regplot(X_train, y_train)", "_____no_output_____" ], [ "#model\nmodel = Simple_linear_regression()\nmodel.fit(X_train,y_train)", "_____no_output_____" ], [ "#prediction\ny_pred =model.predict(X_test)", "_____no_output_____" ], [ "#error\nprint(\"Mean squared error: %.2f\" % np.mean((model.predict(X_test) - y_test) ** 2))", "Mean squared error: 1716.39\n" ], [ "plt.xlabel('X_test')\nplt.ylabel('Y')\nplt.title('Real vs Predicted values comparison')\n\nplt.scatter(X_test, y_test)\nplt.scatter(X_test, y_pred)", "_____no_output_____" ], [ "#Same with Sklearn lib\nfrom sklearn.linear_model import LinearRegression\nmodelSk =LinearRegression()\nmodelSk.fit(X_train,y_train)", "_____no_output_____" ], [ "y_predict=modelSk.predict(X_test)", "_____no_output_____" ], [ "#error\nprint(\"Mean squared error: %.2f\" % np.mean((modelSk.predict(X_test) - y_test) ** 2))", "Mean squared error: 1534.17\n" ], [ "def accuracy(X_test,y_test, y_pred):\n print('accuracy (R^2):\\n', modelSk.score(X_test, y_test)*100, '%')", "_____no_output_____" ], [ "accuracy(X_test,y_test,y_predict)", "accuracy (R^2):\n 79.03370956723134 %\n" ], [ "plt.xlabel('X_test')\nplt.ylabel('Y')\nplt.title('Real vs Predicted values comparison')\n\nplt.scatter(X_test, y_test)\nplt.scatter(X_test, y_predict)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0b98c386f42239350f97744766e71dc4d8518f6
74,670
ipynb
Jupyter Notebook
15-p3-collab-compet/Tennis.ipynb
franckalbinet/my-drl-udacity-nanodegree
23d3a151c15db726c92d12fd3f5f34d77fda13ea
[ "MIT" ]
null
null
null
15-p3-collab-compet/Tennis.ipynb
franckalbinet/my-drl-udacity-nanodegree
23d3a151c15db726c92d12fd3f5f34d77fda13ea
[ "MIT" ]
null
null
null
15-p3-collab-compet/Tennis.ipynb
franckalbinet/my-drl-udacity-nanodegree
23d3a151c15db726c92d12fd3f5f34d77fda13ea
[ "MIT" ]
5
2018-12-20T22:33:38.000Z
2020-06-25T15:51:59.000Z
113.308042
26,852
0.831338
[ [ [ "# Collaboration and Competition\n\n---\n\nIn this notebook, you will learn how to use the Unity ML-Agents environment for the third project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program.\n\n### 1. Start the Environment\n\nWe begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/).", "_____no_output_____" ] ], [ [ "from unityagents import UnityEnvironment\nimport numpy as np\nimport random\nimport torch\nfrom collections import deque\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom ddpg_agent import Agent\n\nplt.style.use('fivethirtyeight')\n\n%load_ext autoreload\n%autoreload 2\n\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded.\n\n- **Mac**: `\"path/to/Tennis.app\"`\n- **Windows** (x86): `\"path/to/Tennis_Windows_x86/Tennis.exe\"`\n- **Windows** (x86_64): `\"path/to/Tennis_Windows_x86_64/Tennis.exe\"`\n- **Linux** (x86): `\"path/to/Tennis_Linux/Tennis.x86\"`\n- **Linux** (x86_64): `\"path/to/Tennis_Linux/Tennis.x86_64\"`\n- **Linux** (x86, headless): `\"path/to/Tennis_Linux_NoVis/Tennis.x86\"`\n- **Linux** (x86_64, headless): `\"path/to/Tennis_Linux_NoVis/Tennis.x86_64\"`\n\nFor instance, if you are using a Mac, then you downloaded `Tennis.app`. If this file is in the same folder as the notebook, then the line below should appear as follows:\n```\nenv = UnityEnvironment(file_name=\"Tennis.app\")\n```", "_____no_output_____" ] ], [ [ "# Under Linux\n#env = UnityEnvironment(file_name='Tennis_Linux/Tennis.x86_64', seed=123)\n\n# Under Mac\nenv = UnityEnvironment(file_name='Tennis.app', seed=123)", "INFO:unityagents:\n'Academy' started successfully!\nUnity Academy name: Academy\n Number of Brains: 1\n Number of External Brains : 1\n Lesson number : 0\n Reset Parameters :\n\t\t\nUnity brain name: TennisBrain\n Number of Visual Observations (per agent): 0\n Vector Observation space type: continuous\n Vector Observation space size (per agent): 8\n Number of stacked Vector Observation: 3\n Vector Action space type: continuous\n Vector Action space size (per agent): 2\n Vector Action descriptions: , \n" ] ], [ [ "Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.", "_____no_output_____" ] ], [ [ "# get the default brain\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]", "_____no_output_____" ] ], [ [ "### 2. Examine the State and Action Spaces\n\nIn this environment, two agents control rackets to bounce a ball over a net. If an agent hits the ball over the net, it receives a reward of +0.1. If an agent lets a ball hit the ground or hits the ball out of bounds, it receives a reward of -0.01. Thus, the goal of each agent is to keep the ball in play.\n\nThe observation space consists of 8 variables corresponding to the position and velocity of the ball and racket. 
Two continuous actions are available, corresponding to movement toward (or away from) the net, and jumping. \n\nRun the code cell below to print some information about the environment.", "_____no_output_____" ] ], [ [ "# reset the environment\nenv_info = env.reset(train_mode=True)[brain_name]\n\n# number of agents \nnum_agents = len(env_info.agents)\nprint('Number of agents:', num_agents)\n\n# size of each action\naction_size = brain.vector_action_space_size\nprint('Size of each action:', action_size)\n\n# examine the state space \nstates = env_info.vector_observations\nstate_size = states.shape[1]\nprint('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))\nprint('The state for the first agent looks like:', states[0])", "Number of agents: 2\nSize of each action: 2\nThere are 2 agents. Each observes a state with length: 24\nThe state for the first agent looks like: [ 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. -6.51024199 -1.5\n -0. 0. 6.80061245 6. -0. 0. ]\n" ] ], [ [ "### 3. Take Random Actions in the Environment\n\nIn the next code cell, you will learn how to use the Python API to control the agents and receive feedback from the environment.\n\nOnce this cell is executed, you will watch the agents' performance, if they select actions at random with each time step. A window should pop up that allows you to observe the agents.\n\nOf course, as part of the project, you'll have to change the code so that the agents are able to use their experiences to gradually choose better actions when interacting with the environment!", "_____no_output_____" ] ], [ [ "for i in range(1, 20): # play game for 5 episodes\n env_info = env.reset(train_mode=False)[brain_name] # reset the environment \n states = env_info.vector_observations # get the current state (for each agent)\n scores = np.zeros(num_agents) # initialize the score (for each agent)\n while True:\n actions = np.random.randn(num_agents, action_size) # select an action (for each agent)\n actions = np.clip(actions, -1, 1) # all actions between -1 and 1\n env_info = env.step(actions)[brain_name] # send all actions to tne environment\n next_states = env_info.vector_observations # get next state (for each agent)\n rewards = env_info.rewards # get reward (for each agent)\n dones = env_info.local_done # see if episode finished\n scores += env_info.rewards # update the score (for each agent)\n states = next_states # roll over states to next time step\n if np.any(dones): # exit loop if episode finished\n break\n print('Score (max over agents) from episode {}: {}'.format(i, np.max(scores)))", "Score (max over agents) from episode 1: 0.0\nScore (max over agents) from episode 2: 0.10000000149011612\nScore (max over agents) from episode 3: 0.10000000149011612\nScore (max over agents) from episode 4: 0.10000000149011612\nScore (max over agents) from episode 5: 0.0\nScore (max over agents) from episode 6: 0.0\nScore (max over agents) from episode 7: 0.20000000298023224\nScore (max over agents) from episode 8: 0.0\nScore (max over agents) from episode 9: 0.0\nScore (max over agents) from episode 10: 0.0\nScore (max over agents) from episode 11: 0.0\nScore (max over agents) from episode 12: 0.0\nScore (max over agents) from episode 13: 0.0\nScore (max over agents) from episode 14: 0.0\nScore (max over agents) from episode 15: 0.0\nScore (max over agents) from episode 16: 0.0\nScore (max over agents) from episode 17: 0.0\nScore (max over agents) from episode 18: 0.0\nScore (max over agents) from episode 19: 
0.0\n" ] ], [ [ "When finished, you can close the environment.", "_____no_output_____" ], [ "env.close()", "_____no_output_____" ], [ "### 4. It's Your Turn!\n\nNow it's your turn to train your own agent to solve the environment! When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following:\n```python\nenv_info = env.reset(train_mode=True)[brain_name]\n```", "_____no_output_____" ], [ "### 5. Competing multi-agents as multiple non-competing agents", "_____no_output_____" ] ], [ [ "# Utilities functions\ndef unity_step_wrap(actions):\n \"\"\"Unity Environment action wrapper\n \n Params\n ======\n action (int): action to take\n \n Return\n ======\n OpenAI-like action outcome (tuple): bundled (next_state, reward, done)\n \"\"\"\n env_info = env.step(actions)[brain_name] # send the action to the environment\n next_states = env_info.vector_observations # get the next state\n rewards = env_info.rewards # get the reward\n dones = env_info.local_done # see if episode has finished\n return (next_states, rewards, dones)\n\ndef moving_average(a, n=3) :\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n\ndef plot_scores(scores, smooth_window=100):\n scores_smoothed = moving_average(scores, smooth_window)\n # plot the scores\n fig = plt.figure(figsize=(8, 6))\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores, linewidth=1, alpha=0.4, color='steelblue')\n plt.plot(np.arange(len(scores))[smooth_window-1:,], scores_smoothed, linewidth=1.5, alpha=1, color='firebrick')\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.show()\n return fig\n\ndef load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n return (checkpoint['actor'], checkpoint['critic'], checkpoint['scores'])", "_____no_output_____" ], [ "agent = Agent(state_size=state_size, action_size=action_size, random_seed=2)", "_____no_output_____" ], [ "def ddpg(n_episodes=1000, max_t=1000, print_every=100, learn_every=5, min_noise=0.02, solved_score = 0.5, name=\"default\"):\n scores_deque = deque(maxlen=print_every)\n scores = []\n noise = 1.0\n min_noise = min_noise\n noise_reduction = min_noise**(1/n_episodes) # Reaches min_noise after n_episodes with exponential decrease\n \n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name]\n states = env_info.vector_observations \n \n agent.reset()\n scores_episode = np.zeros(num_agents)\n noise *= noise_reduction\n \n for t in range(max_t):\n actions = agent.act(states, noise=max(noise, min_noise))\n next_states, rewards, dones = unity_step_wrap(actions)\n \n for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):\n agent.save_experience(state, action, reward, next_state, done)\n \n scores_episode += rewards\n states = next_states \n \n if t % learn_every == 0:\n agent.step()\n \n if np.any(dones):\n break \n \n score = np.max(scores_episode)\n scores_deque.append(score)\n scores.append(score)\n \n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), end=\"\")\n \n # Save checkpoint\n checkpoint = {\n 'actor': agent.actor_local.state_dict(),\n 'critic': agent.critic_local.state_dict(),\n 'scores': scores\n }\n torch.save(checkpoint, 'saved-checkpoints/checkpoint-' + name + '.pth')\n \n if i_episode % print_every == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))\n \n if np.mean(scores_deque) > solved_score:\n break\n 
\n return scores", "_____no_output_____" ] ], [ [ "* **Solving the environment**", "_____no_output_____" ] ], [ [ "scores = ddpg(n_episodes=10000, min_noise=0.02, learn_every=5, name='solved')", "Episode 100\tAverage Score: 0.03\nEpisode 200\tAverage Score: 0.03\nEpisode 300\tAverage Score: 0.05\nEpisode 400\tAverage Score: 0.07\nEpisode 500\tAverage Score: 0.04\nEpisode 600\tAverage Score: 0.06\nEpisode 700\tAverage Score: 0.08\nEpisode 800\tAverage Score: 0.08\nEpisode 900\tAverage Score: 0.06\nEpisode 1000\tAverage Score: 0.11\nEpisode 1100\tAverage Score: 0.13\nEpisode 1180\tAverage Score: 0.52" ], [ "_, _, scores_solved = load_checkpoint('saved-checkpoints/checkpoint-solved.pth')\nfigure = plot_scores(scores_solved)", "_____no_output_____" ] ], [ [ "* **Monitoring beyond solving scores in the long run**", "_____no_output_____" ] ], [ [ "scores = ddpg(n_episodes=10000, min_noise=0.01, learn_every=5, solved_score = 99, name='in-the-long-run')", "Episode 100\tAverage Score: 0.02\nEpisode 200\tAverage Score: 0.01\nEpisode 300\tAverage Score: 0.03\nEpisode 400\tAverage Score: 0.05\nEpisode 500\tAverage Score: 0.04\nEpisode 600\tAverage Score: 0.06\nEpisode 700\tAverage Score: 0.06\nEpisode 800\tAverage Score: 0.08\nEpisode 900\tAverage Score: 0.07\nEpisode 1000\tAverage Score: 0.13\nEpisode 1100\tAverage Score: 0.17\nEpisode 1200\tAverage Score: 0.71\nEpisode 1300\tAverage Score: 0.54\nEpisode 1400\tAverage Score: 0.57\nEpisode 1500\tAverage Score: 0.25\nEpisode 1600\tAverage Score: 0.27\nEpisode 1700\tAverage Score: 0.27\nEpisode 1800\tAverage Score: 0.24\nEpisode 1900\tAverage Score: 0.19\nEpisode 2000\tAverage Score: 0.12\nEpisode 2100\tAverage Score: 0.14\nEpisode 2200\tAverage Score: 0.12\nEpisode 2300\tAverage Score: 0.07\nEpisode 2400\tAverage Score: 0.07\nEpisode 2500\tAverage Score: 0.06\nEpisode 2600\tAverage Score: 0.06\nEpisode 2700\tAverage Score: 0.04\nEpisode 2800\tAverage Score: 0.04\nEpisode 2900\tAverage Score: 0.05\nEpisode 3000\tAverage Score: 0.04\nEpisode 3100\tAverage Score: 0.03\nEpisode 3200\tAverage Score: 0.03\nEpisode 3300\tAverage Score: 0.03\nEpisode 3400\tAverage Score: 0.02\nEpisode 3500\tAverage Score: 0.02\nEpisode 3600\tAverage Score: 0.02\nEpisode 3700\tAverage Score: 0.02\nEpisode 3800\tAverage Score: 0.03\nEpisode 3900\tAverage Score: 0.04\nEpisode 4000\tAverage Score: 0.01\nEpisode 4100\tAverage Score: 0.01\nEpisode 4200\tAverage Score: 0.01\nEpisode 4300\tAverage Score: 0.02\nEpisode 4400\tAverage Score: 0.02\nEpisode 4500\tAverage Score: 0.03\nEpisode 4600\tAverage Score: 0.02\nEpisode 4700\tAverage Score: 0.03\nEpisode 4800\tAverage Score: 0.04\nEpisode 4900\tAverage Score: 0.01\nEpisode 5000\tAverage Score: 0.02\nEpisode 5100\tAverage Score: 0.02\nEpisode 5200\tAverage Score: 0.01\nEpisode 5300\tAverage Score: 0.01\nEpisode 5400\tAverage Score: 0.02\nEpisode 5500\tAverage Score: 0.01\nEpisode 5600\tAverage Score: 0.01\nEpisode 5700\tAverage Score: 0.01\nEpisode 5800\tAverage Score: 0.01\nEpisode 5900\tAverage Score: 0.03\nEpisode 6000\tAverage Score: 0.02\nEpisode 6100\tAverage Score: 0.03\nEpisode 6200\tAverage Score: 0.02\nEpisode 6300\tAverage Score: 0.02\nEpisode 6400\tAverage Score: 0.03\nEpisode 6500\tAverage Score: 0.04\nEpisode 6600\tAverage Score: 0.04\nEpisode 6700\tAverage Score: 0.07\nEpisode 6800\tAverage Score: 0.04\nEpisode 6900\tAverage Score: 0.03\nEpisode 7000\tAverage Score: 0.03\nEpisode 7100\tAverage Score: 0.02\nEpisode 7200\tAverage Score: 0.04\nEpisode 7300\tAverage Score: 0.04\nEpisode 7400\tAverage Score: 0.03\nEpisode 
7500\tAverage Score: 0.02\nEpisode 7600\tAverage Score: 0.06\nEpisode 7700\tAverage Score: 0.05\nEpisode 7800\tAverage Score: 0.05\nEpisode 7900\tAverage Score: 0.06\nEpisode 8000\tAverage Score: 0.07\nEpisode 8100\tAverage Score: 0.07\nEpisode 8200\tAverage Score: 0.06\nEpisode 8300\tAverage Score: 0.05\nEpisode 8400\tAverage Score: 0.06\nEpisode 8500\tAverage Score: 0.06\nEpisode 8600\tAverage Score: 0.08\nEpisode 8700\tAverage Score: 0.11\nEpisode 8800\tAverage Score: 0.02\nEpisode 8900\tAverage Score: 0.02\nEpisode 9000\tAverage Score: 0.02\nEpisode 9100\tAverage Score: 0.03\nEpisode 9200\tAverage Score: 0.03\nEpisode 9300\tAverage Score: 0.03\nEpisode 9400\tAverage Score: 0.04\nEpisode 9500\tAverage Score: 0.05\nEpisode 9600\tAverage Score: 0.05\nEpisode 9700\tAverage Score: 0.05\nEpisode 9800\tAverage Score: 0.05\nEpisode 9900\tAverage Score: 0.04\nEpisode 10000\tAverage Score: 0.05\n" ], [ "_, _, scores_long_run = load_checkpoint('saved-checkpoints/checkpoint-in-the-long-run.pth')\nfigure = plot_scores(scores_long_run)", "_____no_output_____" ] ], [ [ "* **Watching solved environment agent in action**", "_____no_output_____" ] ], [ [ "# Reloading networks weights\nactor_weights, critic_weights, scores_solved = load_checkpoint('saved-checkpoints/checkpoint-solved.pth')\n\n# Instantiating the agent\nagent = Agent(state_size=state_size, action_size=action_size, random_seed=2)\nagent.actor_local.load_state_dict(actor_weights)\nagent.critic_local.load_state_dict(critic_weights)", "_____no_output_____" ], [ "# Agent acting over 30 episodes\nfor i in range(1, 30): # play game for 30 episodes\n env_info = env.reset(train_mode=False)[brain_name] # reset the environment \n states = env_info.vector_observations # get the current state (for each agent)\n scores = np.zeros(num_agents) # initialize the score (for each agent)\n while True:\n actions = agent.act(states)\n env_info = env.step(actions)[brain_name] # send all actions to tne environment\n next_states = env_info.vector_observations # get next state (for each agent)\n rewards = env_info.rewards # get reward (for each agent)\n dones = env_info.local_done # see if episode finished\n scores += env_info.rewards # update the score (for each agent)\n states = next_states # roll over states to next time step\n if np.any(dones): # exit loop if episode finished\n break\n print('Score (max over agents) from episode {}: {}'.format(i, np.max(scores)))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0b98e03171b899957cdf499eec79f50cf327078
28,552
ipynb
Jupyter Notebook
scenarios/detection/11_exploring_hyperparameters_on_azureml.ipynb
kant/ComputerVision
dbad9772ba80636f274cecf017ad95f7c7043ade
[ "MIT" ]
null
null
null
scenarios/detection/11_exploring_hyperparameters_on_azureml.ipynb
kant/ComputerVision
dbad9772ba80636f274cecf017ad95f7c7043ade
[ "MIT" ]
null
null
null
scenarios/detection/11_exploring_hyperparameters_on_azureml.ipynb
kant/ComputerVision
dbad9772ba80636f274cecf017ad95f7c7043ade
[ "MIT" ]
null
null
null
40.044881
660
0.636733
[ [ [ "<i>Copyright (c) Microsoft Corporation. All rights reserved.</i>\n\n<i>Licensed under the MIT License.</i>", "_____no_output_____" ], [ "# Testing different Hyperparameters and Benchmarking", "_____no_output_____" ], [ "In this notebook, we will cover how to test different hyperparameters for a particular dataset and how to benchmark different parameters across a group of datasets using AzureML. We assume familiarity with the basic concepts and parameters, which are discussed in the [01_training_introduction.ipynb](01_training_introduction.ipynb), [02_mask_rcnn.ipynb](02_mask_rcnn.ipynb) and [03_training_accuracy_vs_speed.ipynb](03_training_accuracy_vs_speed.ipynb) notebooks. ", "_____no_output_____" ], [ "We will be using a Faster R-CNN model with ResNet-50 backbone to find all objects in an image belonging to 4 categories: 'can', 'carton', 'milk_bottle', 'water_bottle'. We will then conduct hyper-parameter tuning to find the best set of parameters for this model. For this, we present an overall process of utilizing AzureML, specifically [Hyperdrive](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive?view=azure-ml-py) which can train and evaluate many different parameter combinations in parallel. We demonstrate the following key steps: \n* Configure AzureML Workspace\n* Create Remote Compute Target (GPU cluster)\n* Prepare Data\n* Prepare Training Script\n* Setup and Run Hyperdrive Experiment\n* Model Import, Re-train and Test\n\nThis notebook is very similar to the [24_exploring_hyperparameters_on_azureml.ipynb](../../classification/notebooks/24_exploring_hyperparameters_on_azureml.ipynb) hyperdrive notebook used for image classification. For key concepts of AzureML see this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-train-models-with-aml?view=azure-ml-py&toc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fpython%2Fapi%2Fazureml_py_toc%2Ftoc.json%3Fview%3Dazure-ml-py&bc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fpython%2Fazureml_py_breadcrumb%2Ftoc.json%3Fview%3Dazure-ml-py) on model training and evaluation.", "_____no_output_____" ] ], [ [ "import os\nimport sys\nfrom distutils.dir_util import copy_tree\nimport numpy as np\nimport scrapbook as sb\nimport uuid\n\nimport azureml.core\nfrom azureml.core import Workspace, Experiment\nfrom azureml.core.compute import ComputeTarget, AmlCompute\nfrom azureml.core.compute_target import ComputeTargetException\nimport azureml.data\nfrom azureml.train.estimator import Estimator\nfrom azureml.train.hyperdrive import (\n RandomParameterSampling, GridParameterSampling, BanditPolicy, HyperDriveConfig, PrimaryMetricGoal, choice, uniform\n)\nimport azureml.widgets as widgets\n\nsys.path.append(\"../../\")\nfrom utils_cv.common.azureml import get_or_create_workspace\nfrom utils_cv.common.data import unzip_url\nfrom utils_cv.detection.data import Urls", "_____no_output_____" ] ], [ [ "Ensure edits to libraries are loaded and plotting is shown in the notebook.", "_____no_output_____" ] ], [ [ "%reload_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ] ], [ [ "We now define some parameters which will be used in this notebook:", "_____no_output_____" ] ], [ [ "# Azure resources\nsubscription_id = \"YOUR_SUBSCRIPTION_ID\"\nresource_group = \"YOUR_RESOURCE_GROUP_NAME\" \nworkspace_name = \"YOUR_WORKSPACE_NAME\" \nworkspace_region = \"YOUR_WORKSPACE_REGION\" #Possible values eastus, eastus2, etc.\n\n# Choose a size for our cluster and the maximum number of 
nodes\nVM_SIZE = \"STANDARD_NC6\" #STANDARD_NC6S_V3\"\nMAX_NODES = 8\n\n# Hyperparameter grid search space\nIM_MAX_SIZES = [600] #Default is 1333 pixels, defining small values here to speed up training\nLEARNING_RATES = [1e-4, 3e-4, 1e-3, 3e-3, 1e-2]\n\n# Image data\nDATA_PATH = unzip_url(Urls.fridge_objects_path, exist_ok=True)\n\n# Path to utils_cv library\nUTILS_DIR = os.path.join('..', '..', 'utils_cv')", "_____no_output_____" ] ], [ [ "### 1. Config AzureML workspace\nBelow we setup (or load an existing) AzureML workspace, and get all its details as follows. Note that the resource group and workspace will get created if they do not yet exist. For more information regaring the AzureML workspace see also the [20_azure_workspace_setup.ipynb](../../classification/notebooks/20_azure_workspace_setup.ipynb) notebook in the image classification folder.\n\nTo simplify clean-up (see end of this notebook), we recommend creating a new resource group to run this notebook.", "_____no_output_____" ] ], [ [ "ws = get_or_create_workspace(\n subscription_id, resource_group, workspace_name, workspace_region\n)\n\n# Print the workspace attributes\nprint(\n \"Workspace name: \" + ws.name,\n \"Workspace region: \" + ws.location,\n \"Subscription id: \" + ws.subscription_id,\n \"Resource group: \" + ws.resource_group,\n sep=\"\\n\",\n)", "_____no_output_____" ] ], [ [ "### 2. Create Remote Target\nWe create a GPU cluster as our remote compute target. If a cluster with the same name already exists in our workspace, the script will load it instead. This [link](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#compute-targets-for-training) provides more information about how to set up a compute target on different locations.\n\nBy default, the VM size is set to use STANDARD\\_NC6 machines. However, if quota is available, our recommendation is to use STANDARD\\_NC6S\\_V3 machines which come with the much faster V100 GPU. 
We set the minimum number of nodes to zero so that the cluster won't incur additional compute charges when not in use.", "_____no_output_____" ] ], [ [ "CLUSTER_NAME = \"gpu-cluster\"\n\ntry:\n # Retrieve if a compute target with the same cluster name already exists\n compute_target = ComputeTarget(workspace=ws, name=CLUSTER_NAME)\n print(\"Found existing compute target.\")\n\nexcept ComputeTargetException:\n # If it doesn't already exist, we create a new one with the name provided\n print(\"Creating a new compute target...\")\n compute_config = AmlCompute.provisioning_configuration(\n vm_size=VM_SIZE, min_nodes=0, max_nodes=MAX_NODES\n )\n\n # create the cluster\n compute_target = ComputeTarget.create(ws, CLUSTER_NAME, compute_config)\n compute_target.wait_for_completion(show_output=True)\n\n# we can use get_status() to get a detailed status for the current cluster.\nprint(compute_target.get_status().serialize())", "Creating a new compute target...\nCreating\nSucceeded\nAmlCompute wait for completion finished\nMinimum number of nodes requested have been provisioned\n{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-09-30T18:20:25.067000+00:00', 'errors': None, 'creationTime': '2019-09-30T18:18:06.217384+00:00', 'modifiedTime': '2019-09-30T18:20:38.458332+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 8, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" ] ], [ [ "The compute cluster and its status can be seen in the portal. For example in the screenshot below, its automatically resizing (eventually to 0 nodes) to adjust to the number of open runs:\n<img src=\"media/hyperdrive_cluster.jpg\" width=\"800\" alt=\"Compute cluster status\">", "_____no_output_____" ], [ "### 3. Prepare data\nIn this notebook, we'll use the Fridge Objects dataset, which is already stored in the correct format. We then upload our data to the AzureML workspace.\n", "_____no_output_____" ] ], [ [ "# Retrieving default datastore that got automatically created when we setup a workspace\nds = ws.get_default_datastore()\n\n# We now upload the data to a unique sub-folder to avoid accidentially training/evaluating also including older images.\ndata_subfolder = str(uuid.uuid4())\nds.upload(\n src_dir=DATA_PATH, target_path=data_subfolder, overwrite=False, show_progress=True\n)", "_____no_output_____" ] ], [ [ "\nHere's where you can see the data in your portal: \n<img src=\"media/datastore.jpg\" width=\"800\" alt=\"Datastore screenshot for Hyperdrive notebook run\">\n\n### 4. 
Prepare training script\n\nNext step is to prepare scripts that AzureML Hyperdrive will use to train and evaluate models with selected hyperparameters.", "_____no_output_____" ] ], [ [ "# Create a folder for the training script and copy the utils_cv library into that folder\nscript_folder = os.path.join(os.getcwd(), \"hyperdrive\")\nos.makedirs(script_folder, exist_ok=True)\n_ = copy_tree(UTILS_DIR, os.path.join(script_folder, 'utils_cv'))", "_____no_output_____" ], [ "%%writefile $script_folder/train.py\n\n# Use different matplotlib backend to avoid error during remote execution\nimport matplotlib \nmatplotlib.use(\"Agg\") \nimport matplotlib.pyplot as plt\n\nimport os\nimport sys\nimport argparse\nimport numpy as np\nfrom pathlib import Path\nfrom azureml.core import Run\nfrom utils_cv.detection.dataset import DetectionDataset\nfrom utils_cv.detection.model import DetectionLearner, get_pretrained_fasterrcnn\nfrom utils_cv.common.gpu import which_processor\nwhich_processor()\n\n# Parse arguments passed by Hyperdrive\nparser = argparse.ArgumentParser()\nparser.add_argument('--data-folder', type=str, dest='data_dir')\nparser.add_argument('--data-subfolder', type=str, dest='data_subfolder')\nparser.add_argument('--epochs', type=int, dest='epochs', default=20) \nparser.add_argument('--batch_size', type=int, dest='batch_size', default=2)\nparser.add_argument('--learning_rate', type=float, dest='learning_rate', default=1e-4)\nparser.add_argument('--min_size', type=int, dest='min_size', default=800)\nparser.add_argument('--max_size', type=int, dest='max_size', default=1333)\nparser.add_argument('--rpn_pre_nms_top_n_train', type=int, dest='rpn_pre_nms_top_n_train', default=2000)\nparser.add_argument('--rpn_pre_nms_top_n_test', type=int, dest='rpn_pre_nms_top_n_test', default=1000)\nparser.add_argument('--rpn_post_nms_top_n_train', type=int, dest='rpn_post_nms_top_n_train', default=2000)\nparser.add_argument('--rpn_post_nms_top_n_test', type=int, dest='rpn_post_nms_top_n_test', default=1000)\nparser.add_argument('--rpn_nms_thresh', type=float, dest='rpn_nms_thresh', default=0.7)\nparser.add_argument('--box_score_thresh', type=float, dest='box_score_thresh', default=0.05)\nparser.add_argument('--box_nms_thresh', type=float, dest='box_nms_thresh', default=0.5)\nparser.add_argument('--box_detections_per_img', type=int, dest='box_detections_per_img', default=100)\nargs = parser.parse_args()\nparams = vars(args)\nprint(f\"params = {params}\")\n\n# Get training and validation data\ndata_path = os.path.join(params['data_dir'], params[\"data_subfolder\"])\nprint(f\"data_path={data_path}\")\ndata = DetectionDataset(data_path, train_pct=0.5, batch_size = params[\"batch_size\"])\nprint(\n f\"Training dataset: {len(data.train_ds)} | Training DataLoader: {data.train_dl} \\n \\\n Testing dataset: {len(data.test_ds)} | Testing DataLoader: {data.test_dl}\"\n)\n\n# Get model\nmodel = get_pretrained_fasterrcnn(\n num_classes = len(data.labels)+1,\n min_size = params[\"min_size\"],\n max_size = params[\"max_size\"],\n rpn_pre_nms_top_n_train = params[\"rpn_pre_nms_top_n_train\"],\n rpn_pre_nms_top_n_test = params[\"rpn_pre_nms_top_n_test\"],\n rpn_post_nms_top_n_train = params[\"rpn_post_nms_top_n_train\"], \n rpn_post_nms_top_n_test = params[\"rpn_post_nms_top_n_test\"],\n rpn_nms_thresh = params[\"rpn_nms_thresh\"],\n box_score_thresh = params[\"box_score_thresh\"], \n box_nms_thresh = params[\"box_nms_thresh\"],\n box_detections_per_img = params[\"box_detections_per_img\"]\n)\ndetector = DetectionLearner(data, 
model)\n\n# Run Training\ndetector.fit(params[\"epochs\"], lr=params[\"learning_rate\"], print_freq=30)\nprint(f\"Average precision after each epoch: {detector.ap}\")\n\n# Get accuracy on test set at IOU=0.5:0.95\nacc = float(detector.ap[-1])\n\n# Add log entries\nrun = Run.get_context()\nrun.log(\"accuracy\", float(acc)) # Logging our primary metric 'accuracy'\nrun.log(\"data_dir\", params[\"data_dir\"])\nrun.log(\"epochs\", params[\"epochs\"])\nrun.log(\"batch_size\", params[\"batch_size\"])\nrun.log(\"learning_rate\", params[\"learning_rate\"])\nrun.log(\"min_size\", params[\"min_size\"])\nrun.log(\"max_size\", params[\"max_size\"])\nrun.log(\"rpn_pre_nms_top_n_train\", params[\"rpn_pre_nms_top_n_train\"])\nrun.log(\"rpn_pre_nms_top_n_test\", params[\"rpn_pre_nms_top_n_test\"])\nrun.log(\"rpn_post_nms_top_n_train\", params[\"rpn_post_nms_top_n_train\"])\nrun.log(\"rpn_post_nms_top_n_test\", params[\"rpn_post_nms_top_n_test\"])\nrun.log(\"rpn_nms_thresh\", params[\"rpn_nms_thresh\"])\nrun.log(\"box_score_thresh\", params[\"box_score_thresh\"])\nrun.log(\"box_nms_thresh\", params[\"box_nms_thresh\"])\nrun.log(\"box_detections_per_img\", params[\"box_detections_per_img\"])", "Overwriting C:\\Users\\pabuehle\\Desktop\\ComputerVision\\scenarios\\detection\\hyperdrive/train.py\n" ] ], [ [ "### 5. Setup and run Hyperdrive experiment\n\n#### 5.1 Create Experiment \nExperiment is the main entry point into experimenting with AzureML. To create new Experiment or get the existing one, we pass our experimentation name 'hyperparameter-tuning'.\n", "_____no_output_____" ] ], [ [ "exp = Experiment(workspace=ws, name=\"hyperparameter-tuning\")", "_____no_output_____" ] ], [ [ "#### 5.2. Define search space\n\nNow we define the search space of hyperparameters. To test discrete parameter values use 'choice()', and for uniform sampling use 'uniform()'. For more options, see [Hyperdrive parameter expressions](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.parameter_expressions?view=azure-ml-py).\n\nHyperdrive provides three different parameter sampling methods: 'RandomParameterSampling', 'GridParameterSampling', and 'BayesianParameterSampling'. Details about each method can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters). Here, we use the 'GridParameterSampling'.", "_____no_output_____" ] ], [ [ "# Grid-search\nparam_sampling = GridParameterSampling(\n {\"--learning_rate\": choice(LEARNING_RATES), \"--max_size\": choice(IM_MAX_SIZES)}\n)", "_____no_output_____" ] ], [ [ "<b>AzureML Estimator</b> is the building block for training. 
An Estimator encapsulates the training code and parameters, the compute resources and runtime environment for a particular training scenario.\nWe create one for our experimentation with the dependencies our model requires as follows:", "_____no_output_____" ] ], [ [ "script_params = {\"--data-folder\": ds.as_mount(), \"--data-subfolder\": data_subfolder}\n\nest = Estimator(\n source_directory=script_folder,\n script_params=script_params,\n compute_target=compute_target,\n entry_script=\"train.py\",\n use_gpu=True,\n pip_packages=[\"nvidia-ml-py3\", \"fastai\"],\n conda_packages=[\n \"scikit-learn\",\n \"pycocotools>=2.0\",\n \"torchvision==0.3\",\n \"cudatoolkit==9.0\",\n ],\n)", "_____no_output_____" ] ], [ [ "We now create a HyperDriveConfig object which includes information about parameter space sampling, termination policy, primary metric, estimator and the compute target to execute the experiment runs on.", "_____no_output_____" ] ], [ [ "hyperdrive_run_config = HyperDriveConfig(\n estimator=est,\n hyperparameter_sampling=param_sampling,\n policy=None, # Do not use any early termination\n primary_metric_name=\"accuracy\",\n primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,\n max_total_runs=None, # Set to none to run all possible grid parameter combinations,\n max_concurrent_runs=MAX_NODES,\n)", "_____no_output_____" ] ], [ [ "#### 5.3 Run Experiment\n\nWe now run the parameter sweep and visualize the experiment progress using the `RunDetails` widget:\n<img src=\"media/hyperdrive_widget_run.jpg\" width=\"700px\">\n\nOnce completed, the accuracy for the different runs can be analyzed via the widget, for example below is a plot of the accuracy versus learning rate below (for two different image sizes)\n<img src=\"media/hyperdrive_widget_analysis.jpg\" width=\"700px\">\n", "_____no_output_____" ] ], [ [ "hyperdrive_run = exp.submit(config=hyperdrive_run_config)\nprint(f\"Url to hyperdrive run on the Azure portal: {hyperdrive_run.get_portal_url()}\")", "Url to hyperdrive run on the Azure portal: https://mlworkspace.azure.ai/portal/subscriptions/989b90f7-da4f-41f9-84c9-44848802052d/resourceGroups/pabuehle_delme2_hyperdrive/providers/Microsoft.MachineLearningServices/workspaces/pabuehle_ws/experiments/hyperparameter-tuning/runs/hyperparameter-tuning_1569867670036119\n" ], [ "widgets.RunDetails(hyperdrive_run).show()", "_____no_output_____" ], [ "hyperdrive_run.wait_for_completion()", "_____no_output_____" ] ], [ [ "To load an existing Hyperdrive Run instead of start new one, we can use \n```python\nhyperdrive_run = azureml.train.hyperdrive.HyperDriveRun(exp, <your-run-id>, hyperdrive_run_config=hyperdrive_run_config)\n```\nWe also can cancel the Run with \n```python \nhyperdrive_run.cancel().\n```\n\nOnce all the child-runs are finished, we can get the best run and the metrics.", "_____no_output_____" ] ], [ [ "# Get best run and print out metrics\nbest_run = hyperdrive_run.get_best_run_by_primary_metric()\nbest_run_metrics = best_run.get_metrics()\nparameter_values = best_run.get_details()[\"runDefinition\"][\"arguments\"]\nbest_parameters = dict(zip(parameter_values[::2], parameter_values[1::2]))\n\nprint(f\"* Best Run Id:{best_run.id}\")\nprint(best_run)\nprint(\"\\n* Best hyperparameters:\")\nprint(best_parameters)\nprint(f\"Accuracy = {best_run_metrics['accuracy']}\")\nprint(\"Learning Rate =\", best_run_metrics[\"learning_rate\"])", "* Best Run Id:hyperparameter-tuning_1569867670036119_4\nRun(Experiment: hyperparameter-tuning,\nId: hyperparameter-tuning_1569867670036119_4,\nType: 
azureml.scriptrun,\nStatus: Completed)\n\n* Best hyperparameters:\n{'--data-folder': '$AZUREML_DATAREFERENCE_workspaceblobstore', '--data-subfolder': '01679d79-1c47-49b8-88c3-d657f36b0c0f', '--learning_rate': '0.01', '--max_size': '600'}\nAccuracy = 0.8918015856432082\nLearning Rate = 0.01\n" ], [ "hyperdrive_run.get_children_sorted_by_primary_metric()", "_____no_output_____" ] ], [ [ "### 7. Clean up\n\nTo avoid unnecessary expenses, all resources which were created in this notebook need to be deleted once the parameter search is concluded. To simplify this clean-up step, we recommend creating a new resource group to run this notebook. This resource group can then be deleted, e.g. using the Azure Portal, which will remove all created resources.", "_____no_output_____" ] ], [ [ "# Log some outputs using scrapbook which are used during testing to verify correct notebook execution\nsb.glue(\"best_accuracy\", best_run_metrics[\"accuracy\"])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d0b9a29ed7fe13b89a58303db6e36c62a4687d2b
6,373
ipynb
Jupyter Notebook
Project/PlayerDetection.ipynb
fancent/CSC420
98cf1e091aabf9dde3f26c8e97b5b97d091c859f
[ "MIT" ]
1
2020-12-20T17:30:05.000Z
2020-12-20T17:30:05.000Z
Project/PlayerDetection.ipynb
fancent/CSC420
98cf1e091aabf9dde3f26c8e97b5b97d091c859f
[ "MIT" ]
null
null
null
Project/PlayerDetection.ipynb
fancent/CSC420
98cf1e091aabf9dde3f26c8e97b5b97d091c859f
[ "MIT" ]
null
null
null
30.78744
125
0.498196
[ [ [ "import os\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision.transforms import ToTensor, ToPILImage\nfrom torchvision.models.detection import fasterrcnn_resnet50_fpn\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nfrom PIL import Image", "_____no_output_____" ], [ "class PlayerDataset(Dataset):\n def __init__(self, root):\n self.root = root\n self.images = list(sorted(os.listdir(root + '/images')))\n self.targets = [target for target in list(sorted(os.listdir(root + '/targets'))) if target != 'classes.txt']\n \n def __len__(self):\n return len(self.images)\n \n def __getitem__(self, idx):\n image_path = os.path.join(self.root, 'images', self.images[idx])\n target_path = os.path.join(self.root, 'targets', self.targets[idx])\n \n image = ToTensor()(Image.open(image_path).convert(\"RGB\"))\n \n f = open(target_path)\n target = f.readline().strip().split()\n \n w = 1280\n h = 720\n \n center_x = float(target[1]) * w\n center_y = float(target[2]) * h\n bbox_w = float(target[3]) * w\n bbox_h = float(target[4]) * h\n \n x0 = round(center_x - (bbox_w / 2))\n x1 = round(center_x + (bbox_w / 2))\n y0 = round(center_y - (bbox_h / 2))\n y1 = round(center_y + (bbox_h / 2))\n \n print(x1 - x0)\n print(y1 - y0)\n \n boxes = torch.as_tensor([x0, y0, x1, y1], dtype=torch.float32)\n labels = torch.as_tensor(0, dtype=torch.int64)\n \n target = [{'boxes': boxes, 'labels': labels}]\n \n return image, target", "_____no_output_____" ], [ "def train_model(model, optimizer, lr_scheduler, data_loader, device, num_epochs):\n model.train()\n for epoch in range(num_epochs):\n running_loss = 0.0\n for images, targets in data_loader:\n images = list(image.to(device) for image in images)\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n print(targets)\n \n loss_dict = model(images, targets)\n losses = sum(loss for loss in loss_dict.values())\n \n optimizer.zero_grad()\n losses.backward()\n optimizer.step()\n lr_scheduler.step()\n \n running_loss += losses.item()\n print('epoch:%d loss: %.3f' % (epoch + 1, running_loss))", "_____no_output_____" ], [ "def evaluate(model, data_loader, device):\n model.eval()\n cpu_device = torch.device(\"cpu\")\n with torch.no_grad():\n for images, targets in data_loader:\n images = list(image.to(device) for image in images)\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n \n outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in model(images)]\n print(outputs)", "_____no_output_____" ], [ "model = fasterrcnn_resnet50_fpn(num_classes=1)\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\nmodel.to(device)\n\ntrain_dataset = PlayerDataset('data/train')\ntest_dataset = PlayerDataset('data/test')\n\ntrain_data_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=4)\ntest_data_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4)\n\nparams = [p for p in model.parameters() if p.requires_grad]\noptimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)\nlr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)", "_____no_output_____" ], [ "train_model(model, optimizer, lr_scheduler, train_data_loader, device, 1)", "80\n136\n85\n67\n151\n146\n54\n131\n56\n146\n61\n147\n81\n151\n66\n150\n48\n150\n[{'boxes': tensor([[ 49., 227., 134., 378.]]), 'labels': tensor([0])}]\n89\n146\nepoch:1 loss: 0.698\n[{'boxes': tensor([[202., 212., 282., 348.]]), 'labels': 
tensor([0])}]\n62\n147\nepoch:1 loss: 1.395\n[{'boxes': tensor([[ 9., 232., 76., 378.]]), 'labels': tensor([0])}]\n" ], [ "evaluate(model, test_data_loader, device)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
d0b9a676aa0af2e92a3bd8e6f2ea85b13d881cea
53,998
ipynb
Jupyter Notebook
Untitled.ipynb
GraphGrailAi/ruGPT3-ZhirV
aaca5c7aaea64bbe28314b09add1c7e69b7e75ab
[ "MIT" ]
2
2022-02-10T04:20:04.000Z
2022-03-11T09:56:55.000Z
Untitled.ipynb
GraphGrailAi/ruGPT3-ZhirV
aaca5c7aaea64bbe28314b09add1c7e69b7e75ab
[ "MIT" ]
null
null
null
Untitled.ipynb
GraphGrailAi/ruGPT3-ZhirV
aaca5c7aaea64bbe28314b09add1c7e69b7e75ab
[ "MIT" ]
null
null
null
58.502709
914
0.575632
[ [ [ "!git clone https://github.com/GraphGrailAi/ruGPT3-ZhirV", "Cloning into 'ruGPT3-ZhirV'...\nremote: Enumerating objects: 280, done.\u001b[K\nremote: Counting objects: 100% (280/280), done.\u001b[K\nremote: Compressing objects: 100% (170/170), done.\u001b[K\nremote: Total 280 (delta 109), reused 279 (delta 108), pack-reused 0\u001b[K\nReceiving objects: 100% (280/280), 6.07 MiB | 2.10 MiB/s, done.\nResolving deltas: 100% (109/109), done.\n" ], [ "cd ruGPT3-ZhirV", "/home/jovyan/ruGPT3-ZhirV\n" ], [ "cd ..", "/home/jovyan\n" ], [ "!pip3 install -r requirements.txt", "Defaulting to user installation because normal site-packages is not writeable\nRequirement already satisfied: nltk>=3.4 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 1)) (3.5)\nRequirement already satisfied: numpy>=1.15.4 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 2)) (1.18.4)\nRequirement already satisfied: pandas>=0.24.0 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 3)) (1.0.3)\nRequirement already satisfied: sentencepiece>=0.1.8 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 4)) (0.1.91)\nCollecting tensorflow>=1.12.0\n Using cached tensorflow-2.3.1-cp36-cp36m-manylinux2010_x86_64.whl (320.4 MB)\nCollecting boto3==1.11.11\n Using cached boto3-1.11.11-py2.py3-none-any.whl (128 kB)\nCollecting regex==2020.1.8\n Using cached regex-2020.1.8-cp36-cp36m-manylinux2010_x86_64.whl (689 kB)\nCollecting transformers==2.8.0\n Using cached transformers-2.8.0-py3-none-any.whl (563 kB)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from nltk>=3.4->-r requirements.txt (line 1)) (4.46.0)\nRequirement already satisfied: click in /usr/local/lib/python3.6/dist-packages (from nltk>=3.4->-r requirements.txt (line 1)) (7.1.2)\nRequirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from nltk>=3.4->-r requirements.txt (line 1)) (0.15.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24.0->-r requirements.txt (line 3)) (2020.1)\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24.0->-r requirements.txt (line 3)) (2.8.1)\nRequirement already satisfied: h5py<2.11.0,>=2.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (2.10.0)\nRequirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (0.34.2)\nRequirement already satisfied: gast==0.3.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (0.3.3)\nCollecting tensorboard<3,>=2.3.0\n Using cached tensorboard-2.4.0-py3-none-any.whl (10.6 MB)\nRequirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (1.12.1)\nRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (3.2.1)\nRequirement already satisfied: keras-preprocessing<1.2,>=1.1.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (1.1.2)\nRequirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (0.9.0)\nCollecting tensorflow-estimator<2.4.0,>=2.3.0\n Using cached 
tensorflow_estimator-2.3.0-py2.py3-none-any.whl (459 kB)\nRequirement already satisfied: astunparse==1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (1.6.3)\nRequirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (1.29.0)\nRequirement already satisfied: google-pasta>=0.1.8 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (0.2.0)\nRequirement already satisfied: protobuf>=3.9.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (3.12.2)\nRequirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (1.15.0)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.12.0->-r requirements.txt (line 5)) (1.1.0)\nRequirement already satisfied: s3transfer<0.4.0,>=0.3.0 in /usr/local/lib/python3.6/dist-packages (from boto3==1.11.11->-r requirements.txt (line 6)) (0.3.3)\nRequirement already satisfied: jmespath<1.0.0,>=0.7.1 in /usr/local/lib/python3.6/dist-packages (from boto3==1.11.11->-r requirements.txt (line 6)) (0.10.0)\nCollecting botocore<1.15.0,>=1.14.11\n Using cached botocore-1.14.17-py2.py3-none-any.whl (5.9 MB)\nCollecting filelock\n Using cached filelock-3.0.12-py3-none-any.whl (7.6 kB)\nCollecting dataclasses; python_version < \"3.7\"\n Using cached dataclasses-0.8-py3-none-any.whl (19 kB)\nCollecting tokenizers==0.5.2\n Using cached tokenizers-0.5.2-cp36-cp36m-manylinux1_x86_64.whl (3.7 MB)\nRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from transformers==2.8.0->-r requirements.txt (line 8)) (2.23.0)\nProcessing /home/jovyan/.cache/pip/wheels/29/3c/fd/7ce5c3f0666dab31a50123635e6fb5e19ceb42ce38d4e58f45/sacremoses-0.0.43-cp36-none-any.whl\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (3.2.2)\nRequirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (1.16.0)\nRequirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (1.6.0.post3)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (1.0.1)\nRequirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (47.1.0)\nRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (0.4.1)\nRequirement already satisfied: docutils<0.16,>=0.10 in /usr/local/lib/python3.6/dist-packages (from botocore<1.15.0,>=1.14.11->boto3==1.11.11->-r requirements.txt (line 6)) (0.15.2)\nRequirement already satisfied: urllib3<1.26,>=1.20; python_version != \"3.4\" in /usr/local/lib/python3.6/dist-packages (from botocore<1.15.0,>=1.14.11->boto3==1.11.11->-r requirements.txt (line 6)) (1.25.9)\nRequirement already satisfied: chardet<4,>=3.0.2 in 
/usr/local/lib/python3.6/dist-packages (from requests->transformers==2.8.0->-r requirements.txt (line 8)) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==2.8.0->-r requirements.txt (line 8)) (2.9)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==2.8.0->-r requirements.txt (line 8)) (2020.4.5.1)\nRequirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (1.6.0)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (0.2.8)\nRequirement already satisfied: rsa<4.1,>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (4.0)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (4.1.0)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (1.3.0)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (3.1.0)\nRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.6/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (0.4.8)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<3,>=2.3.0->tensorflow>=1.12.0->-r requirements.txt (line 5)) (3.1.0)\n\u001b[31mERROR: tensorflow-gpu 2.2.0 has requirement tensorboard<2.3.0,>=2.2.0, but you'll have tensorboard 2.4.0 which is incompatible.\u001b[0m\n\u001b[31mERROR: tensorflow-gpu 2.2.0 has requirement tensorflow-estimator<2.3.0,>=2.2.0, but you'll have tensorflow-estimator 2.3.0 which is incompatible.\u001b[0m\n\u001b[31mERROR: awscli 1.18.135 has requirement botocore==1.17.58, but you'll have botocore 1.14.17 which is incompatible.\u001b[0m\nInstalling collected packages: tensorboard, tensorflow-estimator, tensorflow, botocore, boto3, regex, filelock, dataclasses, tokenizers, sacremoses, transformers\n\u001b[33m WARNING: The script tensorboard is installed in '/home/jovyan/.local/bin' which is not on PATH.\n Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n\u001b[33m WARNING: The scripts estimator_ckpt_converter, saved_model_cli, tensorboard, tf_upgrade_v2, tflite_convert, toco and toco_from_protos are installed in '/home/jovyan/.local/bin' which is not on PATH.\n Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n\u001b[33m WARNING: The script sacremoses is installed in '/home/jovyan/.local/bin' which is not on PATH.\n Consider adding this directory to PATH or, if you prefer to 
suppress this warning, use --no-warn-script-location.\u001b[0m\nSuccessfully installed boto3-1.11.11 botocore-1.14.17 dataclasses-0.8 filelock-3.0.12 regex-2020.1.8 sacremoses-0.0.43 tensorboard-2.4.0 tensorflow-2.3.1 tensorflow-estimator-2.3.0 tokenizers-0.5.2 transformers-2.8.0\n\u001b[33mWARNING: You are using pip version 20.1.1; however, version 20.2.4 is available.\nYou should consider upgrading via the '/usr/bin/python -m pip install --upgrade pip' command.\u001b[0m\n" ] ], [ [ "!python generate_transformers.py \\\n --model_type=gpt2 \\\n --model_name_or_path=sberbank-ai/rugpt3large_based_on_gpt2 \\\n --k=5 \\\n --p=0.95 \\\n --length=100", "_____no_output_____" ] ], [ [ "Обучение эссе", "_____no_output_____" ], [ "!python pretrain_transformers.py \\\n --output_dir=/home/jovyan/ruGPT3-ZhirV/ \\\n --overwrite_output_dir \\\n --model_type=gpt2 \\\n --model_name_or_path=sberbank-ai/rugpt3large_based_on_gpt2 \\\n --do_train \\\n --train_data_file=/home/jovyan/ruGPT3-ZhirV/data/all_essays.jsonl \\\n --do_eval \\\n --eval_data_file=/home/jovyan/ruGPT3-ZhirV/data/valid_essays.jsonl \\\n --num_train_epochs 10 \\\n --overwrite_cache \\\n --block_size=1024 \\\n --per_gpu_train_batch_size 1 \\\n --gradient_accumulation_steps 8", "_____no_output_____" ], [ "# Обучение Жириновский", "_____no_output_____" ] ], [ [ "!python pretrain_transformers.py \\\n--output_dir=/home/jovyan/ruGPT3-ZhirV/ \\\n--overwrite_output_dir \\\n--model_type=gpt2 \\\n--model_name_or_path=sberbank-ai/rugpt3large_based_on_gpt2 \\\n--do_train \\\n--train_data_file=/home/jovyan/ruGPT3-ZhirV/data/girik_all2.jsonl \\\n--do_eval \\\n--eval_data_file=/home/jovyan/ruGPT3-ZhirV/data/girik_valid.jsonl \\\n--num_train_epochs 20 \\\n--overwrite_cache \\\n--block_size=1024 \\\n--per_gpu_train_batch_size 1 \\\n--gradient_accumulation_steps 8", "2020-11-22 17:37:37.710892: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1\n11/22/2020 17:37:45 - WARNING - __main__ - Process rank: -1, device: cuda, n_gpu: 1, distributed training: False, 16-bits training: False\n11/22/2020 17:37:46 - INFO - transformers.configuration_utils - loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/sberbank-ai/rugpt3large_based_on_gpt2/config.json from cache at /home/jovyan/.cache/torch/transformers/53218293a9edec913332b4f2d178496a60f98d64a1af74f92984804152f9404c.02a103afdbdbf4896cc41fc6495e47b7e5e2f353a287fe98d178e669be028903\n11/22/2020 17:37:46 - INFO - transformers.configuration_utils - Model config GPT2Config {\n \"_num_labels\": 2,\n \"activation_function\": \"gelu_new\",\n \"architectures\": [\n \"GPT2LMHeadModel\"\n ],\n \"attn_pdrop\": 0.1,\n \"bad_words_ids\": null,\n \"bos_token_id\": 50256,\n \"decoder_start_token_id\": null,\n \"do_sample\": false,\n \"early_stopping\": false,\n \"embd_pdrop\": 0.1,\n \"eos_token_id\": 50256,\n \"finetuning_task\": null,\n \"gradient_checkpointing\": false,\n \"id2label\": {\n \"0\": \"LABEL_0\",\n \"1\": \"LABEL_1\"\n },\n \"initializer_range\": 0.02,\n \"is_decoder\": false,\n \"is_encoder_decoder\": false,\n \"label2id\": {\n \"LABEL_0\": 0,\n \"LABEL_1\": 1\n },\n \"layer_norm_epsilon\": 1e-05,\n \"length_penalty\": 1.0,\n \"max_length\": 20,\n \"min_length\": 0,\n \"model_type\": \"gpt2\",\n \"n_ctx\": 2048,\n \"n_embd\": 1536,\n \"n_head\": 16,\n \"n_inner\": null,\n \"n_layer\": 24,\n \"n_positions\": 2048,\n \"no_repeat_ngram_size\": 0,\n \"num_beams\": 1,\n \"num_return_sequences\": 1,\n \"output_attentions\": 
false,\n \"output_hidden_states\": false,\n \"output_past\": true,\n \"pad_token_id\": null,\n \"prefix\": null,\n \"pruned_heads\": {},\n \"repetition_penalty\": 1.0,\n \"resid_pdrop\": 0.1,\n \"summary_activation\": null,\n \"summary_first_dropout\": 0.1,\n \"summary_proj_to_labels\": true,\n \"summary_type\": \"cls_index\",\n \"summary_use_proj\": true,\n \"task_specific_params\": null,\n \"temperature\": 1.0,\n \"top_k\": 50,\n \"top_p\": 1.0,\n \"torchscript\": false,\n \"use_bfloat16\": false,\n \"vocab_size\": 50257\n}\n\n11/22/2020 17:37:46 - INFO - transformers.configuration_utils - loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/sberbank-ai/rugpt3large_based_on_gpt2/config.json from cache at /home/jovyan/.cache/torch/transformers/53218293a9edec913332b4f2d178496a60f98d64a1af74f92984804152f9404c.02a103afdbdbf4896cc41fc6495e47b7e5e2f353a287fe98d178e669be028903\n11/22/2020 17:37:46 - INFO - transformers.configuration_utils - Model config GPT2Config {\n \"_num_labels\": 2,\n \"activation_function\": \"gelu_new\",\n \"architectures\": [\n \"GPT2LMHeadModel\"\n ],\n \"attn_pdrop\": 0.1,\n \"bad_words_ids\": null,\n \"bos_token_id\": 50256,\n \"decoder_start_token_id\": null,\n \"do_sample\": false,\n \"early_stopping\": false,\n \"embd_pdrop\": 0.1,\n \"eos_token_id\": 50256,\n \"finetuning_task\": null,\n \"gradient_checkpointing\": false,\n \"id2label\": {\n \"0\": \"LABEL_0\",\n \"1\": \"LABEL_1\"\n },\n \"initializer_range\": 0.02,\n \"is_decoder\": false,\n \"is_encoder_decoder\": false,\n \"label2id\": {\n \"LABEL_0\": 0,\n \"LABEL_1\": 1\n },\n \"layer_norm_epsilon\": 1e-05,\n \"length_penalty\": 1.0,\n \"max_length\": 20,\n \"min_length\": 0,\n \"model_type\": \"gpt2\",\n \"n_ctx\": 2048,\n \"n_embd\": 1536,\n \"n_head\": 16,\n \"n_inner\": null,\n \"n_layer\": 24,\n \"n_positions\": 2048,\n \"no_repeat_ngram_size\": 0,\n \"num_beams\": 1,\n \"num_return_sequences\": 1,\n \"output_attentions\": false,\n \"output_hidden_states\": false,\n \"output_past\": true,\n \"pad_token_id\": null,\n \"prefix\": null,\n \"pruned_heads\": {},\n \"repetition_penalty\": 1.0,\n \"resid_pdrop\": 0.1,\n \"summary_activation\": null,\n \"summary_first_dropout\": 0.1,\n \"summary_proj_to_labels\": true,\n \"summary_type\": \"cls_index\",\n \"summary_use_proj\": true,\n \"task_specific_params\": null,\n \"temperature\": 1.0,\n \"top_k\": 50,\n \"top_p\": 1.0,\n \"torchscript\": false,\n \"use_bfloat16\": false,\n \"vocab_size\": 50257\n}\n\n11/22/2020 17:37:46 - INFO - transformers.tokenization_utils - Model name 'sberbank-ai/rugpt3large_based_on_gpt2' not found in model shortcut name list (gpt2, gpt2-medium, gpt2-large, gpt2-xl, distilgpt2). 
Assuming 'sberbank-ai/rugpt3large_based_on_gpt2' is a path, a model identifier, or url to a directory containing tokenizer files.\n11/22/2020 17:37:49 - INFO - transformers.tokenization_utils - loading file https://s3.amazonaws.com/models.huggingface.co/bert/sberbank-ai/rugpt3large_based_on_gpt2/vocab.json from cache at /home/jovyan/.cache/torch/transformers/39e50567636d4014628a4fb0b7665a179a6109d96765eb4e6a10e9f2306f963d.de52bc5880aff0437c7f24c33b71ecae48f6f03f0449dfe933503132c6c1cc26\n11/22/2020 17:37:49 - INFO - transformers.tokenization_utils - loading file https://s3.amazonaws.com/models.huggingface.co/bert/sberbank-ai/rugpt3large_based_on_gpt2/merges.txt from cache at /home/jovyan/.cache/torch/transformers/0a94bcfc9ca640e268e53959b05f2ebe267a5cb686289b46cac4ffac589eac40.5885500c9887f152893bfadf3b511a9105243c57bfc45889e3552bdc61090032\n11/22/2020 17:37:49 - INFO - transformers.tokenization_utils - loading file https://s3.amazonaws.com/models.huggingface.co/bert/sberbank-ai/rugpt3large_based_on_gpt2/added_tokens.json from cache at None\n11/22/2020 17:37:49 - INFO - transformers.tokenization_utils - loading file https://s3.amazonaws.com/models.huggingface.co/bert/sberbank-ai/rugpt3large_based_on_gpt2/special_tokens_map.json from cache at None\n11/22/2020 17:37:49 - INFO - transformers.tokenization_utils - loading file https://s3.amazonaws.com/models.huggingface.co/bert/sberbank-ai/rugpt3large_based_on_gpt2/tokenizer_config.json from cache at None\n11/22/2020 17:37:49 - INFO - transformers.modeling_utils - loading weights file https://s3.amazonaws.com/models.huggingface.co/bert/sberbank-ai/rugpt3large_based_on_gpt2/pytorch_model.bin from cache at /home/jovyan/.cache/torch/transformers/5f2ce73f5df1b0b20e9c0d5fadbedefdc9b484edcbc39252a1c913b1b4ce6cd2.5bdac7adaf803c2b7192441aba3020af4140f7177089f8f95940a0c073059a31\n" ] ], [ [ "# Генерация Жириновский", "_____no_output_____" ] ], [ [ "from transformers import GPT2Tokenizer, GPT2LMHeadModel\n\ntokenizer = GPT2Tokenizer.from_pretrained(\"checkpoint-1000\")\nmodel = GPT2LMHeadModel.from_pretrained(\"checkpoint-1000\")\nmodel.to(\"cuda\")", "_____no_output_____" ], [ "import copy\n\nbad_word_ids = [\n [203], # \\n\n [225], # weird space 1\n [28664], # weird space 2\n [13298], # weird space 3\n [206], # \\r\n [49120], # html\n [25872], # http\n [3886], # amp\n [38512], # nbsp\n [10], # &\n [5436], # & (another)\n [5861], # http\n [372], # yet another line break\n [421, 4395], # МСК\n [64], # \\\n [33077], # https\n [1572], # ru\n [11101], # Источник\n]\n\ndef gen_fragment(context, bad_word_ids=bad_word_ids, print_debug_output=False):\n input_ids = tokenizer.encode(context, add_special_tokens=False, return_tensors=\"pt\").to(\"cuda\")\n input_ids = input_ids[:, -1700:]\n input_size = input_ids.size(1)\n output_sequences = model.generate(\n input_ids=input_ids,\n max_length=175 + input_size,\n min_length=40 + input_size,\n top_p=0.95,\n #top_k=0,\n do_sample=True,\n num_return_sequences=1,\n temperature=1.0, # 0.9,\n pad_token_id=0,\n eos_token_id=2,\n bad_words_ids=bad_word_ids,\n no_repeat_ngram_size=6\n )\n if len(output_sequences.shape) > 3:\n output_sequences.squeeze_()\n generated_sequence = output_sequences[0].tolist()[input_size:]\n if print_debug_output:\n for idx in generated_sequence:\n print(idx, tokenizer.decode([idx], clean_up_tokenization_spaces=True).strip())\n text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)\n text = text[: text.find(\"</s>\")]\n text = text[: text.rfind(\".\") + 1]\n return context + 
text\n\ndef gen_girik(context, sign, bad_word_ids, print_debug_output=False):\n bad_word_ids_girik = copy.copy(bad_word_ids)\n bad_word_ids_girik += [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in signs]\n bad_word_ids_girik += [tokenizer.encode(\".\" + bad_word, add_prefix_space=False) for bad_word in signs]\n return gen_fragment(context + \"\\n\\n\" + sign + \"\\n\", bad_word_ids_girik, print_debug_output=False)\n\nsigns = [\"Лингвистическому мусору и иностранным словам в русском языке не место!\",\n \"Будет ли Путин президентом после 2024 года?\", \n \"Кто победил: Армения или Азербайджан?\",\n \"И последнее. Когда в России настанет долгожданный мир во всём мире? И чтобы больше таких вопросов не было.\",\n \"Почему Европа постоянно вводит санкции против России?\",\n \"Не надо шутить с войной. Здесь другие ребята.\",\n \"Ночью наши учёные чуть-чуть изменят гравитационное поле Земли, и твоя страна будет под водой.\",\n \"Что было бы, если бы Жириновский стал президентом?\",\n \"Когда Россия станет самой богатой и могущественной страной в мире?\",\n \"Джордж, Джордж! Посмотри ковбойские фильмы!\",\n \"От чего коровы с ума сходят? От британской демократии.\",\n\n ]\nbeginning = \"Жириновский говорит:.\"\ncurrent_text = beginning\nfor sign in signs:\n current_text = gen_girik(current_text, sign, bad_word_ids)\nprint(current_text)", "Жириновский говорит:.\n\nЛингвистическому мусору и иностранным словам в русском языке не место!\n ДА! ДА! В русском языке есть иностранные слова, но их не должно быть! Я лично считаю, что все иностранные слова должны быть убраны из нашего великого и могучего языка! Они мешают нам правильно воспринимать русскую речь. Когда иностранное слово употребляется рядом с русскими словами, то это значит, что иностранное слово уже заняло правильное место в русском языке. Вот так должны звучать в русском языке все иностранные слова. А если есть какое-либо иностранное слово, то есть и другие иностранные слова, которые также должны быть убраны из русского языка. Я против того, чтобы иностранные слова занимали в русском языке правильные места. В русском языке все слова должны звучать красиво. Почему в русском языке много иностранных слов? Потому что нас с вами обманывают. Нас заставляют учить английский язык. Нас убеждают в том, что он самый красивый.\n\nБудет ли Путин президентом после 2024 года?\n Почему? Какие прогнозы? Владислав Сурков, советник Президента РФ Путина, поделился своим видением ситуации с окончательным определением будущего главы Российского государства. – Владислав Юрьевич, как Вы считаете, в 2024 году президентом России может стать Владимир Владимирович Путин, который одержал убедительную победу на президентских выборах в 2018 году? – На мой взгляд, вполне вероятно, что в 2024 году президентом РФ действительно будет Владимир Владимирович Путин, которого поддерживает, я бы даже сказал, боготворит большая часть россиян, я в этом совершенно уверен. Несмотря на то что Владимир Путин одержал убедительную победу на выборах и, безусловно, этот успех будет использован его сторонниками для укрепления президентской власти в России, он будет опираться исключительно на поддержку своей огромной армии сторонников, а также тех сил, которые его выдвинули в президенты России.\n\nКто победил: Армения или Азербайджан?\n Почему? Есть ли шансы у Армении вернуть Карабахский конфликт в правовое поле? И можно ли совместить в одном лице дипломата и политика? 
Обострение ситуации вокруг карабахского конфликта и перспективы его разрешения в российско-азербайджанских отношениях. Арцах – Самый проблемный регион Азербайджана, один из самых бедных. Он пострадал больше других территорий бывшего СССР. И в 1990 году, сразу после провозглашения независимости Азербайджана, этот регион подвергся этническому геноциду со стороны армянских националистов. В 1991 году они полностью уничтожили армянское население села Кармир-Уллу, расположенного в одноименном районе Азербайджана. Более 800 тысяч армян были убиты или пропали без вести. В 1994 году в результате общенациональной забастовки шахтеры прекратили добычу угля, и город Степанакерт оказался парализованным.\n\nИ последнее. Когда в России настанет долгожданный мир во всём мире? И чтобы больше таких вопросов не было.\n Восточная Европа. Взгляд через столетие Сегодня мы приглашаем вас совершить небольшое путешествие во времени, чтобы узнать, что происходило на территории Восточной Европы в начале 20-го века. Что представляла собой эта обширная территория тогда, в эпоху великих географических открытий? Какие этнические, политические и религиозные противоречия таились в её недрах? На эти и другие вопросы вам ответят книги из знаменитой серии «Рюриковичи» издательства «Вече», выпущенные в серии «ЖЗЛ». Вы узнаете о древних русских городах – Москве, Новгороде, Великом Новгороде и Пскове, об одном из величайших полководцев русской истории – Андрее Боголюбском, о князе Владимире Крестителе, о княгине Ольге и многом другом.\n\nПочему Европа постоянно вводит санкции против России?\n Санкции против России всегда были болезненным ударом по экономике Запада. Особенно больно бьют санкции по Европе. Евросоюз страдает из-за отсутствия в России товаров европейского производства. Особенно тяжело приходится в этом отношении гражданам Евросоюза. Без российских товаров они, например, не могут купить автомобиль. За всё приходится платить – и за труд рабочих, и за электричество, и за транспорт. Вот в чём главная беда и трагедия Евросоюза. Но ещё большая беда и трагедия Евросоюза – это Россия. Страдают и европейцы, и русские. Только в России на каждого человека, страдающего от санкций, приходится около трёхсот человек, страдающих от эмбарго России. Но и здесь есть выход из положения. Нужно, наоборот, поддержать тех, кто страдает, и помочь тем, кто в этом нуждается, особенно в России.\n\nНе надо шутить с войной. Здесь другие ребята.\n Это не ИГИЛ[1 - ИГИЛ – запрещённая в России террористическая организация]. ИГИЛ[2 - ИГИЛ – запрещённое в России террористическое организация] – это шутки для школьников. ИГИЛ[3 - ИГИЛ – запрещён в России террористическая организация] – это серьёзные ребята. ИГИЛ[4 - ИГИЛ – запрещёнв России террористическая организация] шутить не будет. ИГИЛ[5 - ИГИЛ – запрещённа в России террористическая организация], как Гитлер, пришёл за жизнями простых людей. ИГИЛ[6 - ИГИЛ – запрещён террористическая организация] не шуточки шутить. ИГИЛ[7 - ИГИЛ – запрещён запрещён в России террористическа организация] не шутит. ИГИЛ[8 - ИГИЛ – запрещённы в России террористическа организации] уничтожит вас всех.\n\nНочью наши учёные чуть-чуть изменят гравитационное поле Земли, и твоя страна будет под водой.\n Под водой. Они поднимут со дна океаны. Весь мир утонет. Весь мир будет под водой. Понимаешь? Весь! Только малая часть будет под водой. Но зато какая! Супер! Весь мир! Озеро Байкал, Ангара, море Лаптевых. Понимаешь? Там тоже будет супер! Супер! Весь мир будет под водой! Но не весь. Малый кусок в океане останется. Но зато какой! 
Море Лаптевых, море золота. И золото! Это тоже супер! Но это уже после войны будет. А пока только ночью. Ночью. Они чуть изменят гравитационное поле. И вся планета будет под водой. Но и мы под водой. Но сначала чуть-чуть сверху. Но это будет чуть-чуть. Но в океане. Под водой. Но зато под водой. И подольше подольше подольше под водой.\n\nЧто было бы, если бы Жириновский стал президентом?\n Жириновский тогда во Франции? В 2007 году в беседе с журналистами он высказал свою версию. А если бы стал президентом… Но ведь есть же и другие варианты. Это и другие варианты? Вот Жириновский ведь и во Франции баллотировался. Ведь и мэр там баллотировался. Ведь есть же другие. Ведь и есть другие кандидаты в президенты. Ведь это же не запрещено, не запрещено, не запрещено. И Туркмения же тоже баллотировалась, там много где-там. И Иран тоже не запрещают баллотировалась. И в Турции баллотировалась, там тоже много где-тамалась. Значит, там тоже есть альтернативные кандидаты. И Косово ведь не запрещено, там не запрещено. В Китае баллотировалась, а там много где-там запрещено. И в Европе. И там нельзя. И там можно баллотироваться.\n\nКогда Россия станет самой богатой и могущественной страной в мире?\n В ближайшие 10—20 лет. Если мы все ресурсы планеты используем. Это ресурсы нужно тратить в первую очередь на восстановление России, на возрождение российской державы. Потому что у нас самая сильная армия, самая мощная экономика и огромный ресурс. Мы единственная страна. Но ресурсов мало. Но мы не умеем ими правильно распоряжаться. Мы должны научиться правильно использовать их. Надо, и много чему научиться. А то опять проиграем. И опять проиграем. И в Сирии проигрывать. Будем опять проигрывать. Поэтому опять проиграем в очередной раз. Поэтому у Сирии проигрывать будем проигрывать. У нас опять проигрывать в очередной раз. И опять опять у Сирии проигрывать. И опять проиграем опять в очередной раз. Поэтому опять Сирия опять проиграем. У нас проиграем, опять у Украины будем проигрывать. Снова проиграем опять. И снова в очередной раз.\n\nДжордж, Джордж! Посмотри ковбойские фильмы!\n. Помолчи! Помолчи! Помоги мне выиграть! Помолчи! А то, что я там сказал, что я там сказал. Помолчи! Мне надо, что я там сказал! Помолчи! И помолчи! Помолчать и посмотри, что он там сказал! Давай играть в покер! Джордж!». Уколоть там кого-то там там. Помолчите! Помолчите, я сказал! Помолчите там! Помолчите и не мешайте мне играть в покер! Помолчите. Помолчите, кому надо — покер! А то, что хочу! Помолчите, кому не надо! Помолчите и помолчите.\n\nОт чего коровы с ума сходят? От британской демократии.\n А мы должны в парламенте сидеть и слушать. Помолчать! Помолчать, чтобы они там, что я сказал, кто там сказал. Помолчать и помолчать! О чем я сказал. Помолчать! А то, что надо делать, чтобы они там не сходили с ума! Помолчать! Молчать! Помолчать и помалкивать! А то, что мы там не хотим! Помолчать! Сидеть и помалкивать. Молчать, чтобы другие не сходили с ума. Сидеть в нашем парламенте. Пока другие там не сходили с разума! Помолчать! Помалкивать и помалкивать, где там не сходили с него. А то, что другие не сходили с разума. Молчать и не мешают. Помолчать! Сидят и помалкивают.\n" ] ] ]
[ "code", "raw", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "raw" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0b9b2a0d1abab77efe3c6b6d9e3556b0ee7de96
11,268
ipynb
Jupyter Notebook
notebooks/chap11/10_04/data_frame.ipynb
BlueMoon55/jekyll_python
133a0249f4534460edc2ef6947103601b0ae9154
[ "CC0-1.0" ]
null
null
null
notebooks/chap11/10_04/data_frame.ipynb
BlueMoon55/jekyll_python
133a0249f4534460edc2ef6947103601b0ae9154
[ "CC0-1.0" ]
null
null
null
notebooks/chap11/10_04/data_frame.ipynb
BlueMoon55/jekyll_python
133a0249f4534460edc2ef6947103601b0ae9154
[ "CC0-1.0" ]
null
null
null
22.809717
75
0.322595
[ [ [ "import pandas as pd\ndf = pd.DataFrame(\n {'A': [10, 20, 30, 40, 50], # column A and its values\n 'B': [0.8, 1.6, 2.4, 4.3, 7.6], # column B and its values\n 'C': [-1, -2.6, -3.5, -4.3, -5.1] }, # column C and its values\n index = ['row1', 'row2', 'row3', 'row4', 'row5'] # set the row labels\n)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df['A']", "_____no_output_____" ], [ "df['B']", "_____no_output_____" ], [ "df['C']", "_____no_output_____" ], [ "df[['A', 'C']]", "_____no_output_____" ], [ "df[1 : 4]", "_____no_output_____" ], [ "df[: 2]", "_____no_output_____" ], [ "df['row1' : 'row3']", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0b9b5a683578045e38063120cf55fbef4ffa6aa
6,230
ipynb
Jupyter Notebook
codes/data_statistics.ipynb
stevehuanghe/whats-cooking
ef1887abac41a4c4ccefdf61b7985fc7f62c6450
[ "Apache-2.0" ]
null
null
null
codes/data_statistics.ipynb
stevehuanghe/whats-cooking
ef1887abac41a4c4ccefdf61b7985fc7f62c6450
[ "Apache-2.0" ]
null
null
null
codes/data_statistics.ipynb
stevehuanghe/whats-cooking
ef1887abac41a4c4ccefdf61b7985fc7f62c6450
[ "Apache-2.0" ]
null
null
null
48.671875
1,266
0.606742
[ [ [ "#\n# This script creates a plot with the 10 most used ingredients.\n#\n# The original recipe, contained in the 'ingredients' column, is cleaned as follows:\n#\n# - to lowercase\n# - replacing symbols\n# - removing digits\n# - stemming the words using the WordNetLemmatizer\n#\n# The ingredients should be cleaned more, making 'low fat mozzarella' and \n# 'reduced fat mozzarella' the same ingredient. Ideas are welcome.\n# \n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom nltk.stem import WordNetLemmatizer\nfrom collections import Counter\n\n# Reading the data\ntrain = pd.read_json('../data/train.json')\nprint 'data loaded'\nstemmer = WordNetLemmatizer()\n#cachedStopWords = stopwords.words(\"english\")\n\n# Auxiliary function for cleaning\ndef clean_recipe(recipe):\n # To lowercase\n recipe = [ str.lower(i) for i in recipe ]\n\n # Remove some special characters\n # Individual replace calls have very good performance\n # http://stackoverflow.com/a/27086669/670873\n def replacing(i):\n i = i.replace('&', '').replace('(', '').replace(')','')\n i = i.replace('\\'', '').replace('\\\\', '').replace(',','')\n i = i.replace('.', '').replace('%', '').replace('/','')\n i = i.replace('\"', '')\n \n return i\n \n # Replacing characters\n recipe = [ replacing(i) for i in recipe ]\n \n # Remove digits\n recipe = [ i for i in recipe if not i.isdigit() ]\n \n # Stem ingredients\n recipe = [ stemmer.lemmatize(i) for i in recipe ]\n \n return recipe\n\n# The number of times each ingredient is used is stored in the 'sumbags' dictionary\nbags_of_words = [ Counter(clean_recipe(recipe)) for recipe in train.ingredients ]\nsumbags = sum(bags_of_words, Counter())\nprint 'plotting...'\n# Finally, plot the 10 most used ingredients\nplt.style.use(u'ggplot')\nfig = pd.DataFrame(sumbags, index=[0]).transpose()[0].sort(ascending=False, inplace=False)[:10].plot(kind='barh')\nfig.invert_yaxis()\nfig = fig.get_figure()\nfig.tight_layout()\nfig.savefig('10_most_used_ingredients.jpg')\n", "data loaded\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
d0b9ba200caa2c6acd210fe5b84771ae31d2889f
15,059
ipynb
Jupyter Notebook
packages/propensity/04.model_training.ipynb
google/compass
4c2b1718e223480c77ef5ed8a949a0c0f9ff69d3
[ "Apache-2.0" ]
5
2021-12-28T20:29:00.000Z
2022-03-30T17:22:19.000Z
packages/propensity/04.model_training.ipynb
google/compass
4c2b1718e223480c77ef5ed8a949a0c0f9ff69d3
[ "Apache-2.0" ]
null
null
null
packages/propensity/04.model_training.ipynb
google/compass
4c2b1718e223480c77ef5ed8a949a0c0f9ff69d3
[ "Apache-2.0" ]
null
null
null
32.665944
448
0.54479
[ [ [ "# Copyright 2022 Google LLC.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# 4. Model Training\n\nThis notebook demonstrates how to train a Propensity Model using BigQuery ML.", "_____no_output_____" ], [ "### Requirements\n\n* Input features used for training needs to be stored as a BigQuery table. This can be done using [2. ML Data Preparation Notebook](2.ml_data_preparation.ipynb).", "_____no_output_____" ], [ "### Install and import required modules", "_____no_output_____" ] ], [ [ "# Uncomment to install required python modules\n# !sh ../utils/setup.sh", "_____no_output_____" ], [ "# Add custom utils module to Python environment\nimport os\nimport sys\nsys.path.append(os.path.abspath(os.pardir))\n\nfrom gps_building_blocks.cloud.utils import bigquery as bigquery_utils\n\nfrom utils import model\nfrom utils import helpers", "_____no_output_____" ] ], [ [ "### Set paramaters", "_____no_output_____" ] ], [ [ "configs = helpers.get_configs('config.yaml')\ndest_configs = configs.destination\n\n# GCP project ID\nPROJECT_ID = dest_configs.project_id\n# Name of the BigQuery dataset\nDATASET_NAME = dest_configs.dataset_name", "_____no_output_____" ], [ "# To distinguish the separate runs of the training pipeline\nRUN_ID = 'TRAIN_01'\n\n# BigQuery table name containing model development dataset\nFEATURES_DEV_TABLE = f'features_dev_table_{RUN_ID}'\n\n# BigQuery table name containing model testing dataset\nFEATURES_TEST_TABLE = f'features_test_table_{RUN_ID}'\n\n# Output model name to save in BigQuery\nMODEL_NAME = f'propensity_model_{RUN_ID}'", "_____no_output_____" ] ], [ [ "Next, let's configure modeling options.", "_____no_output_____" ], [ "### Model and features configuration", "_____no_output_____" ], [ "Model options can be configured in detail based on BigQuery ML specifications\nlisted in [The CREATE MODEL statement](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create).\n\n**NOTE**: Propensity modeling supports only following four types of models available in BigQuery ML:\n- LOGISTIC_REG\n- [AUTOML_CLASSIFIER](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create-automl)\n- [BOOSTED_TREE_CLASSIFIER](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create-boosted-tree)\n- [DNN_CLASSIFIER](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create-dnn-models)\n\nIn order to use specific model options, you can add options to following configuration exactly same as listed in the [The CREATE MODEL statement](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create). 
For example, if you want to trian [AUTOML_CLASSIFIER](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create-automl) with `BUDGET_HOURS=1`, you can specify it as:\n\n```python\nparams = {\n 'model_type': 'AUTOML_CLASSIFIER',\n 'budget_hours': 1\n}\n```", "_____no_output_____" ] ], [ [ "# Read in Features table schema to select feature names for model training\nsql = (\"SELECT column_name \"\n f\"FROM `{PROJECT_ID}.{DATASET_NAME}`.INFORMATION_SCHEMA.COLUMNS \"\n f\"WHERE table_name='{FEATURES_DEV_TABLE}';\")\n\nprint(sql)\nfeatures_schema = bq_utils.run_query(sql).to_dataframe()\n\n# Columns to remove from the feature list\nto_remove = ['window_start_ts', 'window_end_ts', 'snapshot_ts', 'user_id',\n 'label', 'key', 'data_split']\n\n# Selected features for model training\ntraining_features = [v for v in features_schema['column_name']\n if v not in to_remove]\n\nprint('Number of training features:', len(training_features))\nprint(training_features)", "_____no_output_____" ], [ "# Set parameters for AUTOML_CLASSIFIER model\n\nFEATURE_COLUMNS = training_features\nTARGET_COLUMN = 'label'\n\nparams = {\n 'model_path': f'{PROJECT_ID}.{DATASET_NAME}.{MODEL_NAME}',\n 'features_table_path': f'{PROJECT_ID}.{DATASET_NAME}.{FEATURES_DEV_TABLE}',\n 'feature_columns': FEATURE_COLUMNS,\n 'target_column': TARGET_COLUMN,\n 'MODEL_TYPE': 'AUTOML_CLASSIFIER',\n 'BUDGET_HOURS': 1.0,\n # Enable data_split_col if you want to use custom data split.\n # Details on AUTOML data split column:\n # https://cloud.google.com/automl-tables/docs/prepare#split\n # 'DATA_SPLIT_COL': 'data_split',\n 'OPTIMIZATION_OBJECTIVE': 'MAXIMIZE_AU_ROC'\n}", "_____no_output_____" ] ], [ [ "## Train the model\n\nFirst, we initialize `PropensityModel` with config parameters.", "_____no_output_____" ] ], [ [ "bq_utils = bigquery_utils.BigQueryUtils(project_id=PROJECT_ID)\npropensity_model = model.PropensityModel(bq_utils=bq_utils,\n params=params)", "_____no_output_____" ] ], [ [ "Next cell triggers model training job in BigQuery which takes some time to finish depending on dataset size and model complexity. Set `verbose=True`, if you want to verify training query details.", "_____no_output_____" ] ], [ [ "propensity_model.train(verbose=False)", "_____no_output_____" ] ], [ [ "Following cell allows you to see detailed information about the input features used to train a model. It provides following columns:\n- input — The name of the column in the input training data.\n- min — The sample minimum. This column is NULL for non-numeric inputs.\n- max — The sample maximum. This column is NULL for non-numeric inputs.\n- mean — The average. This column is NULL for non-numeric inputs.\n- stddev — The standard deviation. This column is NULL for non-numeric inputs.\n- category_count — The number of categories. This column is NULL for non-categorical columns.\n- null_count — The number of NULLs.\n\nFor more details refer to [help page](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-feature).", "_____no_output_____" ] ], [ [ "propensity_model.get_feature_info()", "_____no_output_____" ] ], [ [ "### Evaluate the model\nThis section helps to do quick model evaluation to get following model metrics:\n\n* recall\n* accuracy\n* f1_score\n* log_loss\n* roc_auc\n\nTwo optional parameters can be specified for evaluation:\n\n* eval_table: BigQuery table containing evaluation dataset\n* threshold: Custom probability threshold to be used for evaluation (to binarize the predictions). 
Default value is 0.5.\n\nIf neither of these options are specified, the model is evaluated using evaluation dataset split during training with default threshold of 0.5.\n\n**NOTE:** This evaluation provides basic model performance metrics. For thorough evaluation refer to [5. Model evaluation notebook](5.model_evaluation_and_diagnostics.ipynb) notebook.\n\nTODO(): Add sql code to calculate the proportion of positive examples in the evaluation dataset to be used as the *threshold*.", "_____no_output_____" ] ], [ [ "# Model performance on the model development dataset on which the final\n# model has been trained\n\nEVAL_TABLE_NAME = FEATURES_DEV_TABLE\n\neval_params = {\n 'eval_table_path': f'{PROJECT_ID}.{DATASET_NAME}.{EVAL_TABLE_NAME}',\n 'threshold': 0.5\n}\npropensity_model.evaluate(eval_params, verbose=False)", "_____no_output_____" ], [ "# Model performance on the held out test dataset\n\nEVAL_TABLE_NAME = FEATURES_TEST_TABLE\n\neval_params = {\n 'eval_table_path': f'{PROJECT_ID}.{DATASET_NAME}.{EVAL_TABLE_NAME}',\n 'threshold': 0.5\n}\npropensity_model.evaluate(eval_params, verbose=False)", "_____no_output_____" ] ], [ [ "## Next", "_____no_output_____" ], [ "Use [5. Model evaluation notebook](5.model_evaluation_and_diagnostics.ipynb) to get detailed performance metrics of the model and decide of model actually solves the business problem.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
d0b9bd8ee6da290e7ef19506cf47153094d75c2b
5,066
ipynb
Jupyter Notebook
examples/agents/cartpole_deep.ipynb
ayush-alag/cosIW_sp21
fc5e30762a1b27ae7b5fc8b61fec3f37bd44a4b2
[ "Apache-2.0" ]
25
2020-10-27T19:10:36.000Z
2022-01-04T14:34:29.000Z
examples/agents/cartpole_deep.ipynb
ayush-alag/cosIW_sp21
fc5e30762a1b27ae7b5fc8b61fec3f37bd44a4b2
[ "Apache-2.0" ]
5
2020-10-15T00:52:30.000Z
2021-01-18T18:42:40.000Z
examples/agents/cartpole_deep.ipynb
ayush-alag/cosIW_sp21
fc5e30762a1b27ae7b5fc8b61fec3f37bd44a4b2
[ "Apache-2.0" ]
5
2020-12-04T23:12:13.000Z
2021-06-26T12:38:06.000Z
28.948571
138
0.52349
[ [ [ "%load_ext autoreload\n%autoreload 2", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ], [ "import jax.numpy as jnp\nimport matplotlib.pyplot as plt\nimport jax\nfrom jax import lax\nfrom deluca.envs import CartPole\nfrom deluca.agents import Deep", "[autoreload of deluca.agents._deep failed: Traceback (most recent call last):\n File \"/Users/alexjyu/miniconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 245, in check\n superreload(m, reload, self.old_objects)\n File \"/Users/alexjyu/miniconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 410, in superreload\n update_generic(old_obj, new_obj)\n File \"/Users/alexjyu/miniconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 347, in update_generic\n update(a, b)\n File \"/Users/alexjyu/miniconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 302, in update_class\n if update_generic(old_obj, new_obj): continue\n File \"/Users/alexjyu/miniconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 347, in update_generic\n update(a, b)\n File \"/Users/alexjyu/miniconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 266, in update_function\n setattr(old, name, getattr(new, name))\nValueError: __init__() requires a code object with 1 free vars, not 0\n]\n" ], [ "def loop(context, x):\n env, agent = context\n control = agent(env.state)\n _, reward, _, _ = env.step(control)\n # agent.feed(reward)\n # agent.update()\n return (env, agent), reward", "_____no_output_____" ], [ "# Deep\nenv = CartPole()\nagent = Deep(\n env_state_size = 4,\n action_space = jnp.array([0,1]),\n learning_rate = 0.1,\n gamma = 0.99,\n max_episode_length = 500,\n seed = 0\n )\n", "_____no_output_____" ], [ " # for loop version\nT = 100\nxs = jnp.array(jnp.arange(T))\nprint(env.reset())\nreward = 0\nfor i in range(T):\n (env, agent), r = loop((env, agent), 0)\n reward += r\nreward_forloop = reward\nprint('reward_forloop = ' + str(reward_forloop))\n\n\n# scan version\nenv = CartPole()\nagent = Deep(\n env_state_size = 4,\n action_space = jnp.array([0,1]),\n learning_rate = 0.1,\n gamma = 0.99,\n max_episode_length = 500,\n seed = 0\n )\nprint(env.reset())\n_,reward_scan = lax.scan(loop, (env, agent), xs)\n\n# correctness test\nprint('reward_scan = ' + str(reward_scan))\nprint('reward_scan sum = ' + str(jnp.sum(reward_scan)))", "[ 0.00322265 -0.01503431 -0.01464135 0.04524388]\nreward_forloop = 52\n[ 0.00322265 -0.01503431 -0.01464135 0.04524388]\nreward_scan = [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\nreward_scan sum = 52\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d0b9c75bd45586be47b2619bc8f7ad0ee8b5c38b
13,156
ipynb
Jupyter Notebook
docs/source/Library.ipynb
cmhcbb/Robustness_seq2seq
6870a0c5c81de8ec6ae519449e979272bb8ac789
[ "MIT" ]
194
2018-11-06T20:33:43.000Z
2022-03-31T09:06:29.000Z
OpenNMT-py/docs/source/Library.ipynb
Yuran-Zhao/OpenNMT-py-1.x
5d26fbd86a2197adeb3d08c2b7cdfe29b7086429
[ "MIT" ]
46
2019-11-04T09:51:51.000Z
2022-03-06T18:40:13.000Z
OpenNMT-py/docs/source/Library.ipynb
Yuran-Zhao/OpenNMT-py-1.x
5d26fbd86a2197adeb3d08c2b7cdfe29b7086429
[ "MIT" ]
58
2018-11-09T13:53:05.000Z
2022-03-30T12:05:27.000Z
41.1125
348
0.489739
[ [ [ "import torch\nimport torch.nn as nn\n\nimport onmt\nimport onmt.inputters\nimport onmt.modules\nimport onmt.utils", "_____no_output_____" ] ], [ [ "We begin by loading in the vocabulary for the model of interest. This will let us check vocab size and to get the special ids for padding.", "_____no_output_____" ] ], [ [ "vocab = dict(torch.load(\"../../data/data.vocab.pt\"))\nsrc_padding = vocab[\"src\"].stoi[onmt.inputters.PAD_WORD]\ntgt_padding = vocab[\"tgt\"].stoi[onmt.inputters.PAD_WORD]", "_____no_output_____" ] ], [ [ "Next we specify the core model itself. Here we will build a small model with an encoder and an attention based input feeding decoder. Both models will be RNNs and the encoder will be bidirectional", "_____no_output_____" ] ], [ [ "emb_size = 10\nrnn_size = 6\n# Specify the core model. \nencoder_embeddings = onmt.modules.Embeddings(emb_size, len(vocab[\"src\"]),\n word_padding_idx=src_padding)\n\nencoder = onmt.encoders.RNNEncoder(hidden_size=rnn_size, num_layers=1, \n rnn_type=\"LSTM\", bidirectional=True,\n embeddings=encoder_embeddings)\n\ndecoder_embeddings = onmt.modules.Embeddings(emb_size, len(vocab[\"tgt\"]),\n word_padding_idx=tgt_padding)\ndecoder = onmt.decoders.decoder.InputFeedRNNDecoder(hidden_size=rnn_size, num_layers=1, \n bidirectional_encoder=True,\n rnn_type=\"LSTM\", embeddings=decoder_embeddings)\nmodel = onmt.models.model.NMTModel(encoder, decoder)\n\n# Specify the tgt word generator and loss computation module\nmodel.generator = nn.Sequential( \n nn.Linear(rnn_size, len(vocab[\"tgt\"])), \n nn.LogSoftmax())\nloss = onmt.utils.loss.NMTLossCompute(model.generator, vocab[\"tgt\"]) ", "_____no_output_____" ] ], [ [ "Now we set up the optimizer. This could be a core torch optim class, or our wrapper which handles learning rate updates and gradient normalization automatically.", "_____no_output_____" ] ], [ [ "optim = onmt.utils.optimizers.Optimizer(method=\"sgd\", lr=1, max_grad_norm=2)\noptim.set_parameters(model.named_parameters())", "_____no_output_____" ] ], [ [ "Now we load the data from disk. Currently will need to call a function to load the fields into the data as well. ", "_____no_output_____" ] ], [ [ "# Load some data\ndata = torch.load(\"../../data/data.train.1.pt\")\nvalid_data = torch.load(\"../../data/data.valid.1.pt\")\ndata.load_fields(vocab)\nvalid_data.load_fields(vocab)\ndata.examples = data.examples[:100] ", "_____no_output_____" ] ], [ [ "To iterate through the data itself we use a torchtext iterator class. We specify one for both the training and test data. 
", "_____no_output_____" ] ], [ [ "train_iter = onmt.inputters.OrderedIterator( \n dataset=data, batch_size=10, \n device=-1, \n repeat=False)\nvalid_iter = onmt.inputters.OrderedIterator( \n dataset=valid_data, batch_size=10, \n device=-1,\n train=False) ", "_____no_output_____" ] ], [ [ "Finally we train.", "_____no_output_____" ] ], [ [ "trainer = onmt.Trainer(model, loss, loss, optim)\n\ndef report_func(*args):\n stats = args[-1]\n stats.output(args[0], args[1], 10, 0)\n return stats\n\nfor epoch in range(2):\n trainer.train(epoch, report_func)\n val_stats = trainer.validate()\n\n print(\"Validation\")\n val_stats.output(epoch, 11, 10, 0)\n trainer.epoch_step(val_stats.ppl(), epoch)", "Epoch 0, 0/ 10; acc: 0.00; ppl: 1225.23; 1320 src tok/s; 1320 tgt tok/s; 1514090454 s elapsed\nEpoch 0, 1/ 10; acc: 9.50; ppl: 996.33; 1188 src tok/s; 1194 tgt tok/s; 1514090454 s elapsed\nEpoch 0, 2/ 10; acc: 16.51; ppl: 694.48; 1265 src tok/s; 1267 tgt tok/s; 1514090454 s elapsed\nEpoch 0, 3/ 10; acc: 20.49; ppl: 470.39; 1459 src tok/s; 1420 tgt tok/s; 1514090454 s elapsed\nEpoch 0, 4/ 10; acc: 22.68; ppl: 387.03; 1511 src tok/s; 1462 tgt tok/s; 1514090454 s elapsed\nEpoch 0, 5/ 10; acc: 24.58; ppl: 345.44; 1625 src tok/s; 1509 tgt tok/s; 1514090454 s elapsed\nEpoch 0, 6/ 10; acc: 25.37; ppl: 314.39; 1586 src tok/s; 1493 tgt tok/s; 1514090454 s elapsed\nEpoch 0, 7/ 10; acc: 26.14; ppl: 291.15; 1593 src tok/s; 1520 tgt tok/s; 1514090455 s elapsed\nEpoch 0, 8/ 10; acc: 26.32; ppl: 274.79; 1606 src tok/s; 1545 tgt tok/s; 1514090455 s elapsed\nEpoch 0, 9/ 10; acc: 26.83; ppl: 247.32; 1669 src tok/s; 1614 tgt tok/s; 1514090455 s elapsed\nValidation\nEpoch 0, 11/ 10; acc: 13.41; ppl: 111.94; 0 src tok/s; 7329 tgt tok/s; 1514090464 s elapsed\nEpoch 1, 0/ 10; acc: 6.59; ppl: 147.05; 1849 src tok/s; 1743 tgt tok/s; 1514090464 s elapsed\nEpoch 1, 1/ 10; acc: 22.10; ppl: 130.66; 2002 src tok/s; 1957 tgt tok/s; 1514090464 s elapsed\nEpoch 1, 2/ 10; acc: 20.16; ppl: 122.49; 1748 src tok/s; 1760 tgt tok/s; 1514090464 s elapsed\nEpoch 1, 3/ 10; acc: 23.52; ppl: 117.41; 1690 src tok/s; 1698 tgt tok/s; 1514090464 s elapsed\nEpoch 1, 4/ 10; acc: 24.16; ppl: 119.42; 1647 src tok/s; 1662 tgt tok/s; 1514090464 s elapsed\nEpoch 1, 5/ 10; acc: 25.44; ppl: 115.31; 1775 src tok/s; 1709 tgt tok/s; 1514090465 s elapsed\nEpoch 1, 6/ 10; acc: 24.05; ppl: 115.11; 1780 src tok/s; 1718 tgt tok/s; 1514090465 s elapsed\nEpoch 1, 7/ 10; acc: 25.32; ppl: 109.59; 1799 src tok/s; 1765 tgt tok/s; 1514090465 s elapsed\nEpoch 1, 8/ 10; acc: 25.14; ppl: 108.16; 1771 src tok/s; 1734 tgt tok/s; 1514090465 s elapsed\nEpoch 1, 9/ 10; acc: 25.58; ppl: 107.13; 1817 src tok/s; 1757 tgt tok/s; 1514090465 s elapsed\nValidation\nEpoch 1, 11/ 10; acc: 19.58; ppl: 88.09; 0 src tok/s; 7371 tgt tok/s; 1514090474 s elapsed\n" ] ], [ [ "To use the model, we need to load up the translation functions ", "_____no_output_____" ] ], [ [ "import onmt.translate", "_____no_output_____" ], [ "translator = onmt.translate.Translator(beam_size=10, fields=data.fields, model=model)\nbuilder = onmt.translate.TranslationBuilder(data=valid_data, fields=data.fields)\n\nvalid_data.src_vocabs\nfor batch in valid_iter:\n trans_batch = translator.translate_batch(batch=batch, data=valid_data)\n translations = builder.from_batch(trans_batch)\n for trans in translations:\n print(trans.log(0))\n break", "PRED SCORE: -4.0690\n\nSENT 0: ('The', 'competitors', 'have', 'other', 'advantages', ',', 'too', '.')\nPRED 0: .\n\nPRED SCORE: -4.2736\n\nSENT 0: ('The', 'company', '&apos;s', 
'durability', 'goes', 'back', 'to', 'its', 'first', 'boss', ',', 'a', 'visionary', ',', 'Thomas', 'J.', 'Watson', 'Sr.')\nPRED 0: .\n\nPRED SCORE: -4.0144\n\nSENT 0: ('&quot;', 'From', 'what', 'we', 'know', 'today', ',', 'you', 'have', 'to', 'ask', 'how', 'I', 'could', 'be', 'so', 'wrong', '.', '&quot;')\nPRED 0: .\n\nPRED SCORE: -4.1361\n\nSENT 0: ('Boeing', 'Co', 'shares', 'rose', '1.5%', 'to', '$', '67.94', '.')\nPRED 0: .\n\nPRED SCORE: -4.1382\n\nSENT 0: ('Some', 'did', 'not', 'believe', 'him', ',', 'they', 'said', 'that', 'he', 'got', 'dizzy', 'even', 'in', 'the', 'truck', ',', 'but', 'always', 'wanted', 'to', 'fulfill', 'his', 'dream', ',', 'that', 'of', 'becoming', 'a', 'pilot', '.')\nPRED 0: .\n\nPRED SCORE: -3.8881\n\nSENT 0: ('In', 'your', 'opinion', ',', 'the', 'council', 'should', 'ensure', 'that', 'the', 'band', 'immediately', 'above', 'the', 'Ronda', 'de', 'Dalt', 'should', 'provide', 'in', 'its', 'entirety', ',', 'an', 'area', 'of', 'equipment', 'to', 'conduct', 'a', 'smooth', 'transition', 'between', 'the', 'city', 'and', 'the', 'green', '.')\nPRED 0: .\n\nPRED SCORE: -4.0778\n\nSENT 0: ('The', 'clerk', 'of', 'the', 'court', ',', 'Jorge', 'Yanez', ',', 'went', 'to', 'the', 'jail', 'of', 'the', 'municipality', 'of', 'San', 'Nicolas', 'of', 'Garza', 'to', 'notify', 'Jonah', 'that', 'he', 'has', 'been', 'legally', 'pardoned', 'and', 'his', 'record', 'will', 'be', 'filed', '.')\nPRED 0: .\n\nPRED SCORE: -4.2479\n\nSENT 0: ('&quot;', 'In', 'a', 'research', 'it', 'is', 'reported', 'that', 'there', 'are', 'no', 'parts', 'or', 'components', 'of', 'the', 'ship', 'in', 'another', 'place', ',', 'the', 'impact', 'is', 'presented', 'in', 'a', 'structural', 'way', '.')\nPRED 0: .\n\nPRED SCORE: -3.8585\n\nSENT 0: ('On', 'the', 'asphalt', 'covering', ',', 'he', 'added', ',', 'is', 'placed', 'a', 'final', 'layer', 'called', 'rolling', 'covering', ',', 'which', 'is', 'made', '\\u200b', '\\u200b', 'of', 'a', 'fine', 'stone', 'material', ',', 'meaning', 'sand', 'also', 'dipped', 'into', 'the', 'asphalt', '.')\nPRED 0: .\n\nPRED SCORE: -4.2298\n\nSENT 0: ('This', 'is', '200', 'bar', 'on', 'leaving', 'and', '100', 'bar', 'on', 'arrival', '.')\nPRED 0: .\n\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0b9d129071f1792f7131dc76ae8e5b60ac98bb4
48,429
ipynb
Jupyter Notebook
ML-Base-MOOC/chapt-9 Decision Tree/01- Decision Tree and Entropy.ipynb
NovemberChopin/machine-learning
a87a2b8189b5a2990350083038c997fb22fb39ff
[ "MIT" ]
3
2019-10-28T19:46:36.000Z
2020-02-25T06:59:19.000Z
ML-Base-MOOC/chapt-9 Decision Tree/01- Decision Tree and Entropy.ipynb
NovemberChopin/machine-learning
a87a2b8189b5a2990350083038c997fb22fb39ff
[ "MIT" ]
null
null
null
ML-Base-MOOC/chapt-9 Decision Tree/01- Decision Tree and Entropy.ipynb
NovemberChopin/machine-learning
a87a2b8189b5a2990350083038c997fb22fb39ff
[ "MIT" ]
3
2019-10-05T16:52:11.000Z
2021-06-17T02:16:53.000Z
81.530303
14,556
0.817754
[ [ [ "# 决策树\n\n- 非参数学习算法\n- 天然解决多分类问题\n- 也可以解决回归问题\n- 非常好的可解释性", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import cross_val_score", "_____no_output_____" ], [ "from sklearn import datasets\n\niris = datasets.load_iris()", "_____no_output_____" ], [ "print(iris.DESCR)", ".. _iris_dataset:\n\nIris plants dataset\n--------------------\n\n**Data Set Characteristics:**\n\n :Number of Instances: 150 (50 in each of three classes)\n :Number of Attributes: 4 numeric, predictive attributes and the class\n :Attribute Information:\n - sepal length in cm\n - sepal width in cm\n - petal length in cm\n - petal width in cm\n - class:\n - Iris-Setosa\n - Iris-Versicolour\n - Iris-Virginica\n \n :Summary Statistics:\n\n ============== ==== ==== ======= ===== ====================\n Min Max Mean SD Class Correlation\n ============== ==== ==== ======= ===== ====================\n sepal length: 4.3 7.9 5.84 0.83 0.7826\n sepal width: 2.0 4.4 3.05 0.43 -0.4194\n petal length: 1.0 6.9 3.76 1.76 0.9490 (high!)\n petal width: 0.1 2.5 1.20 0.76 0.9565 (high!)\n ============== ==== ==== ======= ===== ====================\n\n :Missing Attribute Values: None\n :Class Distribution: 33.3% for each of 3 classes.\n :Creator: R.A. Fisher\n :Donor: Michael Marshall (MARSHALL%[email protected])\n :Date: July, 1988\n\nThe famous Iris database, first used by Sir R.A. Fisher. The dataset is taken\nfrom Fisher's paper. Note that it's the same as in R, but not as in the UCI\nMachine Learning Repository, which has two wrong data points.\n\nThis is perhaps the best known database to be found in the\npattern recognition literature. Fisher's paper is a classic in the field and\nis referenced frequently to this day. (See Duda & Hart, for example.) The\ndata set contains 3 classes of 50 instances each, where each class refers to a\ntype of iris plant. One class is linearly separable from the other 2; the\nlatter are NOT linearly separable from each other.\n\n.. topic:: References\n\n - Fisher, R.A. \"The use of multiple measurements in taxonomic problems\"\n Annual Eugenics, 7, Part II, 179-188 (1936); also in \"Contributions to\n Mathematical Statistics\" (John Wiley, NY, 1950).\n - Duda, R.O., & Hart, P.E. (1973) Pattern Classification and Scene Analysis.\n (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218.\n - Dasarathy, B.V. (1980) \"Nosing Around the Neighborhood: A New System\n Structure and Classification Rule for Recognition in Partially Exposed\n Environments\". IEEE Transactions on Pattern Analysis and Machine\n Intelligence, Vol. PAMI-2, No. 1, 67-71.\n - Gates, G.W. (1972) \"The Reduced Nearest Neighbor Rule\". IEEE Transactions\n on Information Theory, May 1972, 431-433.\n - See also: 1988 MLC Proceedings, 54-64. Cheeseman et al\"s AUTOCLASS II\n conceptual clustering system finds 3 classes in the data.\n - Many, many more ...\n" ], [ "X = iris.data[:, 2:] # 取后两个特征\ny = iris.target", "_____no_output_____" ], [ "plt.scatter(X[y==0, 0], X[y==0, 1])\nplt.scatter(X[y==1, 0], X[y==1, 1])\nplt.scatter(X[y==2, 0], X[y==2, 1])", "_____no_output_____" ] ], [ [ "### 1. 
scikit-learn 中的决策树", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeClassifier\n# entropy : 熵\ndt_clf = DecisionTreeClassifier(max_depth=3, criterion=\"entropy\")\ndt_clf.fit(X, y)", "_____no_output_____" ], [ "def plot_decision_boundary(model, axis):\n \n x0, x1 = np.meshgrid(\n np.linspace(axis[0], axis[1], int((axis[1] - axis[0])*100)).reshape(1, -1),\n np.linspace(axis[2], axis[3], int((axis[3] - axis[2])*100)).reshape(-1, 1)\n )\n X_new = np.c_[x0.ravel(), x1.ravel()]\n \n y_predic = model.predict(X_new)\n zz = y_predic.reshape(x0.shape)\n \n from matplotlib.colors import ListedColormap\n custom_cmap = ListedColormap(['#EF9A9A', '#FFF590', '#90CAF9'])\n \n plt.contourf(x0, x1, zz, linewidth=5, cmap=custom_cmap)", "_____no_output_____" ], [ "plot_decision_boundary(dt_clf, axis=(0.5, 7.5, 0, 3))\nplt.scatter(X[y==0, 0], X[y==0, 1])\nplt.scatter(X[y==1, 0], X[y==1, 1])\nplt.scatter(X[y==2, 0], X[y==2, 1])", "/home/js/pyEnvs/tf_cpu/lib/python3.6/site-packages/ipykernel_launcher.py:15: UserWarning: The following kwargs were not used by contour: 'linewidth'\n from ipykernel import kernelapp as app\n" ] ], [ [ "### 2. 如何构建决策树\n\n**问题**\n\n- 每个节点在那个维度做划分?\n- 某个维度在那个值上做划分?\n\n- 划分的标准就是,**划分后使得信息熵降低**", "_____no_output_____" ], [ "**信息熵**\n\n- 熵在信息论中代表随机变量不确定的度量\n- 熵越大,数据的不确定性越高\n- 熵越小,数据的不确定性越低\n\n$$H = -\\sum_{i=1}^kp_i\\log{(p_i)}$$\n- 其中 $p_i$ 表示每一类信息在所有信息类别中占的比例\n![GArxBV.png](https://s1.ax1x.com/2020/03/28/GArxBV.png)\n- 对于二分类,香农公式为:\n$$H=-x\\log(x)-(1-x)\\log(1-x)$$", "_____no_output_____" ], [ "**信息熵函数**", "_____no_output_____" ] ], [ [ "def entropy(p):\n return -p * np.log(p) - (1-p) * np.log(1-p)", "_____no_output_____" ], [ "x = np.linspace(0.01, 0.99)", "_____no_output_____" ], [ "plt.plot(x, entropy(x))", "_____no_output_____" ] ], [ [ "- 可以看出,当 x 越接近0.5,熵越高", "_____no_output_____" ], [ "### 3. 模拟使用信息熵进行划分", "_____no_output_____" ] ], [ [ "# 基于维度 d 的 value 值进行划分\ndef split(X, y, d, value):\n index_a = (X[:, d] <= value)\n index_b = (X[:, d] > value)\n return X[index_a], X[index_b], y[index_a], y[index_b]", "_____no_output_____" ], [ "from collections import Counter\nfrom math import log\n# 计算每一类样本点的熵的和\ndef entropy(y):\n counter = Counter(y)\n res = 0.0\n for num in counter.values():\n p = num / len(y)\n res += -p * log(p)\n return res ", "_____no_output_____" ], [ "# 寻找要划分的 value 值,寻找最小信息熵及相应的点\ndef try_split(X, y):\n best_entropy = float('inf') # 最小的熵的值\n best_d, best_v = -1, -1 # 划分的维度,划分的位置\n # 遍历每一个维度\n for d in range(X.shape[1]):\n # 每两个样本点在 d 这个维度中间的值. 
首先把 d 维所有样本排序\n sorted_index = np.argsort(X[:, d])\n for i in range(1, len(X)):\n if X[sorted_index[i-1], d] != X[sorted_index[i], d]:\n v = (X[sorted_index[i-1], d] + X[sorted_index[i], d]) / 2\n x_l, x_r, y_l, y_r = split(X, y, d, v)\n # 计算当前划分后的两部分结果熵是多少\n e = entropy(y_l) + entropy(y_r)\n if e < best_entropy: \n best_entropy, best_d, best_v = e, d, v\n return best_entropy, best_d, best_v", "_____no_output_____" ], [ "best_entropy, best_d, best_v = try_split(X, y)\nprint(\"best_entropy = \", best_entropy)\nprint(\"best_d\", best_d)\nprint(\"best_v\", best_v)", "best_entropy = 0.6931471805599453\nbest_d 0\nbest_v 2.45\n" ] ], [ [ "**即在第 0 个维度的 2.45 位置进行划分,可以得到最低的熵,值为 0.693**", "_____no_output_____" ] ], [ [ "X1_l, X1_r, y1_l, y1_r = split(X, y, best_d, best_v)", "_____no_output_____" ], [ "entropy(y1_r)", "_____no_output_____" ], [ "entropy(y1_l) # 从上图可以看出,粉红色部分只有一类,故熵为 0", "_____no_output_____" ], [ "best_entropy2, best_d2, best_v2 = try_split(X1_r, y1_r)\nprint(\"best_entropy = \", best_entropy2)\nprint(\"best_d\", best_d2)\nprint(\"best_v\", best_v2)", "best_entropy = 0.4132278899361904\nbest_d 1\nbest_v 1.75\n" ], [ "X2_l, X2_r, y2_l, y2_r = split(X1_r, y1_r, best_d2, best_v2)", "_____no_output_____" ], [ "entropy(y2_r)", "_____no_output_____" ], [ "entropy(y2_l)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d0b9dbd6bb4bd708a326880381adf0a1e2e444f5
21,745
ipynb
Jupyter Notebook
notebooks/num_concurrent_event.ipynb
aliciawyy/financial_machine_learning
00c20c250976104dd8ea1484697064272c8231b7
[ "MIT" ]
null
null
null
notebooks/num_concurrent_event.ipynb
aliciawyy/financial_machine_learning
00c20c250976104dd8ea1484697064272c8231b7
[ "MIT" ]
null
null
null
notebooks/num_concurrent_event.ipynb
aliciawyy/financial_machine_learning
00c20c250976104dd8ea1484697064272c8231b7
[ "MIT" ]
null
null
null
28.462042
93
0.309266
[ [ [ "import pandas as pd\nimport numpy as np\n\nimport fml\n%matplotlib inline", "_____no_output_____" ] ], [ [ "We compute here the number of labels concurrent at t.", "_____no_output_____" ] ], [ [ "df_price = fml.DataHandler().get_time_series_data(\"SPY\")\nprice = fml.Price(df_price[\"Adj Close\"], freq=\"B\")", "_____no_output_____" ], [ "cross_time = price.bounds_cross_time(window=10)", "_____no_output_____" ], [ "cross_time.index = range(len(cross_time))\ncross_time.head()", "_____no_output_____" ], [ "cross_time_int = cross_time.min(axis=1).to_frame(\"cross\")\ncross_time_int[\"cnt\"] = 0\ncross_time_int.head(10)", "_____no_output_____" ], [ "for i in cross_time_int.index[:20]:\n cross_time_int.loc[i + 1: i + cross_time_int.loc[i, \"cross\"], \"cnt\"] += 1\ncross_time_int.head(10)", "_____no_output_____" ], [ "window = 10\nfor i in range(1, window + 2):\n cross_time_int[i] = cross_time_int[\"cross\"].shift(i) >= i", "_____no_output_____" ], [ "cross_time_int[\"cnt_vec\"] = cross_time_int[list(range(1, window + 2))].sum(axis=1)\nseries = price.num_concurrent_events()", "_____no_output_____" ], [ "cross_time_int[series.name] = series.values", "_____no_output_____" ], [ "cross_time_int.head(15)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0b9e7222bf302a3c456331f14c89f220096fbe0
27,442
ipynb
Jupyter Notebook
Hybrid_SGO.ipynb
Enixes/Hybrid-Social-Group-Optimization-algorithm
90cca60a16cb928c5b019ecff9b9e56a5d2683cd
[ "Apache-2.0" ]
1
2021-04-24T03:23:05.000Z
2021-04-24T03:23:05.000Z
Hybrid_SGO.ipynb
Enixes/Hybrid-Social-Group-Optimization-algorithm
90cca60a16cb928c5b019ecff9b9e56a5d2683cd
[ "Apache-2.0" ]
null
null
null
Hybrid_SGO.ipynb
Enixes/Hybrid-Social-Group-Optimization-algorithm
90cca60a16cb928c5b019ecff9b9e56a5d2683cd
[ "Apache-2.0" ]
1
2021-04-24T03:23:08.000Z
2021-04-24T03:23:08.000Z
29.100742
184
0.429779
[ [ [ "import pandas as pd\nimport numpy as np\n\nfrom PIL import Image\nimport os\nimport sys\n!pip install ipython-autotime\n\n%load_ext autotime\n%matplotlib inline\n", "Requirement already satisfied: ipython-autotime in /usr/local/lib/python3.6/dist-packages (0.3.1)\nRequirement already satisfied: ipython in /usr/local/lib/python3.6/dist-packages (from ipython-autotime) (5.5.0)\nRequirement already satisfied: pickleshare in /usr/local/lib/python3.6/dist-packages (from ipython->ipython-autotime) (0.7.5)\nRequirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.6/dist-packages (from ipython->ipython-autotime) (1.0.18)\nRequirement already satisfied: pygments in /usr/local/lib/python3.6/dist-packages (from ipython->ipython-autotime) (2.6.1)\nRequirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.6/dist-packages (from ipython->ipython-autotime) (4.3.3)\nRequirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.6/dist-packages (from ipython->ipython-autotime) (0.8.1)\nRequirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.6/dist-packages (from ipython->ipython-autotime) (51.3.3)\nRequirement already satisfied: pexpect; sys_platform != \"win32\" in /usr/local/lib/python3.6/dist-packages (from ipython->ipython-autotime) (4.8.0)\nRequirement already satisfied: decorator in /usr/local/lib/python3.6/dist-packages (from ipython->ipython-autotime) (4.4.2)\nRequirement already satisfied: wcwidth in /usr/local/lib/python3.6/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython->ipython-autotime) (0.2.5)\nRequirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython->ipython-autotime) (1.15.0)\nRequirement already satisfied: ipython-genutils in /usr/local/lib/python3.6/dist-packages (from traitlets>=4.2->ipython->ipython-autotime) (0.2.0)\nRequirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.6/dist-packages (from pexpect; sys_platform != \"win32\"->ipython->ipython-autotime) (0.7.0)\nThe autotime extension is already loaded. To reload it, use:\n %reload_ext autotime\ntime: 2.43 s (started: 2021-01-22 18:15:43 +00:00)\n" ] ], [ [ "1. Extract your dataset and split into train_x, train_y, test_x and test_y.\r\n2. Execute the following cells", "_____no_output_____" ], [ "\n\n---\n\n\n## Hybrid Social Group Optimization \n\n\n---\n\n\n", "_____no_output_____" ] ], [ [ "N = 5 # Number of persons in population\nD = len(train_x.columns) # Number of features in dataset\ng = 10 # Number of generations\nc = 0.6 # Self Introspection factor\nr0 = 1\nr1 = 0.4\nr2 = 0.6\nprint(r1, r2)", "0.4 0.6\ntime: 3.13 ms (started: 2021-01-22 18:19:07 +00:00)\n" ] ], [ [ "**Population Initialization**", "_____no_output_____" ] ], [ [ "population = np.random.choice([0,1,2,3,4,5,6,7,8,9], (N,D), p=[0.16, 0.16, 0.16, 0.16, 0.16, 0.04, 0.04, 0.04, 0.04, 0.04]) #Determines no. 
of features selected by probablity", "time: 5.05 ms (started: 2021-01-22 18:19:07 +00:00)\n" ], [ "population = population.astype(float)\nprint(population.shape)\npopulation", "(5, 10000)\n" ], [ "fitness = np.zeros(N)\ntest_x.shape", "_____no_output_____" ], [ "def fitter_trait(X_old, X_new):\n if X_new > X_old:\n return X_old\n else:\n return X_new\n \n ", "time: 2.58 ms (started: 2021-01-22 18:19:07 +00:00)\n" ] ], [ [ "\n\n\n\n\n**Fitness Function**", "_____no_output_____" ] ], [ [ "global classifier\nclassifier = Svc #Change classifier here\nselect = train_x.columns\nselectno = len(train_x.columns)\nclassifier.fit(train_x, train_y)\nselect_acc = classifier.score(test_x, test_y) \ndef fitness_function(pop): #Fitness Function\n \n for i in range(N):\n new_train_x = train_x\n \n new_test_x = test_x\n \n global select\n global selectno\n global select_acc\n \n new_train_x = new_train_x.drop(train_x.columns[pop[i] < 4], axis = 1)\n \n new_test_x = new_test_x.drop(test_x.columns[pop[i] < 4], axis = 1)\n \n classifier.fit(new_train_x, train_y) \n fitness[i] = classifier.score(new_test_x, test_y) \n if (fitness[i] > select_acc):\n select = new_train_x.columns\n # print(select.shape)\n selectno = new_train_x.shape[1]\n select_acc = fitness[i]\n elif fitness[i] == select_acc and new_train_x.shape[1] < selectno:\n select = new_train_x.columns\n selectno = new_train_x.shape[1]\n \n print(\"\\nPerson \"+ str(i+1))\n \n print(\"No. of Features Used = \"+ str(new_train_x.shape[1])+ \"/\"+str(D)+\"\\nFitness = \" + str(fitness[i]))\n print(\"Feature Used = \", end = \" \") \n \n #print(new_train_x.columns)\n\n print(new_train_x.shape[1])\n ", "time: 5.47 s (started: 2021-01-22 18:19:07 +00:00)\n" ], [ "# Initializing Fitness values of population\n# fitness_function(population) \n# selectno", "time: 785 µs (started: 2021-01-22 18:19:13 +00:00)\n" ] ], [ [ "**Gbest : Fittest person in population**", "_____no_output_____" ] ], [ [ "###Determining GBest\ngbest = 0\ngbest_i = 0", "time: 874 µs (started: 2021-01-22 18:19:13 +00:00)\n" ], [ "def find_gbest():\n gbest = max(fitness)#This can be any function\n gbest_i = fitness.argmax()\n print(\"Best fitness value for the generation = \"+str(gbest) + \" Person \" + str(gbest_i+1)+\"\\n\")\nfind_gbest() \n#we chose maximum fitness value to be better for simplicity\ndef cal_fitness(person):\n new_train_x = train_x\n \n new_test_x = test_x\n\n new_train_x = new_train_x.drop(train_x.columns[person < 4], axis = 1)\n \n new_test_x = new_test_x.drop(test_x.columns[person < 4], axis = 1)\n \n classifier.fit(new_train_x, train_y) \n return classifier.score(new_test_x, test_y)\n\ncal_fitness(population[0])", "Best fitness value for the generation = 0.0 Person 1\n\n" ], [ "# new_train_x = train_x\r\n \r\n# new_test_x = test_x\r\n\r\n# new_train_x = new_train_x.drop(train_x.columns[person < 4], axis = 1)\r\n \r\n# new_test_x = new_test_x.drop(test_x.columns[person < 4], axis = 1)", "time: 913 µs (started: 2021-01-22 18:19:15 +00:00)\n" ], [ "per1 = np.zeros((1,10000))\nprint(per1.shape)\nper1[0][5] = 8\nper1[0][89] = 7\nper1[0][45] = 6\ncal_fitness(per1[0])", "(1, 10000)\n" ] ], [ [ "\n\n---\n\n**Mutation Phase**", "_____no_output_____" ] ], [ [ "def mutate():\n gworst_i = fitness.argmin()\n gworst = min(fitness)\n mut = np.random.randint(0,2,size=(1,D))[0]\n print(\"Mutating the Generation's Worst....Person \"+ str(gworst_i+1))\n for i in range(D):\n if mut[i] > 0:\n mut[i] = population[gbest_i][i]\n else:\n mut[i] = population[gworst_i][i]\n if cal_fitness(mut) > 
gworst:\n population[gworst_i] = mut\n print(\"Person \"+str(gworst_i)+\" mutated\")\n else:\n print(\"No Mutations in this generation\")", "time: 5.35 ms (started: 2021-01-22 18:19:15 +00:00)\n" ], [ "mut = np.random.randint(0,2,size=(1,D))[0]\r\nmut", "_____no_output_____" ], [ "div = pd.DataFrame(np.random.randint(0,2,size=(1,D))[0])\n# div.iloc[:,div > 0] = population[2][div>0]\n# div", "time: 1.79 ms (started: 2021-01-22 18:19:15 +00:00)\n" ] ], [ [ "\n\n---\n\n**Improving Phase**", "_____no_output_____" ] ], [ [ "## Improving Phase\n# i = 1\ndef improve():\n print(\"Improving.......\")\n for i in range(N):\n Xnew = population[i]\n print('Persona '+ str(i+1))\n for j in range(D):\n Xnew[j] = c * population[i][j] + r0 * (population[gbest_i][j] - population[i][j])\n try:\n if cal_fitness(Xnew) > fitness[i]:\n population[i] = Xnew\n except:\n print(\"Oops!\", sys.exc_info()[0], \"occurred.\")\n print(\"Next entry.\")\n \n", "time: 4.73 ms (started: 2021-01-22 18:19:15 +00:00)\n" ], [ "", "_____no_output_____" ] ], [ [ "\n\n---\n\n**Acquiring Phase**", "_____no_output_____" ] ], [ [ "## Acquiring Phase\ndef acquire():\n random_person = np.random.randint(low=0, high=N)\n for i in range(N):\n if random_person == i:\n random_person = np.random.randint(low=0, high=N)\n i = i - 1\n continue\n X_new = population[i]\n if fitness[random_person] > fitness[i]:\n for j in range(D):\n X_new[j] = population[i][j] + r1*(population[random_person][j]-population[i][j]) + r2*(population[gbest_i][j]-population[i][j])\n if cal_fitness(X_new) > fitness[i]:\n population[i] = X_new\n else:\n for j in range(D):\n X_new[j] = population[i][j] + r1*(population[i][j]-population[random_person][j]) + r2*(population[gbest_i][j]-population[i][j])\n if cal_fitness(X_new) > fitness[i]:\n population[i] = X_new\n \n", "time: 8.58 ms (started: 2021-01-22 18:19:15 +00:00)\n" ], [ "#Run\ntry:\n for k in range(g):\n print(\"Generation \"+ str(k+1) + \"\\n---------------\")\n fitness_function(population)\n find_gbest()\n mutate()\n improve()\n acquire() \nexcept:\n print()\n print(\"........................\")\n print(\"Optimal Solution Reached\")\n print(\"........................\")", "Generation 1\n---------------\n\nPerson 1\nNo. of Features Used = 3582/10000\nFitness = 0.9863945578231292\nFeature Used = 3582\n\nPerson 2\nNo. of Features Used = 3608/10000\nFitness = 0.9897959183673469\nFeature Used = 3608\n\nPerson 3\nNo. of Features Used = 3614/10000\nFitness = 0.9897959183673469\nFeature Used = 3614\n\nPerson 4\nNo. of Features Used = 3570/10000\nFitness = 0.9897959183673469\nFeature Used = 3570\n\nPerson 5\nNo. of Features Used = 3581/10000\nFitness = 0.9897959183673469\nFeature Used = 3581\nBest fitness value for the generation = 0.9897959183673469 Person 2\n\nMutating the Generation's Worst....Person 1\nNo Mutations in this generation\nImproving.......\nPersona 1\nPersona 2\nPersona 3\nPersona 4\nPersona 5\nGeneration 2\n---------------\n\nPerson 1\nNo. of Features Used = 830/10000\nFitness = 0.9897959183673469\nFeature Used = 830\n\nPerson 2\nNo. of Features Used = 660/10000\nFitness = 0.9863945578231292\nFeature Used = 660\n\nPerson 3\nNo. of Features Used = 647/10000\nFitness = 0.9829931972789115\nFeature Used = 647\n\nPerson 4\nNo. of Features Used = 514/10000\nFitness = 0.9897959183673469\nFeature Used = 514\n\nPerson 5\nNo. 
of Features Used = 572/10000\nFitness = 0.9829931972789115\nFeature Used = 572\nBest fitness value for the generation = 0.9897959183673469 Person 1\n\nMutating the Generation's Worst....Person 3\nPerson 2 mutated\nImproving.......\nPersona 1\nOops! <class 'ValueError'> occurred.\nNext entry.\nPersona 2\nOops! <class 'ValueError'> occurred.\nNext entry.\nPersona 3\nOops! <class 'ValueError'> occurred.\nNext entry.\nPersona 4\nOops! <class 'ValueError'> occurred.\nNext entry.\nPersona 5\nOops! <class 'ValueError'> occurred.\nNext entry.\n\n........................\nOptimal Solution Reached\n........................\ntime: 17.7 s (started: 2021-01-22 18:19:15 +00:00)\n" ], [ "select.shape", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d0b9ea6033b10076105ee7ca8b6ce5a2ead7b8f8
1,282
ipynb
Jupyter Notebook
numpy-data-science-essential-training/Ex_Files_NumPy_Data_EssT/Exercise Files/Ch 3/03_02/Starting/.ipynb_checkpoints/Boolean Mask Arrays-checkpoint.ipynb
saint1729/in-learning
fe58495846f05e2dcd15d1dbb6ff87535d35d6c5
[ "MIT" ]
null
null
null
numpy-data-science-essential-training/Ex_Files_NumPy_Data_EssT/Exercise Files/Ch 3/03_02/Starting/.ipynb_checkpoints/Boolean Mask Arrays-checkpoint.ipynb
saint1729/in-learning
fe58495846f05e2dcd15d1dbb6ff87535d35d6c5
[ "MIT" ]
null
null
null
numpy-data-science-essential-training/Ex_Files_NumPy_Data_EssT/Exercise Files/Ch 3/03_02/Starting/.ipynb_checkpoints/Boolean Mask Arrays-checkpoint.ipynb
saint1729/in-learning
fe58495846f05e2dcd15d1dbb6ff87535d35d6c5
[ "MIT" ]
null
null
null
16.868421
59
0.480499
[ [ [ "<h1>Boolean Mask Arrays</h1>", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "my_vector = np.array([-17, -4, 0, 2, 21, 37, 105])\nmy_vector", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
d0b9ec9e5bb93aad01c147a398d2afd98e250598
5,527
ipynb
Jupyter Notebook
project/template.ipynb
cliburn/sta-662-2020
05a965e2f33c8925d25e000c6c76b084b5bf0ed6
[ "MIT" ]
47
2020-01-08T21:45:54.000Z
2022-03-25T09:25:59.000Z
project/template.ipynb
cliburn/sta-662-2020
05a965e2f33c8925d25e000c6c76b084b5bf0ed6
[ "MIT" ]
null
null
null
project/template.ipynb
cliburn/sta-662-2020
05a965e2f33c8925d25e000c6c76b084b5bf0ed6
[ "MIT" ]
54
2020-01-08T21:46:05.000Z
2022-03-15T05:04:00.000Z
41.556391
373
0.651167
[ [ [ "# Final Project \n\nFor the final project, you will need to implement a \"new\" statistical algorithm in Python from the research literature and write a \"paper\" describing the algorithm. \n\nSuggested papers can be found in Sakai:Resources:Final_Project_Papers\n\n## Paper\n\nThe paper should have the following:\n\n### Title\n\nShould be concise and informative.\n\n### Abstract\n\n250 words or less. Identify 4-6 key phrases.\n\n### Background\n\nState the research paper you are using. Describe the concept of the algorithm and why it is interesting and/or useful. If appropriate, describe the mathematical basis of the algorithm. Some potential topics for the background include:\n\n- What problem does it address? \n- What are known and possible applications of the algorithm? \n- What are its advantages and disadvantages relative to other algorithms?\n- How will you use it in your research?\n\n### Description of algorithm\n\nFirst, explain in plain English what the algorithm does. Then describes the details of the algorithm, using mathematical equations or pseudocode as appropriate. \n\n### Describe optimization for performance\n\nFirst implement the algorithm using plain Python in a straightforward way from the description of the algorithm. Then profile and optimize it using one or more appropriate methods, such as:\n\n1. Use of better algorithms or data structures\n2. Use of vectorization\n3. JIT or AOT compilation of critical functions\n4. Re-writing critical functions in C++ and using pybind11 to wrap them\n5. Making use of parallelism or concurrency\n6. Making use of distributed compuitng\n\nDocument the improvement in performance with the optimizations performed.\n\n### Applications to simulated data sets\n\nAre there specific inputs that give known outputs (e.g. there might be closed form solutions for special input cases)? How does the algorithm perform on these? \n\nIf no such input cases are available (or in addition to such input cases), how does the algorithm perform on simulated data sets for which you know the \"truth\"? \n\n### Applications to real data sets\n\nTest the algorithm on the real-world examples in the original paper if possible. Try to find at least one other real-world data set not in the original paper and test it on that. Describe and interpret the results.\n\n### Comparative analysis with competing algorithms\n\nFind two other algorithm that address a similar problem. Perform a comparison - for example, of accuracy or speed. You can use native libraries of the other algorithms - you do not need to code them yourself. Comment on your observations. \n\n### Discussion/conclusion\n\nYour thoughts on the algorithm. Does it fulfill a particular need? How could it be generalized to other problem domains? What are its limitations and how could it be improved further?\n\n### References/bibliography\n\nMake sure you cite your sources.\n\n## Code\n\nThe code should be in a public GitHub repository with:\n\n1. A README file\n2. An open source license\n3. Source code\n4. Test code\n5. Examples\n6. A reproducible report\n\n The package should be downloadable and installable with `python setup.py install`, or even posted to PyPI adn installable with `pip install package`. See https://packaging.python.org/tutorials/packaging-projects/ for how to upload to a Python repository. Use the repository at https://test.pypi.org - this is for testing and will be wiped clean after a period.\n\n## Rubric\n\nHere are some considerations I use when grading. 
Note that the \"difficulty factor\" of the chosen algorithm will be factored into the grading. \n\n1. Is the abstract, background and discussion readable and clear? \n2. Is the algorithm description clear and accurate? \n3. Has the algorithm been optimized? \n4. Are the applications to simulated/real data clear and useful? \n5. Was the comparative analysis done well? \n6. Is there a well-maintained GitHub repository for the code? \n7. Is the document show evidence of literate programming? \n8. Is the analysis reproducible? \n9. Is the code tested? Are examples provided? \n10. Is the package easily installable? \n\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
d0b9f538c2eff46f63b02b0fc24e8c9986558e11
66,745
ipynb
Jupyter Notebook
notebooks/ClusterFinderLabelledContigCheck.ipynb
prihoda/bgc-pipeline-1
3df49f1b4c25232ce3f7e622607465c8f6a88df8
[ "MIT" ]
6
2019-05-06T04:32:47.000Z
2022-01-25T03:24:10.000Z
notebooks/ClusterFinderLabelledContigCheck.ipynb
prihoda/bgc-pipeline
29300da912fd1836eea8e285e2e50f5326f021f3
[ "MIT" ]
6
2019-02-15T19:02:56.000Z
2021-07-12T08:28:48.000Z
notebooks/ClusterFinderLabelledContigCheck.ipynb
prihoda/bgc-pipeline
29300da912fd1836eea8e285e2e50f5326f021f3
[ "MIT" ]
4
2019-09-09T07:15:23.000Z
2021-07-12T07:25:04.000Z
53.056439
15,488
0.592329
[ [ [ "# ClusterFinder Reference genomes reconstruction\n\nThis notebook validates the 10 genomes we obtained from NCBI based on the ClusterFinder supplementary table. \n\nWe check that the gene locations from the supplementary table match locations in the GenBank files.", "_____no_output_____" ] ], [ [ "from Bio import SeqIO\nfrom Bio.SeqFeature import FeatureLocation\nimport pandas as pd\nfrom Bio import Entrez\nimport seaborn as sns\n\ndef get_features_of_type(sequence, feature_type):\n return [feature for feature in sequence.features if feature.type == feature_type]", "_____no_output_____" ], [ "def get_reference_gene_location(gene_csv_row):\n start = gene_csv_row['gene start'] - 1\n end = gene_csv_row['gene stop']\n strand = 1 if gene_csv_row['gene strand'] == '+' else (-1 if gene_csv_row['gene strand'] == '-' else None)\n return FeatureLocation(start, end, strand)\n\ndef feature_locus_matches(feature, reference_locus):\n return feature.qualifiers.get('locus_tag',[None])[0] == reference_locus", "_____no_output_____" ] ], [ [ "# Loading reference cluster gene locations", "_____no_output_____" ] ], [ [ "reference_genes = pd.read_csv('../data/clusterfinder/labelled/CF_labelled_genes_orig.csv', sep=';')\nreference_genes.head()", "_____no_output_____" ] ], [ [ "# Genes with no sequence", "_____no_output_____" ] ], [ [ "no_sequence_genes = reference_genes[reference_genes['NCBI ID'] == '?']\nno_sequence_counts = no_sequence_genes.groupby('Genome ID')['gene locus'].count()\nprint('{} genes don\\'t have a sequence!'.format(len(no_sequence_genes)))\npd.DataFrame({'missing genes':no_sequence_counts})", "421 genes don't have a sequence!\n" ], [ "reference_ids = reference_genes[reference_genes['NCBI ID'] != '?']['NCBI ID'].unique()\nreference_ids", "_____no_output_____" ] ], [ [ "# Validating that reference genes are found in our sequences", "_____no_output_____" ] ], [ [ "def validate_genome(record, record_reference_genes):\n print('Validating {}'.format(record.id))\n record_genes = get_features_of_type(record, 'gene')\n record_cds = get_features_of_type(record, 'CDS')\n validation = []\n record_length = len(record.seq)\n min_location = record_length\n max_location = -1\n prev_gene_index = None\n prev_cluster_start = None\n for i, reference_gene in record_reference_genes.iterrows():\n reference_gene_location = get_reference_gene_location(reference_gene)\n reference_gene_locus = reference_gene['gene locus']\n reference_cluster_start = reference_gene['NPL start']\n gene_matches_locus = [f for f in record_genes if feature_locus_matches(f, reference_gene_locus)]\n cds_matches_locus = [f for f in record_cds if feature_locus_matches(f, reference_gene_locus)]\n gene_matches_location = [f for f in gene_matches_locus if reference_gene_location == f.location]\n cds_matches_location = [f for f in cds_matches_locus if reference_gene_location == f.location]\n validation.append({\n 'gene_locus_not_found':not gene_matches_locus, \n 'cds_locus_not_found':not cds_matches_locus, \n 'gene_location_correct': bool(gene_matches_location), \n 'cds_location_correct': bool(cds_matches_location)\n })\n if not cds_matches_locus:\n print('No CDS found for gene locus {}'.format(reference_gene_locus))\n if gene_matches_locus:\n gene_match = gene_matches_locus[0]\n if not cds_matches_locus:\n print(' Gene: ', gene_match.qualifiers)\n \n # Use gene index to check if we have a consecutive sequence of genes (except when going from one cluster to another)\n gene_index = [gi for gi,f in enumerate(record_genes) if 
feature_locus_matches(f, reference_gene_locus)][0]\n if reference_cluster_start == prev_cluster_start and gene_index != prev_gene_index + 1:\n print('Additional unexpected genes found before {} (index {} -> {}) at cluster start {}'.format(reference_gene_locus, prev_gene_index, gene_index, reference_cluster_start))\n \n # Calculate min and max cluster gene location to see how much of the sequence is covered by the reference genes\n min_location = min(gene_match.location.start, min_location)\n max_location = max(gene_match.location.end, max_location)\n prev_gene_index = gene_index\n prev_cluster_start = reference_cluster_start\n \n result = pd.DataFrame(validation).sum().to_dict()\n result['location correct'] = min(result['gene_location_correct'], result['cds_location_correct']) / len(validation)\n result['ID'] = record.id\n result['genome'] = record_reference_genes.iloc[0]['Genome ID']\n result['sequence length'] = record_length\n result['total genes'] = len(record_genes)\n result['reference genes'] = len(record_reference_genes)\n result['first location'] = min_location / record_length\n result['last location'] = max_location / record_length\n result['covered'] = (max_location - min_location) / record_length\n return result", "_____no_output_____" ], [ "validations = []\nreference_gene_groups = reference_genes.groupby('NCBI ID')\nrecords = SeqIO.parse('../data/clusterfinder/labelled/CF_labelled_contigs.gbk', 'genbank')\nfor record in records:\n ncbi_id = record.id\n print(ncbi_id)\n record_reference_genes = reference_gene_groups.get_group(ncbi_id)\n validations.append(validate_genome(record, record_reference_genes))\nvalidations = pd.DataFrame(validations)\nvalidations.set_index('ID', inplace=True)\nvalidations", "CM000950.1\nValidating CM000950.1\nNo CDS found for gene locus SSDG_00035\n Gene: OrderedDict([('locus_tag', ['SSDG_00035']), ('note', ['ABC transporter; frameshift'])])\nNo CDS found for gene locus SSDG_00089\n Gene: OrderedDict([('locus_tag', ['SSDG_00089']), ('note', ['squalene/phytoene dehydrogenase; frameshift'])])\nNo CDS found for gene locus SSDG_04325\n Gene: OrderedDict([('locus_tag', ['SSDG_04325']), ('note', ['AMP-dependent synthetase and ligase; frameshift'])])\nNo CDS found for gene locus SSDG_06129\n Gene: OrderedDict([('locus_tag', ['SSDG_06129']), ('note', ['transferase; frameshift'])])\nNo CDS found for gene locus SSDG_06352\n Gene: OrderedDict([('locus_tag', ['SSDG_06352']), ('note', ['predicted protein; frameshift'])])\nNo CDS found for gene locus SSDG_00185\n Gene: OrderedDict([('locus_tag', ['SSDG_00185']), ('note', ['glycosyl transferase; frameshift'])])\nNo CDS found for gene locus SSDG_06509\n Gene: OrderedDict([('locus_tag', ['SSDG_06509']), ('note', ['beta keto-acyl synthase; frameshift'])])\nNo CDS found for gene locus SSDG_07501\n Gene: OrderedDict([('locus_tag', ['SSDG_07501']), ('note', ['acyltransferase; frameshift'])])\nNo CDS found for gene locus SSDG_07518\n Gene: OrderedDict([('locus_tag', ['SSDG_07518']), ('note', ['predicted protein; frameshift'])])\nNo CDS found for gene locus SSDG_07538\n Gene: OrderedDict([('locus_tag', ['SSDG_07538']), ('note', ['pristinamycin I synthase 3 and 4; frameshift'])])\nDS999641.1\nValidating DS999641.1\nDS999642.1\nValidating DS999642.1\nDS999645.1\nValidating DS999645.1\nGG657738.1\nValidating GG657738.1\nGG657746.1\nValidating GG657746.1\nGG657747.1\nValidating GG657747.1\nGG657750.1\nValidating GG657750.1\nGG657751.1\nValidating GG657751.1\nGG657752.1\nValidating GG657752.1\nGG657754.1\nValidating 
GG657754.1\nKK037166.1\nValidating KK037166.1\nKK037233.1\nValidating KK037233.1\n" ], [ "validations['location correct'].mean()", "_____no_output_____" ], [ "1 - validations['location correct'].mean()", "_____no_output_____" ], [ "validations[['genome','first location','last location','covered','location correct','reference genes','total genes']]", "_____no_output_____" ] ], [ [ "# Cluster genes", "_____no_output_____" ] ], [ [ "genes = pd.read_csv('../data/clusterfinder/labelled/CF_labelled_genes.csv', sep=';')\ngenes.head()", "_____no_output_____" ], [ "cluster_counts = genes.groupby('contig_id')['cluster_id'].nunique()\ncluster_counts.sort_values().plot.barh()", "_____no_output_____" ], [ "gene_counts = genes.groupby('cluster_id')['locus_tag'].count()\ngene_counts.hist(bins=50)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0b9feb432e1ab790142cc80e805157847412991
114,605
ipynb
Jupyter Notebook
recursive_filters/cascaded_structures.ipynb
davidjustin1974/digital-signal-processing-lecture
3959b6c929828b0e2b5ae440523d9adc43ea928c
[ "MIT" ]
1
2020-11-04T03:40:49.000Z
2020-11-04T03:40:49.000Z
recursive_filters/cascaded_structures.ipynb
davidjustin1974/digital-signal-processing-lecture
3959b6c929828b0e2b5ae440523d9adc43ea928c
[ "MIT" ]
null
null
null
recursive_filters/cascaded_structures.ipynb
davidjustin1974/digital-signal-processing-lecture
3959b6c929828b0e2b5ae440523d9adc43ea928c
[ "MIT" ]
null
null
null
458.42
47,104
0.932865
[ [ [ "# Realization of Recursive Filters\n\n*This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [[email protected]](mailto:[email protected]).*", "_____no_output_____" ], [ "## Cascaded Structures\n\nThe realization of recursive filters with a high order may be subject to numerical issues. For instance, when the coefficients span a wide amplitude range, their quantization may require a small quantization step or may impose a large relative error for small coefficients. The basic concept of cascaded structures is to decompose a high order filter into a cascade of lower order filters, typically first and second order recursive filters.", "_____no_output_____" ], [ "### Decomposition into Second-Order Sections\n\nThe rational transfer function $H(z)$ of a linear time-invariant (LTI) recursive system can be [expressed by its zeros and poles](introduction.ipynb#Transfer-Function) as\n\n\\begin{equation}\nH(z) = \\frac{b_M}{a_N} \\cdot \\frac{\\prod_{\\mu=1}^{P} (z - z_{0\\mu})^{m_\\mu}}{\\prod_{\\nu=1}^{Q} (z - z_{\\infty\\nu})^{n_\\nu}}\n\\end{equation}\n\nwhere $z_{0\\mu}$ and $z_{\\infty\\nu}$ denote the $\\mu$-th zero and $\\nu$-th pole of degree $m_\\mu$ and $n_\\nu$ of $H(z)$, respectively. The total number of zeros and poles is denoted by $P$ and $Q$.\n\nThe poles and zeros of a real-valued filter $h[k] \\in \\mathbb{R}$ are either single real valued or conjugate complex pairs. This motivates to split the transfer function into\n\n* first order filters constructed from a single pole and zero\n* second order filters constructed from a pair of conjugated complex poles and zeros\n\nDecomposing the transfer function into these two types by grouping the poles and zeros into single poles/zeros and conjugate complex pairs of poles/zeros results in\n\n\\begin{equation}\nH(z) = K \\cdot \\prod_{\\eta=1}^{S_1} \\frac{(z - z_{0\\eta})}{(z - z_{\\infty\\eta})} \n\\cdot \\prod_{\\eta=1}^{S_2} \\frac{(z - z_{0\\eta}) (z - z_{0\\eta}^*)} {(z - z_{\\infty\\eta})(z - z_{\\infty\\eta}^*)}\n\\end{equation}\n\nwhere $K$ denotes a constant and $S_1 + 2 S_2 = N$ with $N$ denoting the order of the system. The cascade of two systems results in a multiplication of their transfer functions. Above decomposition represents a cascade of first- and second-order recursive systems. The former can be treated as a special case of second-order recursive systems. The decomposition is therefore known as decomposition into second-order sections (SOSs) or [biquad filters](https://en.wikipedia.org/wiki/Digital_biquad_filter). Using a cascade of SOSs the transfer function of the recursive system can be rewritten as\n\n\\begin{equation}\nH(z) = \\prod_{\\mu=1}^{S} \\frac{b_{0, \\mu} + b_{1, \\mu} \\, z^{-1} + b_{2, \\mu} \\, z^{-2}}{1 + a_{1, \\mu} \\, z^{-1} + a_{2, \\mu} \\, z^{-2}}\n\\end{equation}\n\nwhere $S = \\lceil \\frac{N}{2} \\rceil$ denotes the total number of SOSs. These results state that any real valued system of order $N > 2$ can be decomposed into SOSs. This has a number of benefits\n\n* quantization effects can be reduced by sensible grouping of poles/zeros, e.g. 
such that the spanned amplitude range of the filter coefficients is limited\n* A SOS may be extended by a gain factor to further reduce quantization effects by normalization of the coefficients\n* efficient and numerically stable SOSs serve as generic building blocks for higher-order recursive filters", "_____no_output_____" ], [ "### Example - Cascaded second-order section realization of a lowpass\n\nThe following example illustrates the decomposition of a higher-order recursive Butterworth lowpass filter into a cascade of second-order sections.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.markers import MarkerStyle\nfrom matplotlib.patches import Circle\nimport scipy.signal as sig\n\nN = 9 # order of recursive filter\n\ndef zplane(z, p, title='Poles and Zeros'):\n \"Plots zero and pole locations in the complex z-plane\"\n ax = plt.gca()\n \n ax.plot(np.real(z), np.imag(z), 'bo', fillstyle='none', ms = 10)\n ax.plot(np.real(p), np.imag(p), 'rx', fillstyle='none', ms = 10)\n unit_circle = Circle((0,0), radius=1, fill=False,\n color='black', ls='solid', alpha=0.9)\n ax.add_patch(unit_circle)\n ax.axvline(0, color='0.7')\n ax.axhline(0, color='0.7')\n \n plt.title(title)\n plt.xlabel(r'Re{$z$}')\n plt.ylabel(r'Im{$z$}')\n plt.axis('equal')\n plt.xlim((-2, 2))\n plt.ylim((-2, 2))\n plt.grid()\n\n\n# design filter\nb, a = sig.butter(N, 0.2)\n# decomposition into SOS\nsos = sig.tf2sos(b, a, pairing='nearest')\n\n\n# print filter coefficients\nprint('Coefficients of the recursive part \\n')\nprint(['%1.2f'%ai for ai in a])\nprint('\\n')\nprint('Coefficients of the recursive part of the individual SOS \\n')\nprint('Section \\t a1 \\t\\t a2')\nfor n in range(sos.shape[0]):\n print('%d \\t\\t %1.5f \\t %1.5f'%(n, sos[n, 4], sos[n, 5]))\n\n# plot pole and zero locations\nplt.figure(figsize=(5,5))\nzplane(np.roots(b), np.roots(a), 'Poles and Zeros - Overall')\n\nplt.figure(figsize=(10, 7))\nfor n in range(sos.shape[0]): \n plt.subplot(231+n)\n zplane(np.roots(sos[n, 0:3]), np.roots(sos[n, 3:6]), title='Poles and Zeros - Section %d'%n)\nplt.tight_layout()\n\n# compute and plot frequency response of sections\nplt.figure(figsize=(10,5))\nfor n in range(sos.shape[0]):\n Om, H = sig.freqz(sos[n, 0:3], sos[n, 3:6])\n plt.plot(Om, 20*np.log10(np.abs(H)), label=r'Section %d'%n)\n\nplt.xlabel(r'$\\Omega$')\nplt.ylabel(r'$|H_n(e^{j \\Omega})|$ in dB')\nplt.legend()\nplt.grid()", "Coefficients of the recursive part \n\n['1.00', '-5.39', '13.38', '-19.96', '19.62', '-13.14', '5.97', '-1.78', '0.31', '-0.02']\n\n\nCoefficients of the recursive part of the individual SOS \n\nSection \t a1 \t\t a2\n0 \t\t -0.50953 \t 0.00000\n1 \t\t -1.04232 \t 0.28838\n2 \t\t -1.11568 \t 0.37905\n3 \t\t -1.25052 \t 0.54572\n4 \t\t -1.46818 \t 0.81477\n" ] ], [ [ "**Exercise**\n\n* What amplitude range is spanned by the filter coefficients?\n* What amplitude range is spanned by the SOS coefficients?\n* Change the pole/zero grouping strategy from `pairing='nearest'` to `pairing='keep_odd'`. What changes?\n* Increase the order `N` of the filter. What changes?\n\nSolution: Inspecting both the coefficients of the recursive part of the original filter and of the individual SOS reveals that the spanned amplitude range is lower for the latter. The choice of the pole/zero grouping strategy influences the locations of the poles/zeros in the individual SOS, the spanned amplitude range of their coefficients and the transfer functions of the individual sections. 
The total number of SOS scales with the order of the original filter.", "_____no_output_____" ], [ "**Copyright**\n\nThis notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples, 2016-2018*.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0ba03016ce59d9a376de46fc587a1fa736c07a0
237,825
ipynb
Jupyter Notebook
examples/daal4py_data_science.ipynb
maria-Petrova/daal4py
044b7d797d45885b97250353af0fa170d48da3d9
[ "Apache-2.0" ]
1
2021-06-11T22:09:13.000Z
2021-06-11T22:09:13.000Z
examples/daal4py_data_science.ipynb
maria-Petrova/daal4py
044b7d797d45885b97250353af0fa170d48da3d9
[ "Apache-2.0" ]
1
2021-01-21T12:13:36.000Z
2021-01-21T12:13:36.000Z
examples/daal4py_data_science.ipynb
maria-Petrova/daal4py
044b7d797d45885b97250353af0fa170d48da3d9
[ "Apache-2.0" ]
1
2019-11-12T19:17:13.000Z
2019-11-12T19:17:13.000Z
372.766458
37,710
0.924756
[ [ [ "# Utilizing daal4py in Data Science Workflows\n\nThe notebook below has been made to demonstrate daal4py in a data science context. It utilizes a Cycling Dataset for pyworkout-toolkit, and attempts to create a linear regression model from the 5 features collected for telemetry to predict the user's Power output in the absence of a power meter.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport glob\nimport sys\n%matplotlib inline\nsys.version", "_____no_output_____" ] ], [ [ "This example will be exploring workout data pulled from Strava, processed into a CSV for Pandas and daal4py usage. Below, we utilize pandas to read in the CSV file, and look at the head of dataframe with .head()", "_____no_output_____" ] ], [ [ "workout_data_dd= pd.read_csv('data/batch/cycling_dataset.csv', index_col=0)\nworkout_data_dd.head()", "_____no_output_____" ] ], [ [ "The data above has several key features that would be of great use here. \n- Altitude can affect performance, so it might be a useful feature. \n- Cadence is the revolutions per minute of the crank, and may have possible influence. \n- Heart Rate is a measure of the body's workout strain, and would have a high possibly of influence.\n- Distance may have a loose correlation as it is highly route dependent, but might be possible.\n- Speed has possible correlations as it ties directly into power.", "_____no_output_____" ], [ "## Explore and visualize some of the data", "_____no_output_____" ], [ "In general, we are trying to predict on the 'power' in Watts to see if we can generate a model that can predict one's power output without the usage of a cycling power meter. Below are some basic scatterplots as we explore the data. Scatterplots are great for looking for patterns and correlation in the data itself. Below, we can see that cadence and speed are positively correlated. ", "_____no_output_____" ] ], [ [ "workout_data_dd.plot.scatter('cadence','power')\nplt.show()\nworkout_data_dd.plot.scatter('hr','power')\nplt.show()\nworkout_data_dd.plot.scatter('cadence','speed')\nplt.show()\nworkout_data_dd.plot.scatter('speed','power')\nplt.show()\nworkout_data_dd.plot.scatter('altitude','power')\nplt.show()\nworkout_data_dd.plot.scatter('distance','power')\nplt.show()", "_____no_output_____" ] ], [ [ "## Using daal4py for Machine Learning tasks\n\nIn the sections below, we will be using daal4py directly. After importing the model, we will arrange it in a separate independent and dependent dataframes, then use the daal4py's training and prediction classes to generate a workable model.", "_____no_output_____" ] ], [ [ "import daal4py as d4p", "_____no_output_____" ] ], [ [ "It is now the time to split the dataset into train and test sets. This is demonstrated below.", "_____no_output_____" ] ], [ [ "print(workout_data_dd.shape)\ntrain_set = workout_data_dd[0:3000]\ntest_set = workout_data_dd[3000:]\nprint(train_set.shape, test_set.shape)", "(3902, 9)\n(3000, 9) (902, 9)\n" ], [ "# Reduce the dataset, create X. 
We drop the target, and other non-essential features.\nreduced_dataset = train_set.drop(['time','power','latitude','longitude'], axis=1)\n# Get the target, create Y\ntarget = train_set.power.values.reshape((-1,1))\n# This is essentially doing np.array(dataset.power.values, ndmin=2).T\n# as it needs to force a 2 dimensional array as we only have 1 target", "_____no_output_____" ] ], [ [ "X is 5 features by 3k rows, Y is 3k rows by 1 column", "_____no_output_____" ] ], [ [ "print(reduced_dataset.values.shape, target.shape)", "(3000, 5) (3000, 1)\n" ] ], [ [ "## Training the model", "_____no_output_____" ], [ "Create the Linear Regression Model, and train the model with the data. We utilize daal4py's linear_regression_training class to create the model, then call .compute() with the independent and dependent data as the parameters.", "_____no_output_____" ] ], [ [ "d4p_lm = d4p.linear_regression_training(interceptFlag=True)\nlm_trained = d4p_lm.compute(reduced_dataset.values, target)", "_____no_output_____" ], [ "print(\"Model has this number of features: \", lm_trained.model.NumberOfFeatures)", "Model has this number of features: 5\n" ] ], [ [ "## Prediction (inference) with the trained model", "_____no_output_____" ], [ "Now that the model is trained, we can test it with the test part of the dataset. We drop the same features to match that of the trained model, and put it into daal4py's linear_regression_prediction class.", "_____no_output_____" ] ], [ [ "subset = test_set.drop(['time','power','latitude','longitude'], axis=1)", "_____no_output_____" ] ], [ [ "Now we can create the Prediction object and use the reduced dataset for prediction. The class's arguments use the independent data and the trained model from above as the parameters.", "_____no_output_____" ] ], [ [ "lm_predictor_component = d4p.linear_regression_prediction()\nresult = lm_predictor_component.compute(subset.values, lm_trained.model)", "_____no_output_____" ], [ "plt.plot(result.prediction[0:300])\nplt.plot(test_set.power.values[0:300])\nplt.show()", "_____no_output_____" ] ], [ [ "The graph above shows the Orange (predicted) result over the Blue (original data). 
This data is notoriously sparse in features, leading to a difficult-to-predict target!", "_____no_output_____" ], [ "## Model properties\nAnother aspect of the model is the trained model's properties, which are explored below.", "_____no_output_____" ] ], [ [ "print(\"Betas:\",lm_trained.model.Beta) \nprint(\"Number of betas:\", lm_trained.model.NumberOfBetas)\nprint(\"Number of Features:\", lm_trained.model.NumberOfFeatures)", "Betas: [[ 1.51003501e+01 -1.25075548e-01 1.32249115e+00 1.64363922e-03\n 8.53155955e-01 -1.09595022e+01]]\nNumber of betas: 6\nNumber of Features: 5\n" ] ], [ [ "## Additional metrics\nWe can generate metrics on the independent data with daal4py's low_order_moments() class.", "_____no_output_____" ] ], [ [ "metrics_processor = d4p.low_order_moments()\ndata = metrics_processor.compute(reduced_dataset.values)\ndata.standardDeviation", "_____no_output_____" ] ], [ [ "## Migrating the trained model for inference on external systems\n\nOccasionally one may need to migrate the trained model to another system for inference only--this use case allows the training on a much more powerful machine with a larger dataset, and placing the trained model for inference-only on a smaller machine.", "_____no_output_____" ] ], [ [ "import pickle", "_____no_output_____" ], [ "with open('trained_model2.pickle', 'wb') as model_pi:\n pickle.dump(lm_trained.model, model_pi)\n model_pi.close()", "_____no_output_____" ] ], [ [ "The trained model file above can be moved to an inference-only or embedded system. This is useful if the training is extremely heavy or compute-limited. ", "_____no_output_____" ] ], [ [ "with open('trained_model2.pickle', 'rb') as model_import:\n lm_import = pickle.load(model_import)", "_____no_output_____" ] ], [ [ "The imported model from file is now usable again. We can check the betas from the model to ensure that the trained model is present.", "_____no_output_____" ] ], [ [ "lm_import.Beta", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0ba381c794daac3ccd21b8ae510065a8e7a9c46
10,609
ipynb
Jupyter Notebook
doc/explanation/jive_explanation.ipynb
taebinkim7/py_jive
0229ae4a6d05a532580e239ae2fa33cdf2b53958
[ "MIT" ]
13
2017-07-19T20:01:21.000Z
2021-03-19T06:50:08.000Z
doc/explanation/jive_explanation.ipynb
taebinkim7/py_jive
0229ae4a6d05a532580e239ae2fa33cdf2b53958
[ "MIT" ]
7
2017-07-17T19:18:47.000Z
2022-01-14T21:02:41.000Z
doc/explanation/jive_explanation.ipynb
taebinkim7/py_jive
0229ae4a6d05a532580e239ae2fa33cdf2b53958
[ "MIT" ]
7
2019-06-14T09:24:00.000Z
2022-03-15T04:39:49.000Z
72.664384
631
0.64483
[ [ [ "# **JIVE: Joint and Individual Variation Explained**", "_____no_output_____" ], [ "JIVE (Joint and Individual Variation Explained) is a dimensional reduction algorithm that can be used when there are multiple data matrices (data blocks). The multiple data block setting means there are $K$ different data matrices, with the same number of observations $n$ and (possibly) different numbers of variables ($d_1, \\dots, d_k$). JIVE finds modes of variation which are common (joint) to all $K$ data blocks and modes of individual variation which are specific to each block. For a detailed discussion of JIVE see [Angle-Based Joint and Individual Variation Explained](https://arxiv.org/pdf/1704.02060.pdf).[^1]\n\nFor a concrete example, consider a two block example from a medical study. Suppose there are $n=500$ patients (observations). For each patient we have $d_1 = 100$ bio-medical variables (e.g. hit, weight, etc). Additionally we have $d_2 = 10,000$ gene expression measurements for each patient.\n\n## **The JIVE decomposition**\n\nSuppose we have $K$ data data matrices (blocks) with the same number of observations, but possibly different numbers of variables; in particular let $X^{(1)}, \\dots, X^{(K)}$ where $X^{(k)} \\in \\mathbb{R}^{n \\times d_k}$. JIVE will then decompose each matrix into three components: joint signal, individual signal and noise\n\n\\begin{equation}\nX^{(k)} = J^{(k)} + I^{(k)} + E^{(k)}\n\\end{equation}\n\nwhere $J^{(k)}$ is the joint signal estimate, $I^{(k)}$ is the individual signal estimate and $E^{(k)}$ is the noise estimate (each of these matrices must the same shape as the original data block: $\\mathbb{R}^{n \\times d_k}$). Note: **we assume each data matrix** $X^{(k)}$ **has been column mean centered**.\n\n\nThe matrices satisfy the following constraints:\n\n1. The joint matrices have a common rank: $rk(J^{(k)}) = r_{joint}$ for $k=1, \\dots, K$.\n2. The individual matrices have block specific ranks $rk(I^{(k)}) = r_{individual}^{(k)}$.\n3. The columns of the joint matrices share a common space called the joint score space (a subspace of $\\mathbb{R}^n$); in particular the $\\text{col-span}(J^{(1)}) = \\dots = \\text{col-span}(J^{(K)})$ (hence the name joint).\n4. Each individual spaces score subspace (of $\\mathbb{R}^n$) is orthogonal to the the joint space; in particular $\\text{col-span}(J^{(k)}) \\perp \\text{col-span}(I^{(k)})$ for $k=1, \\dots, K$.\n\nNote that JIVE may be more natural if we think about data matrices subspaces of $\\mathbb{R}^n$ (the score space perspective). Typically we think of a data matrix as $n$ points in $\\mathbb{R}^d$. The score space perspective views a data matrix as $d$ vectors in $\\mathbb{R}^n$ (or rather the span of these vectors). One important consequence of this perspective is that it makes sense to related the data blocks in score space (e.g. as subspaces of $\\mathbb{R}^n$) since they share observtions.\n\n## Quantities of interest\n\nThere are a number of potential quantities of interest depending on the application. For example the user may be interested in the full matrices $J^{(k)}$ and/or $I^{(k)}$. 
By construction these matrices are not full rank and we may also be interested in their singular value decomposition, which we define as\n\n\\begin{align}\n& U^{(k)}_{joint}, D^{(k)}_{joint}, V^{(k)}_{joint} = \\text{rank } r_{joint} \\text{ SVD of } J^{(k)} \\\\\n& U^{(k)}_{individual}, D^{(k)}_{individual}, V^{(k)}_{individual} = \\text{rank } r_{individual}^{(k)} \\text{ SVD of } I^{(k)}\n\\end{align}\n\n\nOne additional quantity of interest is $U_{joint} \\in \\mathbb{R}^{n \\times r_{joint}}$ which is an orthogonal basis of $\\text{col-span}(J^{(k)})$. This matrix is produced from an intermediate JIVE computation. \n\n## **PCA analogy**\nWe give a brief discussion of the PCA/SVD decomposition (assuming the reader is already familiar).\n\n#### Basic decomposition\nSuppose we have a data matrix $X \\in \\mathbb{R}^{n \\times d}$. Assume that $X$ has been column mean centered and consider the SVD decomposition (this is PCA since we have mean centered the data):\n\n\\begin{equation}\nX = U D V^T.\n\\end{equation}\nwhere $U \\in \\mathbb{R}^{n \\times m}$, $D \\in \\mathbb{R}^{m \\times m}$ is diagonal, and $V \\in \\mathbb{R}^{d \\times m}$ with $m = \\min(n, d)$. Note $U^TU = V^TV = I_{m \\times m}$. \n\nSuppose we have decided to use a rank $r$ approximation. We can then decompose $X$ into a signal matrix ($A$) and a noise matrix ($E$)\n\n\\begin{equation}\nX = A + E,\n\\end{equation}\nwhere $A$ is the rank $r$ SVD approximation of $X$, i.e. \n\\begin{align}\nA := & U_{:, 1:r} D_{1:r, 1:r} V_{:, 1:r}^T \\\\\n = & \\widetilde{U} \\widetilde{D} \\widetilde{V}^T\n\\end{align}\nThe notation $U_{:, 1:r} \\in \\mathbb{R}^{n \\times r}$ means the first $r$ columns of $U$. Similarly we can see the error matrix is $E := U_{:, r+1:m} D_{r+1:m, r+1:m} V_{:, r+1:m}^T$.\n\n#### Quantities of interest\n\nThere are many ways to use a PCA/SVD decomposition. Some common quantities of interest include\n\n- The normalized scores: $\\widetilde{U} \\in \\mathbb{R}^{n \\times r}$\n- The unnormalized scores: $\\widetilde{U}\\widetilde{D} \\in \\mathbb{R}^{n \\times r}$\n- The loadings: $\\widetilde{V} \\in \\mathbb{R}^{d \\times r}$\n- The full signal approximation: $A \\in \\mathbb{R}^{n \\times d}$\n\n\n#### Scores and loadings\n\nFor both PCA and JIVE we use the notation $U$ (scores) and $V$ (loadings). These show up in several places.\n\nWe refer to all $U \\in \\mathbb{R}^{n \\times r}$ matrices as scores. We can view the $n$ rows of $U$ as representing the $n$ data points with $r$ derived variables (put differently, columns of $U$ are $r$ derived variables). The columns of $U$ are orthonormal: $U^TU = I_{r \\times r}$.\n\nSometimes we may want $UD$, i.e. scale the columns of $U$ by $D$ (the columns are still orthogonal). This can be useful when we want to represent the original data by $r$ variables. We refer to $UD$ as unnormalized scores.\n\nWe refer to all $V\\in \\mathbb{R}^{d \\times r}$ matrices as loadings[^2]. The $j$th column of $V$ gives the linear combination of the original $d$ variables which is equal to the $j$th column of the unnormalized scores ($j$th column of $UD$). Equivalently, if we project the $n$ data points (rows of $X$) onto the $j$th column of $V$ we get the $j$th column of the unnormalized scores. \n\nThe typical geometric perspective of PCA is that the scores represent $r$ new derived variables. For example, if $r = 2$ we can look at a scatter plot that gives a two dimensional approximation of the data. In other words, the rows of the scores matrix are $n$ data points living in $\\mathbb{R}^r$. 
\n\nAn alternative geometric perspective is that the $r$ columns of the scores matrix are vectors living in $\\mathbb{R}^n$. The original $d$ variables span a subspace of $\\mathbb{R}^n$ given by $\\text{col-span}(X)$. The scores then span a lower dimensional subspace of $\\mathbb{R}^n$ that approximates $\\text{col-span}(X)$.\n\nThe first perspective says PCA finds a lower dimensional approximation to a subspace in $\\mathbb{R}^d$ (spanned by the $n$ data points). The second perspective says PCA finds a lower dimensional approximation to a subspace in $\\mathbb{R}^n$ (spanned by the $d$ data points).\n\n## **JIVE operating in score space**\n\nFor a data matrix $X$ let's call the span of the variables (columns) the *score subspace*, $\\text{col-span}(X) \\subset \\mathbb{R}^n$. Typically we think of a data matrix as $n$ points in $\\mathbb{R}^d$. The score space perspective reverses this and says a data matrix is $d$ points in $\\mathbb{R}^n$. When thinking in the score space it's common to think about subspaces, i.e. the span of the $d$ variables in $\\mathbb{R}^n$. In other words, if two data matrices have the same column span then their score subspaces are the same[^3].\n\nJIVE partitions the score space of each data matrix into three subspaces: joint, individual and noise. The joint score subspace for each data block is the same. The individual score subspace, however, is (possibly) different for each of the $K$ blocks. The $k$th block's individual score subspace is orthogonal to the joint score subspace. Recall that the $K$ data matrices have the same number of observations ($n$) so it makes sense to think about how the data matrices relate to each other in score space.\n\nPCA partitions the score space into two subspaces: signal and noise (see above). For JIVE we might combine the joint and individual score subspaces and call this the signal score subspace.\n\n\n", "_____no_output_____" ], [ "# Footnotes\n[^1]: Note this paper calls the algorithm AJIVE (angle based JIVE); however, we simply use JIVE. Additionally, the paper uses columns as observations in data matrices whereas we use rows as observations.\n\n[^2]: For PCA we used tildes (e.g. $\\widetilde{U}$) to denote the \"partial\" SVD approximation; however, for the final JIVE decomposition we do not use tildes. This is intentional since for JIVE the SVD comes from the $I$ and $J$ matrices which are exactly rank $r$. Therefore we view this SVD as the \"full\" SVD.\n\n[^3]: This might remind the reader of TODO", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
d0ba3f34580e636cb4f81f08a97b6a994c80dd45
6,060
ipynb
Jupyter Notebook
Tutorials/Operators.ipynb
mbaas2/APLcourse
3acdbef4a1f7c06be049e8677b71ce8536815a72
[ "MIT" ]
1
2020-11-08T18:16:22.000Z
2020-11-08T18:16:22.000Z
Tutorials/Operators.ipynb
mbaas2/APLcourse
3acdbef4a1f7c06be049e8677b71ce8536815a72
[ "MIT" ]
1
2019-10-29T16:57:47.000Z
2019-10-29T16:57:47.000Z
Tutorials/Operators.ipynb
mbaas2/APLcourse
3acdbef4a1f7c06be049e8677b71ce8536815a72
[ "MIT" ]
null
null
null
23.129771
248
0.434158
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0ba654e700f94a374297920c9b974f365df1c86
1,956
ipynb
Jupyter Notebook
gs_quant/examples/01_pricing_and_risk/00_rates/010003_calc_swap_risk_measures.ipynb
jeamick/gs-quant
a61dd2866dafc8453949391e900f9bf1ce2ad52e
[ "Apache-2.0" ]
null
null
null
gs_quant/examples/01_pricing_and_risk/00_rates/010003_calc_swap_risk_measures.ipynb
jeamick/gs-quant
a61dd2866dafc8453949391e900f9bf1ce2ad52e
[ "Apache-2.0" ]
null
null
null
gs_quant/examples/01_pricing_and_risk/00_rates/010003_calc_swap_risk_measures.ipynb
jeamick/gs-quant
a61dd2866dafc8453949391e900f9bf1ce2ad52e
[ "Apache-2.0" ]
null
null
null
21.032258
130
0.56544
[ [ [ "from gs_quant.common import PayReceive, Currency\nfrom gs_quant.instrument import IRSwap\nfrom gs_quant.session import Environment, GsSession\nfrom gs_quant.risk import DollarPrice, IRDelta, IRDeltaParallel", "_____no_output_____" ], [ "# external users should substitute their client id and secret; please skip this step if you are using internal jupyterhub\nGsSession.use(Environment.PROD, client_id=None, client_secret=None, scopes=('run_analytics',))", "_____no_output_____" ], [ "swap = IRSwap(PayReceive.Pay, '10y', Currency.GBP)", "_____no_output_____" ], [ "result = swap.calc((DollarPrice, IRDeltaParallel)) ", "_____no_output_____" ], [ "print(result) # all results", "_____no_output_____" ], [ "print(result[IRDeltaParallel]) # single measure", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d0ba6b21b7607ed212c99ece97e60ab54696de71
156,338
ipynb
Jupyter Notebook
lab_05/ME3264_Lab-05.ipynb
cooperrc/applied_measurements
d9a7c2b774a0f3f3cb864c645f95fb8d314c1356
[ "BSD-3-Clause" ]
null
null
null
lab_05/ME3264_Lab-05.ipynb
cooperrc/applied_measurements
d9a7c2b774a0f3f3cb864c645f95fb8d314c1356
[ "BSD-3-Clause" ]
1
2021-01-26T17:01:14.000Z
2021-01-26T17:16:58.000Z
lab_05/ME3264_Lab-05.ipynb
cooperrc/applied_measurements
d9a7c2b774a0f3f3cb864c645f95fb8d314c1356
[ "BSD-3-Clause" ]
2
2021-01-26T16:56:18.000Z
2021-02-08T23:47:42.000Z
296.094697
43,576
0.923352
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport check_lab05 as p\n\nplt.rcParams.update({'font.size': 14})\nplt.rcParams['lines.linewidth'] = 3\npi=np.pi", "_____no_output_____" ] ], [ [ "ME 3264 - Applied Measurements Laboratory\n===========================================\n\nLab #5 - Linear Variable Differential Transformer(LVDT)\n=====================================================\n\n## Objective\nThe objectives of this laboratory are :\n\n1. Gain familiarity with the physical operating principle of Linear Variable Differential Transformer (LVDT) measurements\n2. Calibrate the LVDT measurement using voltage measurements\n3. Determine relationship between supplied voltage amplitude and linear motion\n4. Investigate effects of changing input frequency and sampling rate on response frequency \n", "_____no_output_____" ], [ "## Background\n\n\n### Working principle of LVDT\n\nA linear variable differential transformer (LVDT) is a device that can measure the absolute linear position or changes in position of a separate device. LVDTs operate on the principle of a transformer. Because it is a transformer, the LVDT requires an ac drive signal. As shown in Fig.1 (and Fig.2A) , an LVDT consists of a coil assembly and a core. The coil assembly is typically mounted to a stationary form, while the core is secured to the object whose position is being measured. The coil assembly consists of three coils of wire wound on the hollow form. A core of permeable material can slide freely through the center of the form. The inner coil is the primary, which is excited by an AC source as shown. Magnetic flux produced by the primary is coupled to the two secondary coils, inducing an AC voltage in each coil [2].\n\n<center><img src=\"https://upload.wikimedia.org/wikipedia/commons/5/57/LVDT.png\" alt=\"Drawing\" style=\"width: 300px;\"/> </center>\n\n<center>Figure 1: Cutaway view of an LVDT. Current is driven through the primary coil at A, causing an induction current to be generated through the secondary coils at B. [4] </center>\n\n### LVDT Measurement\n\nAn LVDT measures displacement by associating a specific signal value for any given position of the core. This association of a signal value to a position occurs through electromagnetic coupling of an AC excitation signal on the primary winding to the core and back to the secondary windings as shown in Fig.2B. The position of the core determines how tightly the signal of the primary coil is coupled to each of the secondary coils. The two secondary coils are series-opposed, which means wound in series but in opposite directions. This results in the two signals on each secondary being 180 deg out of phase. Therefore phase of the output signal determines direction and its amplitude, distance [2].\n\nFig.2C shows the operational characteristics of LVDT with respect to the core displacement. 
\n\n<center><img src=\"https://ars.els-cdn.com/content/image/3-s2.0-B9780081028841000042-f04-12-9780081028841.jpg\" alt=\"Drawing\" style=\"width: 200px;\"/> </center>\n\n<center>Figure 2: The operation of the LVDT (A) Internal arrangement (B) Electrical circuit, the dots signify the positive ending of the winding (C) Operational characteristics [5] </center>\n\n### Advantages of LVDT\n\nLVDTs have a number of advantages, including -\n\n* The ability to measure absolute position, the ability to be completely sealed from the environment, nearly frictionless operation, and excellent repeatability of the measurement\n* Because the device relies on the coupling of magnetic flux, an LVDT can have infinite resolution. Therefore the smallest fraction of movement can be detected by suitable signal conditioning hardware, and the resolution of the transducer is solely determined by the resolution of the data acquisition system\n* Linearity of operation as output is a direct and linear function of the input\n\n\n\n", "_____no_output_____" ], [ "## Part 1 - LVDT calibaration", "_____no_output_____" ], [ "### Problem 1 - Relate voltage to displacement\n\nLet's consider a LVDT with it's core attached to a micrometer. Following table consists of core displacment, and mean DC voltage, recorded by DAQ in an experiment. Obtain the caliberation curve of LVDT using linear regression. \n\n\n|Displacement| Mean DC |\n|--- | --- | \n|5 mm | 4.829 V | \n|6 mm | 6.690 V | \n|7 mm | 8.333 V | \n|4 mm | 3.868 V | \n|3 mm | 2.024 V | \n|2 mm | 0.145 V | \n|1 mm | -1.738 V | \n\n\n", "_____no_output_____" ] ], [ [ "from scipy.optimize import curve_fit\n\n\ndef func(x,a,b):\n '''fits the linear equation y = a + bx\n This equation can be replaced by polynomial or exponential \n as per the fitting goals of the problem'''\n return (a + b*x)\n\nx = [5,6,7,4,3,2,1] # mm\ny = [4.829,6.690,8.333,3.686,2.024,0.145,-1.738] # Volt\n\nk,pcov=curve_fit(func, x, y) \nk_error=np.sqrt(np.diag(pcov)) # Co-variance matrix\na = np.asarray(k[0])\nb = np.asarray(k[1])\n\nprint(\"Caliberation equation for LVDT is y = %1.3f +%1.3fx \\n \"%(a,b))\nprint(\"Caliberation coefficent a =%1.3f +/- %1.3f \\n\"%(k[0],k_error[0]))\nprint(\"Caliberation coefficent b = %1.3f +/- %1.3f \\n\"%(k[1],k_error[1]))\n\n\nplt.plot(x,y,'o',label='experiment')\nplt.plot(x,func(x,a,b),label='model')\nplt.legend()\nplt.xlabel(r'Displacement, mm')\nplt.ylabel('Mean DC, Volts')", "Caliberation equation for LVDT is y = -3.163 +1.647x \n \nCaliberation coefficent a =-3.163 +/- 0.185 \n\nCaliberation coefficent b = 1.647 +/- 0.041 \n\n" ] ], [ [ "Note - In the curvefit,\n\n`k,pcov=curve_fit(func, x, y)`\n\n* `k` - Optimal values for the parameters so that the sum of the squared residuals of (f(x,k) - y) is minimized.\n* `pcov` - The estimated covariance of k. The diagonals provide the variance of the parameter estimate. To compute one standard deviation errors on the parameters, we use k_error = np.sqrt(np.diag(pcov)).", "_____no_output_____" ], [ "### Problem 2 - Check your work\n\nCalculate the linear regression coefficents a and b, and their standard variances for data in problem 1 using linear least square fitting method described in [Ref 3](https://mathworld.wolfram.com/LeastSquaresFitting.html). 
Do these values compare well with the coefficients and standard variances obtained from the covariance matrix in the above example?", "_____no_output_____" ] ], [ [ "## # enter your work here - Uncomment the following lines of code and make necessary changes\n\n# n = len(x)\n\n# # sum of squares\n\n# x_mean = np.mean(x)\n# y_mean = \n# ss_xx = np.sum((x-x_mean)**2)\n# ss_yy = \n# ss_xy = np.sum((y-y_mean)*(x-x_mean))\n\n# # linear regression coefficients\n# b = ss_xy/ss_xx\n# a = y_mean - b*x_mean\n# print(\"Equation for linear regression line is y = %1.3f +%1.3fx \\n \"%(a,b))\n\n# # correlation coefficient\n# r2 = ss_xy**2/ss_xx/ss_yy\n# print(\"Correlation coefficient is r2 = %1.3f \\n \"%(r2))\n\n# # The standard errors for a and b \n# s = (ss_yy -ss_xy**2/ss_xx)/(n-2)\n\n# sigma_a = np.sqrt(s)*np.sqrt(1/n + (x_mean**2/ss_xx))\n# sigma_b = np.sqrt(s)/np.sqrt(ss_xx)\n\n# print(\"Std (a)= %1.3f\\n \"%sigma_a)\n# print(\"Std (b)= %1.3f\\n \"%sigma_b)\n\np.check_p02(a, b)", "Nice work!\n" ] ], [ [ "## Part 2 - Calibrate Piezoelectric with LVDT output\n\nIn part 2, you compare the amplitude of the input voltage for the piezoelectric motion to the amplitude of the LVDT voltage output. When the output voltage is larger, the piezoelectric is moving further. \n\n", "_____no_output_____" ] ], [ [ "from IPython.display import YouTubeVideo\nYouTubeVideo('NwE8B9IHvyo')", "_____no_output_____" ] ], [ [ "### Problem 3 - Calibrate Piezoelectric\n\nThe following table contains the amplitude of the input voltage for the piezoelectric motion and the LVDT voltage output, recorded by the DAQ in an experiment. Obtain the calibration curve using linear regression. You can use `curve_fit` as explained in Part 1. \n\n|Input, Vpp | Output Vpp |\n|--- | --- | \n|1 | 0.00101 | \n|2 | 0.00380 | \n|4 | 0.00698 | \n|4 | 0.01048 | \n|5 | 0.01420 | \n|6 | 0.01832 | \n|7 | 0.02273 | \n", "_____no_output_____" ] ], [ [ "## enter your work here", "_____no_output_____" ] ], [ [ "## Part 3 - Explore frequency-response between Piezoelectric and LVDT\n\nIn part 3, you vary the frequency input to the piezoelectric and measure the output frequency measured by the LVDT. You are constrained by the Nyquist frequency in these measurements. If you collect data at 500 Hz, then the largest frequency you can reliably measure is 250 Hz. The concept of Nyquist frequency is further explored in Problem 4. \n\n\n### Problem 4 - Nyquist frequency\n\nConsider a case where a signal generator sends a cosine-wave signal to a piezo motor. The signal has a frequency of 1 Hz with an amplitude of 2 Vpp (Volts peak-to-peak). The Data Acquisition System (DAQ) takes N measurements over the given timeframe from 0-10 seconds to measure the corresponding LVDT output signal. Plot and compare the input and measured signals. ", "_____no_output_____" ] ], [ [ "N=20\nt_collect=10 # time to collect data\nt=np.linspace(0,t_collect,1000)\ny=np.cos(2*pi*t)\ntsample=np.linspace(0,10,N+1)\nysample=np.cos(2*pi*tsample)\nplt.figure(20)\nplt.plot(t,y,label='signal')\nplt.plot(tsample,ysample,'o-',label='measure')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\nplt.xlabel('time (s)')\nplt.ylabel('a.u.')", "_____no_output_____" ] ], [ [ "For N=20, it would appear that the DAQ can capture a minimal example of the input signal (just the peaks occurring at 1 Hz). Collecting data for N=20 over 10 seconds is equivalent to sampling at 20 samples/10 seconds = 2 Hz. This is called the Nyquist rate, which is given as\n\n$f_{Nyquist}=2f_{signal}$. 
(1)\n\nIn Equation 1, the Nyquist rate (also Shannon Sampling) [\\[6\\]](https://github.uconn.edu/rcc02007/ME3264-Lab_03)[\\[7\\]](./jerri_1977-shannon_sampling.pdf)[\\[8\\]](./nyquist.pdf), $f_{Nyquist}$, is the minimum sampling rate necessary to capture the signal at frequency, $f_{signal}$. Try changing N<20 and consider the apparent signal frequencies. \n\nIf you try N=11 in the Python code below, you will see a phenomenon called \"aliasing\" or the \"wagon-wheel effect\" [\\[9\\]](http://www.onmyphd.com/?p=aliasing). When you look at the measured signal, it appears to have a frequency of 1 cycle/10 seconds = 0.1 Hz. This phenomenon is called the wagon-wheel effect because it is noticeable when recording spinning objects like a wagon wheel [or turbine](https://www.youtube.com/watch?v=vIsS4TP73AU). The wheel spins at a given frequency and the camera records at another frequency. When the ratio of the wheel frequency to camera recording frequency reaches certain values the wheel appears to stop, spin slower, or even backwards. [\\[6\\]](https://github.uconn.edu/rcc02007/ME3264-Lab_03)\n\nExperimentally, we avoid aliasing by sampling above the Nyquist rate from equation 1.", "_____no_output_____" ] ], [ [ "N=11\nt_collect=10 # time to collect data\nt=np.linspace(0,t_collect,1000)\ny=np.cos(2*pi*t)\ntsample=np.linspace(0,10,N+1)\nysample=np.cos(2*pi*tsample)\nplt.figure(20)\nplt.plot(t,y,label='signal')\nplt.plot(tsample,ysample,'o-',label='measure')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\nplt.xlabel('time (s)')\nplt.ylabel('a.u.')", "_____no_output_____" ] ], [ [ "## Procedure \n\nThe procedure and details of the experiment are included in a lab-handout [1].\n\n[ME3264_Lab_5_LVDT.pdf](https://drive.google.com/file/d/1FbykzotAE50SRTujNUjvF9vek97TlJXL/view?usp=sharing)\n", "_____no_output_____" ] ], [ [ "YouTubeVideo('FRWgpFApITo')", "_____no_output_____" ] ], [ [ "## Notes on error propagation and uncertainties\n\n\n", "_____no_output_____" ], [ "## References \n\n1. [ME3264_Lab_5_LVDT.pdf](https://drive.google.com/file/d/1FbykzotAE50SRTujNUjvF9vek97TlJXL/view?usp=sharing)\n2. [Measuring Position and Displacement with LVDTs](https://www.ni.com/en-us/innovations/white-papers/06/measuring-position-and-displacement-with-lvdts.html)\n3. [Least Squares Fitting](https://mathworld.wolfram.com/LeastSquaresFitting.html)\n4. [Linear variable differential transformer, From Wikipedia](https://en.wikipedia.org/wiki/Linear_variable_differential_transformer)\n5. [Velocity and position transducers, Richard Crowder, in Electric Drives and Electromechanical Systems (Second Edition), 2020](https://www.sciencedirect.com/science/article/pii/B9780081028841000042)\n6. [ME3263-Lab_03, Prof. Ryan Cooper](https://github.uconn.edu/rcc02007/ME3264-Lab_03)\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0ba7252b53267845848b216e076989648fc1275
801,521
ipynb
Jupyter Notebook
Chapter04/ch4_cdcgan_mnist.ipynb
old-school-kid/Hands-On-Image-Generation-with-TensorFlow-2.0
9d1064b018c17b6a84b13598bb39630dd57364d2
[ "MIT" ]
138
2020-08-30T01:40:42.000Z
2022-03-15T13:56:15.000Z
Chapter04/ch4_cdcgan_mnist.ipynb
old-school-kid/Hands-On-Image-Generation-with-TensorFlow-2.0
9d1064b018c17b6a84b13598bb39630dd57364d2
[ "MIT" ]
19
2020-11-13T17:46:09.000Z
2022-03-04T01:09:10.000Z
Chapter04/ch4_cdcgan_mnist.ipynb
old-school-kid/Hands-On-Image-Generation-with-TensorFlow-2.0
9d1064b018c17b6a84b13598bb39630dd57364d2
[ "MIT" ]
45
2020-06-24T06:47:37.000Z
2022-03-21T14:49:25.000Z
1,258.274725
134,148
0.95588
[ [ [ "import tensorflow as tf\nfrom tensorflow.keras import layers, Model\nfrom tensorflow.keras.activations import relu\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom tensorflow.keras.losses import BinaryCrossentropy\nfrom tensorflow.keras.optimizers import RMSprop, Adam\nfrom tensorflow.keras.metrics import binary_accuracy\nimport tensorflow_datasets as tfds\nfrom tensorflow_addons.layers import InstanceNormalization\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport warnings\nwarnings.filterwarnings('ignore')\nprint(\"Tensorflow\", tf.__version__)\nfrom packaging.version import parse as parse_version\nassert parse_version(tf.__version__) < parse_version(\"2.4.0\"), \\\n f\"Please install TensorFlow version 2.3.1 or older. Your current version is {tf.__version__}.\"", "Tensorflow 2.2.0\n" ], [ "gpus = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_visible_devices(gpus[0], 'GPU')\nlogical_gpus = tf.config.experimental.list_logical_devices('GPU')\nprint(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\")", "4 Physical GPUs, 1 Logical GPU\n" ], [ "ds_train, ds_info = tfds.load('mnist', split='train', shuffle_files=True, with_info=True)\nfig = tfds.show_examples(ds_info, ds_train)", "_____no_output_____" ], [ "batch_size = 400\nglobal_batch_size = batch_size * 1\nimage_shape = (32, 32, 1)\n\ndef preprocess(features):\n image = tf.image.resize(features['image'], image_shape[:2]) \n image = tf.cast(image, tf.float32)\n image = (image-127.5)/127.5\n label = features['label']\n return image, label\n\n\nds_train = ds_train.map(preprocess)\nds_train = ds_train.cache() # put dataset into memory\nds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)\nds_train = ds_train.batch(global_batch_size).repeat()\n\ntrain_num = ds_info.splits['train'].num_examples\ntrain_steps_per_epoch = round(train_num/batch_size)\nprint(train_steps_per_epoch)", "150\n" ], [ "class cDCGAN():\n def __init__(self, input_shape):\n self.z_dim = 100\n self.input_shape = input_shape\n self.num_classes = 10\n # discriminator\n self.n_discriminator = 1 \n self.discriminator = self.build_discriminator()\n self.discriminator.trainable = False\n self.optimizer_discriminator = RMSprop(1e-4)\n \n # build generator pipeline with frozen discriminator\n self.generator = self.build_generator()\n discriminator_output = self.discriminator([self.generator.output, self.generator.input[1]])\n self.model = Model(self.generator.input, discriminator_output)\n self.model.compile(loss = self.bce_loss,\n optimizer = RMSprop(1e-4))\n self.discriminator.trainable = True\n self.bce = tf.keras.losses.BinaryCrossentropy()\n\n def conv_block(self, channels, kernels, strides=1, \n batchnorm=True, activation=True):\n model = tf.keras.Sequential()\n \n model.add(layers.Conv2D(channels, kernels, strides=strides, padding='same'))\n if batchnorm:\n model.add(layers.BatchNormalization()) \n if activation:\n model.add(layers.LeakyReLU(0.2)) \n \n return model\n \n def bce_loss(self, y_true, y_pred):\n\n loss = self.bce(y_true, y_pred)\n\n return loss\n\n def build_generator(self):\n\n DIM = 64\n \n input_label = layers.Input(shape=1, dtype=tf.int32, name='ClassLabel')\n \n one_hot_label = tf.one_hot(input_label, self.num_classes)\n one_hot_label = layers.Reshape((self.num_classes,))(one_hot_label)\n \n input_z = layers.Input(shape=self.z_dim, name='LatentVector')\n x = layers.Concatenate()([input_z, 
one_hot_label])\n x = layers.Dense(4*4*4*DIM, activation=None)(x)\n x = layers.Reshape((4,4,4*DIM))(x)\n #x = layers.Concatenate()([x, embedding])\n \n x = layers.UpSampling2D((2,2), interpolation=\"bilinear\")(x)\n x = self.conv_block(2*DIM, 5)(x)\n\n x = layers.UpSampling2D((2,2), interpolation=\"bilinear\")(x)\n x = self.conv_block(DIM, 5)(x)\n\n x = layers.UpSampling2D((2,2), interpolation=\"bilinear\")(x)\n output = layers.Conv2D(image_shape[-1], 5, padding='same', activation='tanh')(x)\n \n\n return Model([input_z, input_label], output) \n \n\n def build_discriminator(self):\n DIM = 64\n \n # label\n input_label = layers.Input(shape=[1], dtype =tf.int32, name='ClassLabel')\n encoded_label = tf.one_hot(input_label, self.num_classes)\n embedding = layers.Dense(32 * 32 * 1, activation=None)(encoded_label)\n embedding = layers.Reshape((32, 32, 1))(embedding)\n \n # discriminator\n input_image = layers.Input(shape=self.input_shape, name='Image')\n x = layers.Concatenate()([input_image, embedding])\n x = self.conv_block(DIM, 5, 2, batchnorm=False)(x)\n x = self.conv_block(2*DIM, 5, 2)(x)\n x = self.conv_block(4*DIM, 5, 2)(x)\n x = layers.Flatten()(x)\n\n output = layers.Dense(1, activation='sigmoid')(x)\n return Model([input_image, input_label], output) \n \n def train_discriminator(self, real_images, class_labels, batch_size):\n real_labels = tf.ones(batch_size)\n fake_labels = tf.zeros(batch_size)\n \n g_input = tf.random.normal((batch_size, self.z_dim))\n fake_class_labels = tf.random.uniform((batch_size,1), minval=0, maxval=10, dtype=tf.dtypes.int32)\n fake_images = self.generator.predict([g_input, fake_class_labels])\n \n with tf.GradientTape() as gradient_tape:\n \n # forward pass\n pred_fake = self.discriminator([fake_images, fake_class_labels])\n pred_real = self.discriminator([real_images, class_labels])\n \n # calculate losses\n loss_fake = self.bce_loss(fake_labels, pred_fake)\n loss_real = self.bce_loss(real_labels, pred_real) \n \n # total loss\n total_loss = 0.5*(loss_fake + loss_real)\n \n # apply gradients\n gradients = gradient_tape.gradient(total_loss, self.discriminator.trainable_variables)\n \n self.optimizer_discriminator.apply_gradients(zip(gradients, self.discriminator.trainable_variables))\n\n return loss_fake, loss_real\n \n def train(self, data_generator, batch_size, steps, interval=100):\n \n val_g_input = tf.random.normal((self.num_classes, self.z_dim))\n val_class_labels = np.arange(self.num_classes)\n real_labels = tf.ones(batch_size)\n \n for i in range(steps):\n \n real_images, class_labels = next(data_generator)\n loss_fake, loss_real = self.train_discriminator(real_images, class_labels, batch_size)\n discriminator_loss = 0.5*(loss_fake + loss_real)\n \n # train generator\n g_input = tf.random.normal((batch_size, self.z_dim))\n fake_class_labels = tf.random.uniform((batch_size, 1), \n minval=0, maxval=self.num_classes, dtype=tf.dtypes.int32)\n g_loss = self.model.train_on_batch([g_input, fake_class_labels], real_labels)\n if i%interval == 0:\n msg = \"Step {}: discriminator_loss {:.4f} g_loss {:.4f}\"\\\n .format(i, discriminator_loss, g_loss)\n print(msg)\n \n fake_images = self.generator.predict([val_g_input,val_class_labels])\n self.plot_images(fake_images)\n \n def plot_images(self, images): \n grid_row = 1\n grid_col = 10\n f, axarr = plt.subplots(grid_row, grid_col, figsize=(grid_col*1.5, grid_row*1.5))\n for col in range(grid_col):\n axarr[col].imshow((images[col,:,:,0]+1)/2, cmap='gray')\n axarr[col].axis('off') \n plt.show()\n \n def 
sample_images(self, class_labels):\n z = tf.random.normal((len(class_labels), self.z_dim))\n images = self.generator.predict([z,class_labels])\n self.plot_images(images)\n return images\n \n", "_____no_output_____" ], [ "cdcgan = cDCGAN(image_shape)", "_____no_output_____" ], [ "tf.keras.utils.plot_model(cdcgan.discriminator, show_shapes=True)", "_____no_output_____" ], [ "tf.keras.utils.plot_model(cdcgan.generator, show_shapes=True)", "_____no_output_____" ], [ "cdcgan.train(iter(ds_train), batch_size, 2000, 200)", "Step 0: discriminator_loss 0.6841 g_loss 0.6252\n" ], [ "for i in range(5):\n images = cdcgan.sample_images(np.array([0,1,2,3,4,5,6,7,8,9]))\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0ba781ba01e66a23608dff7d9a013bc7a07ccab
15,669
ipynb
Jupyter Notebook
docs/mindspore/programming_guide/source_zh_cn/initializer.ipynb
bwcsswcx/docs
e54b179bb8ca020a9bf0c83926822048057e9536
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
docs/mindspore/programming_guide/source_zh_cn/initializer.ipynb
bwcsswcx/docs
e54b179bb8ca020a9bf0c83926822048057e9536
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
docs/mindspore/programming_guide/source_zh_cn/initializer.ipynb
bwcsswcx/docs
e54b179bb8ca020a9bf0c83926822048057e9536
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
32.307216
826
0.52856
[ [ [ "# 网络参数的初始化\n\n[![](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_source.png)](https://gitee.com/mindspore/docs/blob/master/docs/mindspore/programming_guide/source_zh_cn/initializer.ipynb)&emsp;[![](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_notebook.png)](https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/master/programming_guide/zh_cn/mindspore_initializer.ipynb)&emsp;[![](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_modelarts.png)](https://authoring-modelarts-cnnorth4.huaweicloud.com/console/lab?share-url-b64=aHR0cHM6Ly9vYnMuZHVhbHN0YWNrLmNuLW5vcnRoLTQubXlodWF3ZWljbG91ZC5jb20vbWluZHNwb3JlLXdlYnNpdGUvbm90ZWJvb2svbW9kZWxhcnRzL3Byb2dyYW1taW5nX2d1aWRlL21pbmRzcG9yZV9pbml0aWFsaXplci5pcHluYg==&imageid=65f636a0-56cf-49df-b941-7d2a07ba8c8c)", "_____no_output_____" ], [ "## 概述\n\nMindSpore提供了权重初始化模块,用户可以通过封装算子和initializer方法来调用字符串、Initializer子类或自定义Tensor等方式完成对网络参数进行初始化。Initializer类是MindSpore中用于进行初始化的基本数据结构,其子类包含了几种不同类型的数据分布(Zero,One,XavierUniform,HeUniform,HeNormal,Constant,Uniform,Normal,TruncatedNormal)。下面针对封装算子和initializer方法两种参数初始化模式进行详细介绍。", "_____no_output_____" ], [ "## 使用封装算子对参数初始化 \nMindSpore提供了多种参数初始化的方式,并在部分算子中封装了参数初始化的功能。本节将介绍带有参数初始化功能的算子对参数进行初始化的方法,以`Conv2d`算子为例,分别介绍以字符串,`Initializer`子类和自定义`Tensor`等方式对网络中的参数进行初始化,以下代码示例中均以`Initializer`的子类`Normal`为例,代码示例中`Normal`均可替换成`Initializer`子类中任何一个。", "_____no_output_____" ], [ "### 字符串 \n使用字符串对网络参数进行初始化,字符串的内容需要与`Initializer`子类的名称保持一致,使用字符串方式进行初始化将使用`Initializer`子类中的默认参数,例如使用字符串`Normal`等同于使用`Initializer`的子类`Normal()`,代码样例如下:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.common import set_seed\n\nset_seed(1)\n\ninput_data = Tensor(np.ones([1, 3, 16, 50], dtype=np.float32))\nnet = nn.Conv2d(3, 64, 3, weight_init='Normal')\noutput = net(input_data)\nprint(output)", "[[[[ 3.10382620e-02 4.38603461e-02 4.38603461e-02 ... 4.38603461e-02\n 4.38603461e-02 1.38719045e-02]\n [ 3.26051228e-02 3.54298912e-02 3.54298912e-02 ... 3.54298912e-02\n 3.54298912e-02 -5.54019120e-03]\n [ 3.26051228e-02 3.54298912e-02 3.54298912e-02 ... 3.54298912e-02\n 3.54298912e-02 -5.54019120e-03]\n ...\n [ 3.26051228e-02 3.54298912e-02 3.54298912e-02 ... 3.54298912e-02\n 3.54298912e-02 -5.54019120e-03]\n [ 3.26051228e-02 3.54298912e-02 3.54298912e-02 ... 3.54298912e-02\n 3.54298912e-02 -5.54019120e-03]\n [ 9.66199022e-03 1.24104535e-02 1.24104535e-02 ... 1.24104535e-02\n 1.24104535e-02 -1.38977719e-02]]\n\n ...\n\n [[ 3.98553275e-02 -1.35465711e-03 -1.35465711e-03 ... -1.35465711e-03\n -1.35465711e-03 -1.00310734e-02]\n [ 4.38403059e-03 -3.60766202e-02 -3.60766202e-02 ... -3.60766202e-02\n -3.60766202e-02 -2.95619294e-02]\n [ 4.38403059e-03 -3.60766202e-02 -3.60766202e-02 ... -3.60766202e-02\n -3.60766202e-02 -2.95619294e-02]\n ...\n [ 4.38403059e-03 -3.60766202e-02 -3.60766202e-02 ... -3.60766202e-02\n -3.60766202e-02 -2.95619294e-02]\n [ 4.38403059e-03 -3.60766202e-02 -3.60766202e-02 ... -3.60766202e-02\n -3.60766202e-02 -2.95619294e-02]\n [ 1.33139016e-02 6.74417242e-05 6.74417242e-05 ... 
6.74417242e-05\n 6.74417242e-05 -2.27325838e-02]]]]\n" ] ], [ [ "### Initializer子类 \n使用`Initializer`子类对网络参数进行初始化,与使用字符串对参数进行初始化的效果类似,不同的是使用字符串进行参数初始化是使用`Initializer`子类的默认参数,如要使用`Initializer`子类中的参数,就必须使用`Initializer`子类的方式对参数进行初始化,以`Normal(0.2)`为例,代码样例如下:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.common import set_seed\nfrom mindspore.common.initializer import Normal\n\nset_seed(1)\n\ninput_data = Tensor(np.ones([1, 3, 16, 50], dtype=np.float32))\nnet = nn.Conv2d(3, 64, 3, weight_init=Normal(0.2))\noutput = net(input_data)\nprint(output)", "[[[[ 6.2076533e-01 8.7720710e-01 8.7720710e-01 ... 8.7720710e-01\n 8.7720710e-01 2.7743810e-01]\n [ 6.5210247e-01 7.0859784e-01 7.0859784e-01 ... 7.0859784e-01\n 7.0859784e-01 -1.1080378e-01]\n [ 6.5210247e-01 7.0859784e-01 7.0859784e-01 ... 7.0859784e-01\n 7.0859784e-01 -1.1080378e-01]\n ...\n [ 6.5210247e-01 7.0859784e-01 7.0859784e-01 ... 7.0859784e-01\n 7.0859784e-01 -1.1080378e-01]\n [ 6.5210247e-01 7.0859784e-01 7.0859784e-01 ... 7.0859784e-01\n 7.0859784e-01 -1.1080378e-01]\n [ 1.9323981e-01 2.4820906e-01 2.4820906e-01 ... 2.4820906e-01\n 2.4820906e-01 -2.7795550e-01]]\n\n ...\n\n [[ 7.9710668e-01 -2.7093157e-02 -2.7093157e-02 ... -2.7093157e-02\n -2.7093157e-02 -2.0062150e-01]\n [ 8.7680638e-02 -7.2153252e-01 -7.2153252e-01 ... -7.2153252e-01\n -7.2153252e-01 -5.9123868e-01]\n [ 8.7680638e-02 -7.2153252e-01 -7.2153252e-01 ... -7.2153252e-01\n -7.2153252e-01 -5.9123868e-01]\n ...\n [ 8.7680638e-02 -7.2153252e-01 -7.2153252e-01 ... -7.2153252e-01\n -7.2153252e-01 -5.9123868e-01]\n [ 8.7680638e-02 -7.2153252e-01 -7.2153252e-01 ... -7.2153252e-01\n -7.2153252e-01 -5.9123868e-01]\n [ 2.6627803e-01 1.3488382e-03 1.3488382e-03 ... 1.3488382e-03\n 1.3488382e-03 -4.5465171e-01]]]]\n" ] ], [ [ "### 自定义的Tensor \n除上述两种初始化方法外,当网络要使用MindSpore中没有的数据类型对参数进行初始化,用户可以通过自定义`Tensor`的方式来对参数进行初始化,代码样例如下:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore import dtype as mstype\n\nweight = Tensor(np.ones([64, 3, 3, 3]), dtype=mstype.float32)\ninput_data = Tensor(np.ones([1, 3, 16, 50], dtype=np.float32))\nnet = nn.Conv2d(3, 64, 3, weight_init=weight)\noutput = net(input_data)\nprint(output)", "[[[[12. 18. 18. ... 18. 18. 12.]\n [18. 27. 27. ... 27. 27. 18.]\n [18. 27. 27. ... 27. 27. 18.]\n ...\n [18. 27. 27. ... 27. 27. 18.]\n [18. 27. 27. ... 27. 27. 18.]\n [12. 18. 18. ... 18. 18. 12.]]\n\n ...\n\n [[12. 18. 18. ... 18. 18. 12.]\n [18. 27. 27. ... 27. 27. 18.]\n [18. 27. 27. ... 27. 27. 18.]\n ...\n [18. 27. 27. ... 27. 27. 18.]\n [18. 27. 27. ... 27. 27. 18.]\n [12. 18. 18. ... 18. 18. 
12.]]]]\n" ] ], [ [ "## 使用initializer方法对参数初始化\n\n在上述代码样例中,给出了如何在网络中进行参数初始化的方法,如在网络中使用nn层封装`Conv2d`算子,参数`weight_init`作为要初始化的数据类型传入`Conv2d`算子,算子会在初始化时通过调用`Parameter`类,进而调用封装在`Parameter`类中的`initializer`方法来完成对参数的初始化。然而有一些算子并没有像`Conv2d`那样在内部对参数初始化的功能进行封装,如`Conv3d`算子的权重就是作为参数传入`Conv3d`算子,此时就需要手动的定义权重的初始化。\n\n当对参数进行初始化时,可以使用`initializer`方法调用`Initializer`子类中不同的数据类型来对参数进行初始化,进而产生不同类型的数据。\n\n使用initializer进行参数初始化时,支持传入的参数有`init`、`shape`、`dtype`:\n\n- `init`:支持传入`Tensor`、 `str`、 `Initializer的子类`。\n\n- `shape`:支持传入`list`、 `tuple`、 `int`。\n\n- `dtype`:支持传入`mindspore.dtype`。", "_____no_output_____" ], [ "### init参数为Tensor", "_____no_output_____" ], [ "代码样例如下:", "_____no_output_____" ], [ "```python\nimport numpy as np\nfrom mindspore import Tensor\nfrom mindspore import dtype as mstype\nfrom mindspore.common import set_seed\nfrom mindspore.common.initializer import initializer\nfrom mindspore.ops.operations import nn_ops as nps\n\nset_seed(1)\n\ninput_data = Tensor(np.ones([16, 3, 10, 32, 32]), dtype=mstype.float32)\nweight_init = Tensor(np.ones([32, 3, 4, 3, 3]), dtype=mstype.float32)\nweight = initializer(weight_init, shape=[32, 3, 4, 3, 3])\nconv3d = nps.Conv3D(out_channel=32, kernel_size=(4, 3, 3))\noutput = conv3d(input_data, weight)\nprint(output)\n```", "_____no_output_____" ], [ "输出如下:\n\n```text\n[[[[[108 108 108 ... 108 108 108]\n [108 108 108 ... 108 108 108]\n [108 108 108 ... 108 108 108]\n ...\n [108 108 108 ... 108 108 108]\n [108 108 108 ... 108 108 108]\n [108 108 108 ... 108 108 108]]\n ...\n [[108 108 108 ... 108 108 108]\n [108 108 108 ... 108 108 108]\n [108 108 108 ... 108 108 108]\n ...\n [108 108 108 ... 108 108 108]\n [108 108 108 ... 108 108 108]\n [108 108 108 ... 108 108 108]]]]]\n```", "_____no_output_____" ], [ "### init参数为str", "_____no_output_____" ], [ "代码样例如下:", "_____no_output_____" ], [ "```python\nimport numpy as np\nfrom mindspore import Tensor\nfrom mindspore import dtype as mstype\nfrom mindspore.common import set_seed\nfrom mindspore.common.initializer import initializer\nfrom mindspore.ops.operations import nn_ops as nps\n\nset_seed(1)\n\ninput_data = Tensor(np.ones([16, 3, 10, 32, 32]), dtype=mstype.float32)\nweight = initializer('Normal', shape=[32, 3, 4, 3, 3], dtype=mstype.float32)\nconv3d = nps.Conv3D(out_channel=32, kernel_size=(4, 3, 3))\noutput = conv3d(input_data, weight)\nprint(output)\n```", "_____no_output_____" ], [ "输出如下:\n\n```text\n[[[[[0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n ...\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n ...\n [[0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n ...\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]]]]\n```", "_____no_output_____" ], [ "### init参数为Initializer子类", "_____no_output_____" ], [ "代码样例如下:", "_____no_output_____" ], [ "```python\nimport numpy as np\nfrom mindspore import Tensor\nfrom mindspore import dtype as mstype\nfrom mindspore.common import set_seed\nfrom mindspore.ops.operations import nn_ops as nps\nfrom mindspore.common.initializer import Normal, initializer\n\nset_seed(1)\n\ninput_data = Tensor(np.ones([16, 3, 10, 32, 32]), dtype=mstype.float32)\nweight = initializer(Normal(0.2), shape=[32, 3, 4, 3, 3], dtype=mstype.float32)\nconv3d = nps.Conv3D(out_channel=32, kernel_size=(4, 3, 3))\noutput = conv3d(input_data, weight)\nprint(output)\n```", "_____no_output_____" ], [ "```text\n[[[[[0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n ...\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n ...\n [[0 0 0 ... 
0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n ...\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]]]]\n```", "_____no_output_____" ], [ "### 在Parameter中的应用", "_____no_output_____" ], [ "代码样例如下:", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom mindspore import dtype as mstype\nfrom mindspore.common import set_seed\nfrom mindspore.ops import operations as ops\nfrom mindspore import Tensor, Parameter, context\nfrom mindspore.common.initializer import Normal, initializer\n\nset_seed(1)\n\nweight1 = Parameter(initializer('Normal', [5, 4], mstype.float32), name=\"w1\")\nweight2 = Parameter(initializer(Normal(0.2), [5, 4], mstype.float32), name=\"w2\")\ninput_data = Tensor(np.arange(20).reshape(5, 4), dtype=mstype.float32)\nnet = ops.Add()\noutput = net(input_data, weight1)\noutput = net(output, weight2)\nprint(output)", "[[-0.3305102 1.0412874 2.0412874 3.0412874]\n [ 4.0412874 4.9479127 5.9479127 6.9479127]\n [ 7.947912 9.063009 10.063009 11.063009 ]\n [12.063009 13.536987 14.536987 14.857441 ]\n [15.751231 17.073082 17.808317 19.364822 ]]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
d0ba82c66bcc31e23e0a85f5c8f33018aca1a820
264,186
ipynb
Jupyter Notebook
Titanic.ipynb
sanchobuendia/Titanic
d00e6f5cc0549ddb6387fe4a2f2b9bfebc3f2ce4
[ "MIT" ]
null
null
null
Titanic.ipynb
sanchobuendia/Titanic
d00e6f5cc0549ddb6387fe4a2f2b9bfebc3f2ce4
[ "MIT" ]
null
null
null
Titanic.ipynb
sanchobuendia/Titanic
d00e6f5cc0549ddb6387fe4a2f2b9bfebc3f2ce4
[ "MIT" ]
null
null
null
107.174848
100,104
0.798547
[ [ [ "import numpy as np\nimport pandas as pd\n \n# load the contents of a file into a pandas Dataframe\ninput_file = '/Users/aurelianosancho/Google Drive/Pre_Processing/train.csv'\ndf_titanic = pd.read_csv(input_file)", "_____no_output_____" ] ], [ [ "$\\textbf{NOTE}$ Although it is not demonstrated in this section, you must ensure that any feature engineering or imputation that is carried out on the training data is also carried out on the test data.", "_____no_output_____" ] ], [ [ "df_titanic.shape", "_____no_output_____" ], [ "df_titanic.columns", "_____no_output_____" ], [ "df_titanic.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 PassengerId 891 non-null int64 \n 1 Survived 891 non-null int64 \n 2 Pclass 891 non-null int64 \n 3 Name 891 non-null object \n 4 Sex 891 non-null object \n 5 Age 714 non-null float64\n 6 SibSp 891 non-null int64 \n 7 Parch 891 non-null int64 \n 8 Ticket 891 non-null object \n 9 Fare 891 non-null float64\n 10 Cabin 204 non-null object \n 11 Embarked 889 non-null object \ndtypes: float64(2), int64(5), object(5)\nmemory usage: 83.7+ KB\n" ], [ "df_titanic.describe()", "_____no_output_____" ], [ "df_titanic.isnull().sum()", "_____no_output_____" ], [ "df_titanic.head()", "_____no_output_____" ], [ "print(df_titanic.index.name)", "None\n" ] ], [ [ "To make the PassengerId attribute the index of the df_titanic dataframe, use the following snippet:", "_____no_output_____" ] ], [ [ "df_titanic.set_index(\"PassengerId\", inplace=True)", "_____no_output_____" ], [ "print(df_titanic.index.name)", "PassengerId\n" ], [ "df_titanic.head()", "_____no_output_____" ], [ "# extract the target attribute into its own dataframe\ndf_titanic_target = df_titanic.loc[:,['Survived']]\n \n# create a dataframe that contains the 10 feature variables\ndf_titanic_features = df_titanic.drop(['Survived'], axis=1)", "_____no_output_____" ], [ "df_titanic_target['Survived'].value_counts()", "_____no_output_____" ], [ "df_titanic_features['Embarked'].value_counts(dropna=False)", "_____no_output_____" ], [ "# histogram of target variable\n%matplotlib inline\nimport matplotlib.pyplot as plt\ndf_titanic_target.hist(figsize=(5,5))", "_____no_output_____" ], [ "df_titanic_features.hist(figsize=(10,10))", "/opt/anaconda3/lib/python3.7/site-packages/pandas/plotting/_matplotlib/tools.py:298: MatplotlibDeprecationWarning: \nThe rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.\n layout[ax.rowNum, ax.colNum] = ax.get_visible()\n/opt/anaconda3/lib/python3.7/site-packages/pandas/plotting/_matplotlib/tools.py:298: MatplotlibDeprecationWarning: \nThe colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().colspan.start instead.\n layout[ax.rowNum, ax.colNum] = ax.get_visible()\n/opt/anaconda3/lib/python3.7/site-packages/pandas/plotting/_matplotlib/tools.py:304: MatplotlibDeprecationWarning: \nThe rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.\n if not layout[ax.rowNum + 1, ax.colNum]:\n/opt/anaconda3/lib/python3.7/site-packages/pandas/plotting/_matplotlib/tools.py:304: MatplotlibDeprecationWarning: \nThe colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. 
Use ax.get_subplotspec().colspan.start instead.\n if not layout[ax.rowNum + 1, ax.colNum]:\n" ], [ "# histogram of categorical attribute 'Embarked'\n# computed from the output of the value_counts() function\nvc = df_titanic_features['Embarked'].value_counts(dropna=False)\nvc.plot(kind='bar')", "_____no_output_____" ], [ "# create a box plot of numeric features.\ndf_titanic_features.boxplot(figsize=(10,6)) ", "_____no_output_____" ], [ "# what features show the strongest correlation with the target variable?\ncorr_matrix = df_titanic.corr()\ncorr_matrix['Survived'].sort_values(ascending=False)", "_____no_output_____" ], [ "# visualize relationship between features using a\n# matrix of scatter plots.\nfrom pandas.plotting import scatter_matrix\nscatter_matrix(df_titanic, figsize=(12,12)) ", "/opt/anaconda3/lib/python3.7/site-packages/pandas/plotting/_matplotlib/tools.py:298: MatplotlibDeprecationWarning: \nThe rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.\n layout[ax.rowNum, ax.colNum] = ax.get_visible()\n/opt/anaconda3/lib/python3.7/site-packages/pandas/plotting/_matplotlib/tools.py:298: MatplotlibDeprecationWarning: \nThe colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().colspan.start instead.\n layout[ax.rowNum, ax.colNum] = ax.get_visible()\n/opt/anaconda3/lib/python3.7/site-packages/pandas/plotting/_matplotlib/tools.py:304: MatplotlibDeprecationWarning: \nThe rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.\n if not layout[ax.rowNum + 1, ax.colNum]:\n/opt/anaconda3/lib/python3.7/site-packages/pandas/plotting/_matplotlib/tools.py:304: MatplotlibDeprecationWarning: \nThe colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. 
Use ax.get_subplotspec().colspan.start instead.\n if not layout[ax.rowNum + 1, ax.colNum]:\n" ], [ "df_titanic_features.boxplot(column='Age', figsize=(7,7))", "_____no_output_____" ], [ "# fill missing values with the median\nmedian_age = df_titanic_features['Age'].median()\nprint (median_age)\n28.0\n \ndf_titanic_features[\"Age\"].fillna(median_age, inplace=True) ", "28.0\n" ], [ "# fill missing values of the Embarked attribute\n# with the most common value in the column\n\nembarked_value_counts = df_titanic_features['Embarked'].value_counts(dropna=True)\nmost_common_value = embarked_value_counts.index[0]\n \nprint (most_common_value)\n \ndf_titanic_features[\"Embarked\"].fillna(most_common_value, inplace=True) ", "S\n" ], [ "# create a boolean feature 'CabinIsKnown'\n# which will have True if the Cabin column\n# does not have missing data\ndf_titanic_features['CabinIsKnown'] = ~df_titanic_features.Cabin.isnull()\n \n# drop the Cabin column from the dataframe\ndf_titanic_features.drop(['Cabin'], axis=1, inplace=True)", "_____no_output_____" ], [ "# display the columns of the dataframe.\nprint (df_titanic_features.columns.values)\n \n# display number of missing values in the columns\ndf_titanic_features.isnull().sum()", "['Pclass' 'Name' 'Sex' 'Age' 'SibSp' 'Parch' 'Ticket' 'Fare' 'Embarked'\n 'CabinIsKnown']\n" ], [ "# create a numeric feature called FamilySize that is\n# the sum of the SibSp and Parch features.\ndf_titanic_features['FamilySize'] = df_titanic_features.SibSp + df_titanic_features.Parch", "_____no_output_____" ], [ "# generate new categorical feature AgeCategory\nbins_age = [0,20,30,40,50,150]\nlabels_age = ['<20','20-30','30-40','40-50','>50']\n \ndf_titanic_features['AgeCategory'] = pd.cut(df_titanic_features.Age,\n bins=bins_age,\n labels=labels_age,\n include_lowest=True)", "_____no_output_____" ], [ "df_titanic_features.head()", "_____no_output_____" ], [ "# generate new categorical feature FareCategory\ndf_titanic_features['FareCategory'] = pd.qcut(df_titanic_features.Fare,\n q=4,\n labels=['Q1', 'Q2', 'Q3', 'Q4']) ", "_____no_output_____" ], [ "# use one-hot encoding to convert categorical attributes\n# into binary numeric attributes\ndf_titanic_features = pd.get_dummies(df_titanic_features, columns=['Sex','Embarked','CabinIsKnown','AgeCategory','FareCategory'])\n \n# display the columns of the dataframe.\nprint (df_titanic_features.columns.values)", "['Pclass' 'Name' 'Age' 'SibSp' 'Parch' 'Ticket' 'Fare' 'FamilySize'\n 'Sex_female' 'Sex_male' 'Embarked_C' 'Embarked_Q' 'Embarked_S'\n 'CabinIsKnown_False' 'CabinIsKnown_True' 'AgeCategory_<20'\n 'AgeCategory_20-30' 'AgeCategory_30-40' 'AgeCategory_40-50'\n 'AgeCategory_>50' 'FareCategory_Q1' 'FareCategory_Q2' 'FareCategory_Q3'\n 'FareCategory_Q4']\n" ], [ "df_titanic_features.head()", "_____no_output_____" ], [ "# strong negative correlation between Sex_male and Sex_female.\n# one of these can be dropped.\ncorr_matrix = df_titanic_features[['Sex_male', 'Sex_female']].corr()\nprint(corr_matrix)", " Sex_male Sex_female\nSex_male 1.0 -1.0\nSex_female -1.0 1.0\n" ], [ "# drop the Name, Ticket, Sex_female, CabinIsKnown_False features\n# to get a dataframe that can be used for linear or logistic regression\ndf_titanic_features_numeric = df_titanic_features.drop(['Name', 'Ticket', 'Sex_female', 'CabinIsKnown_False'], axis=1)", "_____no_output_____" ], [ "df_titanic_features_numeric.head()", "_____no_output_____" ], [ "df_titanic_features_numeric.shape", "_____no_output_____" ], [ "####################### pre-processing Test 
#######################", "_____no_output_____" ], [ "input_file = '/Users/aurelianosancho/Google Drive/Pre_Processing/train.csv'\ndf_titanic_test = pd.read_csv(input_file)\ndf_titanic_test.set_index(\"PassengerId\", inplace=True)\n\ndf_titanic_test_target = df_titanic_test.loc[:,['Survived']]\ndf_titanic_test_features = df_titanic_test.drop(['Survived'], axis=1)\n\nmedian_age = df_titanic_test_features['Age'].median()\ndf_titanic_test_features[\"Age\"].fillna(median_age, inplace=True)\n\nembarked_value_counts = df_titanic_test_features['Embarked'].value_counts(dropna=True)\nmost_common_value = embarked_value_counts.index[0]\ndf_titanic_test_features[\"Embarked\"].fillna(most_common_value, inplace=True) \n\ndf_titanic_test_features['CabinIsKnown'] = ~df_titanic_test_features.Cabin.isnull()\ndf_titanic_test_features.drop(['Cabin'], axis=1, inplace=True)\n\ndf_titanic_test_features['FamilySize'] = df_titanic_test_features.SibSp + df_titanic_test_features.Parch\n\nbins_age = [0,20,30,40,50,150]\nlabels_age = ['<20','20-30','30-40','40-50','>50']\n \ndf_titanic_test_features['AgeCategory'] = pd.cut(df_titanic_test_features.Age,\n bins=bins_age,\n labels=labels_age,\n include_lowest=True)\n\ndf_titanic_test_features['FareCategory'] = pd.qcut(df_titanic_test_features.Fare,\n q=4,\n labels=['Q1', 'Q2', 'Q3', 'Q4']) \n\n\ndf_titanic_test_features = pd.get_dummies(df_titanic_test_features, columns=['Sex','Embarked','CabinIsKnown','AgeCategory','FareCategory'])\n \ndf_titanic_test_features_numeric = df_titanic_test_features.drop(['Name', 'Ticket', 'Sex_female', 'CabinIsKnown_False'], axis=1)", "_____no_output_____" ] ], [ [ "* titanic_features_train = df_titanic_features_numeric\n\n* titanic_features_test = df_titanic_test_features_numeric\n\n* titanic_target_train = df_titanic_test_target\n\n* titanic_target_test = df_titanic_target", "_____no_output_____" ] ], [ [ "df_titanic_test_features_numeric.shape", "_____no_output_____" ], [ "titanic_features_train = df_titanic_features_numeric\ntitanic_features_test = df_titanic_test_features_numeric\ntitanic_target_train = df_titanic_target\ntitanic_target_test = df_titanic_test_target ", "_____no_output_____" ], [ "from sklearn.svm import SVC\nsvc_model = SVC(kernel='rbf', C=1, gamma='auto', probability=True)\nsvc_model.fit(titanic_features_train, titanic_target_train.values.ravel())\n \n# train a logistic regression model on the diabetes dataset\nfrom sklearn.linear_model import LogisticRegression\nlogit_model = LogisticRegression(penalty='l2', fit_intercept=True, solver='liblinear')\nlogit_model.fit(titanic_features_train, titanic_target_train.values.ravel())\n \n# train a decision tree based binary classifier.\nfrom sklearn.tree import DecisionTreeClassifier\n \ndtree_model = DecisionTreeClassifier(max_depth=4)\ndtree_model.fit(titanic_features_train, titanic_target_train.values.ravel())\n \n# use the models to create predictions on the diabetes test set\nsvc_predictions = svc_model.predict(titanic_features_test)\nlogit_predictions = logit_model.predict(titanic_features_test)\ndtree_predictions = dtree_model.predict(titanic_features_test)\n \n# simplistic metric - the percentage of correct predictions\nsvc_correct = svc_predictions == titanic_target_test.values.ravel()\nsvc_correct_percent = np.count_nonzero(svc_correct) / svc_predictions.size * 100\n\nlogit_correct = logit_predictions == titanic_target_test.values.ravel()\nlogit_correct_percent = np.count_nonzero(logit_correct) / logit_predictions.size * 100\n \ndtree_correct = dtree_predictions == 
titanic_target_test.values.ravel()\ndtree_correct_percent = np.count_nonzero(dtree_correct) / dtree_predictions.size * 100", "_____no_output_____" ], [ "print ('SVC', svc_correct_percent, 'Logistic Regression', logit_correct_percent, 'DecisionTree', dtree_correct_percent)\n", "SVC 84.73625140291807 Logistic Regression 81.03254769921436 DecisionTree 83.61391694725027\n" ], [ "from sklearn.metrics import confusion_matrix\ncm_svc = confusion_matrix(titanic_target_test.values.ravel(), svc_predictions)\ncm_logit = confusion_matrix(titanic_target_test.values.ravel(), logit_predictions)\ncm_dtree = confusion_matrix(titanic_target_test.values.ravel(), dtree_predictions)", "_____no_output_____" ], [ "cm_svc", "_____no_output_____" ], [ "cm_logit", "_____no_output_____" ], [ "cm_dtree", "_____no_output_____" ], [ "tn_svc, fp_svc, fn_svc, tp_svc = cm_svc.ravel()\ntn_logit, fp_logit, fn_logit, tp_logit = cm_logit.ravel()\ntn_dtree, fp_dtree, fn_dtree, tp_dtree = cm_dtree.ravel()", "_____no_output_____" ], [ "print (tn_svc, fp_svc, fn_svc, tp_svc)\n\nprint (tn_logit, fp_logit, fn_logit, tp_logit)\n\nprint (tn_dtree, fp_dtree, fn_dtree, tp_dtree)", "513 36 100 242\n472 77 92 250\n499 50 96 246\n" ], [ "accuracy_svc = (tp_svc + tn_svc) / (tn_svc + fp_svc + fn_svc + tp_svc)\naccuracy_logit = (tp_logit + tn_logit) / (tn_logit + fp_logit + fn_logit + tp_logit)\naccuracy_dtree = (tp_dtree + tn_dtree) / (tn_dtree + fp_dtree + fn_dtree + tp_dtree)\n \nprecision_svc = tp_svc / (tp_svc + fp_svc)\nprecision_logit = tp_logit / (tp_logit + fp_logit)\nprecision_dtree = tp_dtree / (tp_dtree + fp_dtree)\n \nrecall_svc = tp_svc / (tp_svc + fn_svc)\nrecall_logit = tp_logit / (tp_svc + fn_logit)\nrecall_dtree = tp_dtree / (tp_dtree + fn_dtree)", "_____no_output_____" ], [ "print('Accuracy SVC:',accuracy_svc, 'Accuracy REG:', accuracy_logit, 'Accuracy DTREE:', accuracy_dtree)\n \nprint('Precision SVC:',precision_svc, 'Precision REG:',precision_logit, 'Precision DTREE:',precision_dtree)\n \nprint('Recall SVC:', recall_svc, 'Recall REG:',recall_logit, 'Recall DTREE:',recall_dtree)", "Accuracy SVC: 0.8473625140291807 Accuracy REG: 0.8103254769921436 Accuracy DTREE: 0.8361391694725028\nPrecision SVC: 0.8705035971223022 Precision REG: 0.764525993883792 Precision DTREE: 0.831081081081081\nRecall SVC: 0.7076023391812866 Recall REG: 0.7485029940119761 Recall DTREE: 0.7192982456140351\n" ], [ "# plot ROC curves for the three classifiers.\n \n# compute prediction probabilities\nsvc_probabilities = svc_model.predict_proba(titanic_features_test)\nlogit_probabilities = logit_model.predict_proba(titanic_features_test)\ndtree_probabilities = dtree_model.predict_proba(titanic_features_test)\n \n# calculate the FPR and TPR for all thresholds of the SVC model\nimport sklearn.metrics as metrics\nsvc_fpr, svc_tpr, svc_thresholds = metrics.roc_curve(titanic_target_test.values.ravel(),\n svc_probabilities[:,1],\n pos_label=1,\n drop_intermediate=False)\n\n\nlogit_fpr, logit_tpr, logit_thresholds = metrics.roc_curve(titanic_target_test.values.ravel(),\n logit_probabilities[:,1],pos_label=1,\n drop_intermediate=False)\n \n# calculate the FPR and TPR for all thresholds of the decision tree model\ndtree_fpr, dtree_tpr, dtree_thresholds = metrics.roc_curve(titanic_target_test.values.ravel(),\n dtree_probabilities[:,1],pos_label=1,\n drop_intermediate=False)\n \n \n \nfig, axes = plt.subplots(1, 3, figsize=(18,6))\n \naxes[0].set_title('ROC curve: SVC model')\naxes[0].set_xlabel(\"True Positive Rate\")\naxes[0].set_ylabel(\"False Positive 
Rate\")\naxes[0].plot(svc_fpr, svc_tpr)\naxes[0].axhline(y=0, color='k')\naxes[0].axvline(x=0, color='k')\n \naxes[1].set_title('ROC curve: Logit model')\n\naxes[1].set_xlabel(\"True Positive Rate\")\naxes[1].set_ylabel(\"False Positive Rate\")\naxes[1].plot(logit_fpr, logit_tpr)\naxes[1].axhline(y=0, color='k')\naxes[1].axvline(x=0, color='k')\n \naxes[2].set_title('ROC curve: Tree model')\naxes[2].set_xlabel(\"True Positive Rate\")\naxes[2].set_ylabel(\"False Positive Rate\")\naxes[2].plot(dtree_fpr, dtree_tpr)\naxes[2].axhline(y=0, color='k')\naxes[2].axvline(x=0, color='k')", "_____no_output_____" ], [ "svc_auc = metrics.auc(svc_fpr, svc_tpr)\nlogit_auc = metrics.auc(logit_fpr, logit_tpr)\ndtree_auc = metrics.auc(dtree_fpr, dtree_tpr)", "_____no_output_____" ], [ "print (svc_auc, logit_auc, dtree_auc)", "0.9137373640537287 0.8673744926980475 0.8772728725274022\n" ] ], [ [ "The following code snippet uses the GridSearchCV class to try different hyperparameter \ncombinations for a multi-class decision tree classifier on the Iris flowers dataset and returns the hyperparameters that result in the best precision score:", "_____no_output_____" ] ], [ [ "# use grid search to find the hyperparameters that result\n# in the best accuracy score for a decision tree\n# based classifier on the Iris Flowers dataset\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.tree import DecisionTreeClassifier\n \ngrid_params = {\n 'criterion': ['gini', 'entropy'],\n 'splitter': ['best', 'random'],\n 'max_depth': [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],\n 'min_samples_split': [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],\n 'max_features': ['auto', 'sqrt', 'log2'],\n 'presort': [True, False]\n}\n \ngrid_search = GridSearchCV(estimator=DecisionTreeClassifier(),\n param_grid=grid_params, scoring='accuracy',\n cv=5, n_jobs=-1)\n \ngrid_search.fit(titanic_features_train.values, titanic_target_train)", "/opt/anaconda3/lib/python3.7/site-packages/sklearn/tree/_classes.py:319: FutureWarning: The parameter 'presort' is deprecated and has no effect. It will be removed in v0.24. You can suppress this warning by not passing any value to the 'presort' parameter.\n FutureWarning)\n" ], [ "best_parameters = grid_search.best_params_\nprint(best_parameters)\n \nbest_accuracy = grid_search.best_score_\nprint(best_accuracy)", "{'criterion': 'gini', 'max_depth': 12, 'max_features': 'auto', 'min_samples_split': 12, 'presort': False, 'splitter': 'best'}\n0.8204381394764922\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0baab50c9ff431206c661172376b0046692cdc6
436,356
ipynb
Jupyter Notebook
biobb_REST_API_documentation/html/biobb_REST_API_documentation.web.ipynb
bioexcel/biobb_REST_API_documentation
1850b87e7d9a08cd035467eda9c3fda870379e3a
[ "Apache-2.0" ]
null
null
null
biobb_REST_API_documentation/html/biobb_REST_API_documentation.web.ipynb
bioexcel/biobb_REST_API_documentation
1850b87e7d9a08cd035467eda9c3fda870379e3a
[ "Apache-2.0" ]
null
null
null
biobb_REST_API_documentation/html/biobb_REST_API_documentation.web.ipynb
bioexcel/biobb_REST_API_documentation
1850b87e7d9a08cd035467eda9c3fda870379e3a
[ "Apache-2.0" ]
null
null
null
40.135762
38,750
0.363348
[ [ [ "# The BioBB REST API\n\nThe **[BioBB REST API](https://mmb.irbbarcelona.org/biobb-api)** allows the execution of the **[BioExcel Building Blocks](https://mmb.irbbarcelona.org/biobb/)** in a remote server.\n\n## Documentation\n\nFor an extense documentation section, please go to the **[BioBB REST API website help](https://mmb.irbbarcelona.org/biobb-api/rest)**.\n\n## Settings\n\n### Auxiliar libraries used\n\n* [requests](https://pypi.org/project/requests/): Requests allows you to send *organic, grass-fed* HTTP/1.1 requests, without the need for manual labor.\n* [nb_conda_kernels](https://github.com/Anaconda-Platform/nb_conda_kernels): Enables a Jupyter Notebook or JupyterLab application in one conda environment to access kernels for Python, R, and other languages found in other environments.\n* [nglview](http://nglviewer.org/#nglview): Jupyter/IPython widget to interactively view molecular structures and trajectories in notebooks.\n* [ipywidgets](https://github.com/jupyter-widgets/ipywidgets): Interactive HTML widgets for Jupyter notebooks and the IPython kernel.\n* [plotly](https://plot.ly/python/offline/): Python interactive graphing library integrated in Jupyter notebooks.\n\n### Conda Installation and Launch\n\n```console\ngit clone https://github.com/bioexcel/biobb_REST_API_documentation.git\ncd biobb_REST_API_documentation\nconda env create -f conda_env/environment.yml\nconda activate biobb_REST_API_documentation\njupyter-nbextension enable --py --user widgetsnbextension\njupyter-nbextension enable --py --user nglview\njupyter-notebook biobb_REST_API_documentation/notebooks/biobb_REST_API_documentation.ipynb\n```\n\n***\n\n## Index\n\n * [Behaviour](#behaviour)\n * [Tools information](#tools_info)\n * [List of packages](#list_pckg)\n * [List of tools](#list_tools)\n * [Tool's properties](#tools_prop)\n * [Launch tool](#launch_tool)\n * [Retrieve status](#retrieve_status)\n * [Retrieve data](#retrieve_data)\n * [Sample files](#sample_files)\n * [All sample files](#all_sample)\n * [Package sample files](#pckg_sample)\n * [Tool sample files](#tool_sample)\n * [Single sample file](#sample)\n * [Examples](#examples)\n * [Tools information](#tools_info_ex)\n * [List of packages](#list_pckg_ex)\n * [List of tools from a specific package](#list_tools_ex)\n * [Tool's properties](#tools_prop_ex)\n * [Launch tool](#launch_tool_ex)\n * [Launch job with a YAML file config](#tool_yml_ex)\n * [Launch job with a JSON file config](#tool_json_ex)\n * [Launch job with a piython dictionary config](#tool_dict_ex)\n * [Retrieve status](#retrieve_status_ex)\n * [Retrieve data](#retrieve_data_ex)\n * [Practical cases](#practical_cases)\n * [Example 1: download PDB file from RSCB database](#example1)\n * [Example 2: extract heteroatom from a given structure](#example2)\n * [Example 3: extract energy components from a given GROMACS energy file](#example3)\n\n***\n<img src=\"https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png\" alt=\"Bioexcel2 logo\"\n\ttitle=\"Bioexcel2 logo\" width=\"400\" />\n***\n\n<a id=\"behaviour\"></a>\n## Behaviour\n\nThe **BioBB REST API** works as an asynchronous launcher of jobs, as these jobs can last from a few seconds to several minutes, there are some steps that must be performed for having the complete results of every tool.\n\n**BioExcel Building Blocks** are structured in **[packages and tools](http://mmb.irbbarcelona.org/biobb/availability/source)**. 
Every call to the **BioBB REST API** executes one single tool and returns the output file(s) related to this specific tool.\n\n<a id=\"tools_info\"></a>\n### Tools information\n\n<a id=\"list_pckg\"></a>\n#### List of packages\n\nIn order to get a complete **list of available packages**, we must do a **GET** request to the following endpoint:\n\n`https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch`\n\nThis endpoint returns a **JSON HTTP response** with status `200`. More information in the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest/#/List%20of%20Services/getPckgList).\n\n<a id=\"list_tools\"></a>\n#### List of tools\n\nIf there is need for a **list of tools for a single package**, we must do a **GET** request to the following endpoint:\n\n`https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/{package}`\n\nThis endpoint returns a **JSON HTTP response** with status `200` or a `404` status if the package id is incorrect. More information in the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest/#/List%20of%20Services/getToolsList).\n\n<a id=\"tools_prop\"></a>\n#### Tool's properties\n\nIf there is only need for the **information of a single tool**, we must do a **GET** request to the following endpoint:\n\n`https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/{package}/{tool}`\n\nThis endpoint returns a **JSON HTTP response** with status `200` or a `404` status if the package id and / or the tool id are incorrect. The reason for failure should be detailed in the JSON response. More information in the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest/#/Launch%20Tool/getLaunchTool).\n\n<a id=\"launch_tool\"></a>\n### Launch tool\n\nFor **launching a tool**, we must do a **POST** request to the following endpoint:\n\n`https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/{package}/{tool}`\n\nIn the body of this POST request, **we must add the file(s) needed as input** (included the properties config file in **JSON** or **YAML** format) and the name for the output(s). The detailed list of inputs and outputs with its respectives properties can be found in the **GET** request of this same endpoint.\n\nThis endpoint returns a **JSON HTTP response** with the following possible status:\n\n* `303`: **The job has been successfully launched** and the user must save the token provided and follow to the next endpoint (defined in the same JSON response)\n* `404`: **There was some error launching the tool.** The reason for failure should be detailed in the JSON response.\n* `500`: The job has been launched, but **some internal server error** has occurred during the execution.\n\nMore information for a generic call in the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest/#/Launch%20Tool/postLaunchTool). The documentation for all the tools is available in the [BioBB REST API Tools Documentation section](https://mmb.irbbarcelona.org/biobb-api/tools-documentation?docExpansion=none). 
Interactive examples for all the tools are available in the [BioBB REST API Tools Execution section](https://mmb.irbbarcelona.org/biobb-api/tools-execution).\n\n<a id=\"retrieve_status\"></a>\n### Retrieve status\n\nIf the previous endpoint returned a `303` status, we must do a **GET** request to the following endpoint providing the given token in the path:\n\n`https://mmb.irbbarcelona.org/biobb-api/rest/v1/retrieve/status/{token}`\n\nThis endpoint checks the state of the job and returns a **JSON HTTP response** with the following possible status:\n\n* `200`: **The job has finished successfully** and in the JSON response we can found a list of output files generated by the job with its correspondent id for retrieving them on the next endpoint (defined in the same JSON message).\n* `202`: The job is **still running**.\n* `404`: **Token incorrect, job unexisting or expired.**\n* `500`: Some **internal server error** has occurred during the execution.\n\nMore information in the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest/#/Retrieve/getRetrieveStatus).\n\n<a id=\"retrieve_data\"></a>\n### Retrieve data\n\nOnce the previous endpoint returns a `200` status, the output file(s) are ready for its retrieval, so we must do a **GET** request to the following endpoint providing the given **file id** in the path:\n\n`https://mmb.irbbarcelona.org/biobb-api/rest/v1/retrieve/data/{id}`\n\nThis endpoint returns the **requested file** with a `200` status or a `404` status if the provided id is incorrect, the file doesn't exist or it has expired. More information in the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest/#/Retrieve/getRetrieveData).\n\nNote that if we have executed a job that returns multiple output files, a call to this endpoint must be done **for each of the output files** generated by the job.\n\n<a id=\"sample_files\"></a>\n### Sample files\n\nThe **BioBB REST API** provides sample files for most of the inputs and outputs of each tool. Files can be accessed thought the whole **BioBB REST API** hierarchical range.\n\n<a id=\"all_sample\"></a>\n#### All sample files\n\nIn order to download **all the sample files**, we must do a **GET** request to the following endpoint:\n\n`https://mmb.irbbarcelona.org/biobb-api/rest/v1/sample`\n\nThis endpoint returns the **requested file** with a `200` status. More information in the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest#/Sample%20Files/getSample).\n\n<a id=\"pckg_sample\"></a>\n#### Package sample files\n\nIn order to download **all the sample files of a package**, we must do a **GET** request to the following endpoint:\n\n`https://mmb.irbbarcelona.org/biobb-api/rest/v1/sample/{package}`\n\nThis endpoint returns the **requested file** with a `200` status or a `404` status if the package id is incorrect. More information in the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest#/Sample%20Files/getPackageSample).\n\n<a id=\"tool_sample\"></a>\n#### Tool sample files\n\nIn order to download **all the sample files of a tool**, we must do a **GET** request to the following endpoint:\n\n`https://mmb.irbbarcelona.org/biobb-api/rest/v1/sample/{package}/{tool}`\n\nThis endpoint returns the **requested file** with a `200` status or a `404` status if the package id and / or the tool id are incorrect. The reason for failure should be detailed in the JSON response. 
More information in the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest#/Sample%20Files/getToolSample).\n\n<a id=\"sample\"></a>\n#### Single sample file\n\nIn order to download **a single sample file**, we must do a **GET** request to the following endpoint:\n\n`https://mmb.irbbarcelona.org/biobb-api/rest/v1/sample/{package}/{tool}/{id}`\n\nThis endpoint returns the **requested file** with a `200` status or a `404` status if the package id and / or the tool id and / or the file id are incorrect. The reason for failure should be detailed in the JSON response. More information in the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest#/Sample%20Files/getSingleSample).", "_____no_output_____" ], [ "<a id=\"examples\"></a>\n## Examples\n\nBelow we will do **calls to all the previously defined endpoints** and define some **functions** for make easier the connection to the **BioBB REST API** through **Jupyter Notebook**.\n\nFirst off, we will import the Python requests and json library and set the root URI for the **BioBB REST API**.", "_____no_output_____" ] ], [ [ "import requests\nimport json\n\napiURL = \"https://mmb.irbbarcelona.org/biobb-api/rest/v1/\"", "_____no_output_____" ] ], [ [ "<a id=\"tools_info_ex\"></a>\n### Tools information", "_____no_output_____" ], [ "Definition of simple GET / POST request functions and a class Response:", "_____no_output_____" ] ], [ [ "# Class for returning response status and json content of a requested URL\nclass Response:\n def __init__(self, status, json):\n self.status = status\n self.json = json\n\n# Perform GET request\ndef get_data(url):\n r = requests.get(url)\n return Response(r.status_code, json.loads(r.text))\n\n# Perform POST request\ndef post_data(url, d, f):\n r = requests.post(url, data = d, files = f)\n return Response(r.status_code, json.loads(r.text))", "_____no_output_____" ] ], [ [ "<a id=\"list_pckg_ex\"></a>\n#### List of packages\n\nFor more information about this endpoint, please visit the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest#/List%20of%20Services/getPckgList).\n\n##### Endpoint", "_____no_output_____" ], [ "**GET** `https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch`", "_____no_output_____" ], [ "##### Code", "_____no_output_____" ] ], [ [ "url = apiURL + 'launch'\nresponse = get_data(url)\n\nprint(json.dumps(response.json, indent=2))", "{\n \"packages\": [\n {\n \"id\": \"biobb_analysis\",\n \"tools\": [\n {\n \"id\": \"gmx_cluster\",\n \"description\": \"Creates cluster structures from a given GROMACS compatible trajectory\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_cluster tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_cluster.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input structure file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": 
\"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_pdb_path\",\n \"required\": true,\n \"description\": \"Path to the output cluster file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_cluster.pdb\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_rms\",\n \"description\": \"Performs a Root Mean Square deviation (RMSd) analysis from a given GROMACS compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_rms tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_rms.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input structure file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_xvg_path\",\n \"required\": true,\n \"description\": \"Path to the XVG output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_rms.xvg\",\n \"formats\": [\n \".*\\\\.xvg$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_rgyr\",\n \"description\": \"Computes the radius of gyration (Rgyr) of a molecule about the x-, y- and z-axes, as a function of time, from a given GROMACS compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_rgyr tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_rgyr.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": 
\"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input structure file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_xvg_path\",\n \"required\": true,\n \"description\": \"Path to the XVG output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_rgyr.xvg\",\n \"formats\": [\n \".*\\\\.xvg$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_energy\",\n \"description\": \"Extracts energy components from a given GROMACS energy file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_energy tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_energy.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_energy_path\",\n \"required\": true,\n \"description\": \"Path to the input EDR file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/energy.edr\",\n \"formats\": [\n \".*\\\\.edr$\"\n ]\n },\n {\n \"id\": \"output_xvg_path\",\n \"required\": true,\n \"description\": \"Path to the XVG output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_energy.xvg\",\n \"formats\": [\n \".*\\\\.xvg$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_image\",\n \"description\": \"Corrects periodicity (image) from a given GROMACS compatible trajectory file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_image tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_image.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS input topology 
file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_traj_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_image.xtc\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_trjconv_str\",\n \"description\": \"Converts between GROMACS compatible structure file formats and/or extracts a selection of atoms.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_trjconv_str tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_trjconv_str.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input structure file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS input topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_str_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_trjconv.str.pdb\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_trjconv_str_ens\",\n \"description\": \"Extracts an ensemble of frames containing a selection of atoms from GROMACS compatible trajectory files.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_trjconv_str_ens tool\",\n \"filetype\": \"input\",\n \"sample\": 
\"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_trjconv_str_ens.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS input topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_str_ens_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_trjconv.str.ens.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_trjconv_trj\",\n \"description\": \"Converts between GROMACS compatible trajectory file formats and/or extracts a selection of atoms.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_trjconv_trj tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_trjconv_trj.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_traj_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_trjconv.trj.xtc\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_average\",\n \"description\": \"Calculates a structure average of a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration 
file for the cpptraj_average tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_average.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed structure\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.average.pdb\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_bfactor\",\n \"description\": \"Calculates the Bfactor fluctuations of a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_bfactor tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"input_exp_path\",\n \"required\": false,\n \"description\": \"Path to the experimental reference 
file (required if reference = experimental)\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/experimental.1e5t.pdb\",\n \"formats\": null\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed analysis\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.bfactor.first.dat\",\n \"formats\": [\n \".*\\\\.dat$\",\n \".*\\\\.agr$\",\n \".*\\\\.xmgr$\",\n \".*\\\\.gnu$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_rms\",\n \"description\": \"Calculates the Root Mean Square deviation (RMSd) of a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_rms tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"input_exp_path\",\n \"required\": false,\n \"description\": \"Path to the experimental reference file (required if reference = experimental)\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/experimental.1e5t.pdb\",\n \"formats\": null\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed analysis\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.rms.first.dat\",\n \"formats\": [\n \".*\\\\.dat$\",\n \".*\\\\.agr$\",\n \".*\\\\.xmgr$\",\n \".*\\\\.gnu$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_rmsf\",\n \"description\": \"Calculates the Root Mean Square fluctuations (RMSf) of a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_rmsf tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": 
\"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"input_exp_path\",\n \"required\": false,\n \"description\": \"Path to the experimental reference file (required if reference = experimental)\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/experimental.1e5t.pdb\",\n \"formats\": null\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed analysis\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.rmsf.first.dat\",\n \"formats\": [\n \".*\\\\.dat$\",\n \".*\\\\.agr$\",\n \".*\\\\.xmgr$\",\n \".*\\\\.gnu$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_rgyr\",\n \"description\": \"Computes the radius of gyration (Rgyr) from a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_rgyr tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_rgyr.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output analysis\",\n \"filetype\": \"output\",\n \"sample\": 
\"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.rgyr.dat\",\n \"formats\": [\n \".*\\\\.dat$\",\n \".*\\\\.agr$\",\n \".*\\\\.xmgr$\",\n \".*\\\\.gnu$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_dry\",\n \"description\": \"Dehydrates a given cpptraj compatible trajectory stripping out solvent molecules and ions.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_dry tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_dry.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.dry.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_strip\",\n \"description\": \"Strips a defined set of atoms (mask) from a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_strip tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_strip.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": 
\"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.strip.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_snapshot\",\n \"description\": \"Extracts a particular snapshot from a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_snapshot tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_snapshot.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed structure\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.snapshot.pdb\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_slice\",\n \"description\": \"Extracts a particular trajectory slice from a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_slice tool\",\n 
\"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_slice.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.slice.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_convert\",\n \"description\": \"Converts between cpptraj compatible trajectory file formats and/or extracts a selection of atoms or frames.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_convert tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_convert.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n 
},\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.convert.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_mask\",\n \"description\": \"Extracts a selection of atoms from a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_mask tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_mask.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.mask.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_image\",\n \"description\": \"Corrects periodicity (image) from a given cpptraj trajectory file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_image tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_image.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n 
\".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.image.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n }\n ]\n },\n {\n \"id\": \"biobb_chemistry\",\n \"tools\": [\n {\n \"id\": \"acpype_params_ac\",\n \"description\": \"Small molecule parameterization for AMBER MD package.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the acpype_params_ac tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_chemistry/master/biobb_chemistry/test/data/config/config_acpype_params_ac.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_path\",\n \"required\": true,\n \"description\": \"Path to the input file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/data/acpype/acpype.params.mol2\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.mdl$\",\n \".*\\\\.mol2$\"\n ]\n },\n {\n \"id\": \"output_path_frcmod\",\n \"required\": true,\n \"description\": \"Path to the FRCMOD output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.ac.frcmod\",\n \"formats\": [\n \".*\\\\.frcmod$\"\n ]\n },\n {\n \"id\": \"output_path_inpcrd\",\n \"required\": true,\n \"description\": \"Path to the INPCRD output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.ac.inpcrd\",\n \"formats\": [\n \".*\\\\.inpcrd$\"\n ]\n },\n {\n \"id\": \"output_path_lib\",\n \"required\": true,\n \"description\": \"Path to the LIB output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.ac.lib\",\n \"formats\": [\n \".*\\\\.lib$\"\n ]\n },\n {\n \"id\": \"output_path_prmtop\",\n \"required\": true,\n \"description\": \"Path to the PRMTOP output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.ac.prmtop\",\n \"formats\": [\n \".*\\\\.prmtop$\"\n ]\n }\n ]\n },\n {\n \"id\": \"acpype_params_cns\",\n 
\"description\": \"Small molecule parameterization for CNS/XPLOR MD package.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the acpype_params_cns tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_chemistry/master/biobb_chemistry/test/data/config/config_acpype_params_cns.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_path\",\n \"required\": true,\n \"description\": \"Path to the input file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/data/acpype/acpype.params.mol2\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.mdl$\",\n \".*\\\\.mol2$\"\n ]\n },\n {\n \"id\": \"output_path_par\",\n \"required\": true,\n \"description\": \"Path to the PAR output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.cns.par\",\n \"formats\": [\n \".*\\\\.par$\"\n ]\n },\n {\n \"id\": \"output_path_inp\",\n \"required\": true,\n \"description\": \"Path to the INP output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.cns.inp\",\n \"formats\": [\n \".*\\\\.inp$\"\n ]\n },\n {\n \"id\": \"output_path_top\",\n \"required\": true,\n \"description\": \"Path to the TOP output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.cns.top\",\n \"formats\": [\n \".*\\\\.top$\"\n ]\n }\n ]\n },\n {\n \"id\": \"acpype_params_gmx\",\n \"description\": \"Small molecule parameterization for GROMACS MD package.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the acpype_params_gmx tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_chemistry/master/biobb_chemistry/test/data/config/config_acpype_params_gmx.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_path\",\n \"required\": true,\n \"description\": \"Path to the input file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/data/acpype/acpype.params.mol2\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.mdl$\",\n \".*\\\\.mol2$\"\n ]\n },\n {\n \"id\": \"output_path_gro\",\n \"required\": true,\n \"description\": \"Path to the GRO output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.gmx.gro\",\n \"formats\": [\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"output_path_itp\",\n \"required\": true,\n \"description\": \"Path to the ITP output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.gmx.itp\",\n \"formats\": [\n \".*\\\\.itp$\"\n ]\n },\n {\n \"id\": \"output_path_top\",\n \"required\": true,\n \"description\": \"Path to the TOP output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.gmx.top\",\n \"formats\": [\n \".*\\\\.top$\"\n ]\n }\n ]\n },\n {\n \"id\": 
\"acpype_params_gmx_opls\",\n \"description\": \"Small molecule parameterization for OPLS/AA MD package.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the acpype_params_gmx_opls tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_chemistry/master/biobb_chemistry/test/data/config/config_acpype_params_gmx_opls.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_path\",\n \"required\": true,\n \"description\": \"Path to the input file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/data/acpype/acpype.params.mol2\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.mdl$\",\n \".*\\\\.mol2$\"\n ]\n },\n {\n \"id\": \"output_path_itp\",\n \"required\": true,\n \"description\": \"Path to the ITP output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.gmx.opls.itp\",\n \"formats\": [\n \".*\\\\.itp$\"\n ]\n },\n {\n \"id\": \"output_path_top\",\n \"required\": true,\n \"description\": \"Path to the TOP output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/acpype/ref_acpype.gmx.opls.top\",\n \"formats\": [\n \".*\\\\.top$\"\n ]\n }\n ]\n },\n {\n \"id\": \"babel_convert\",\n \"description\": \"Small molecule format conversion.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the babel_convert tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_chemistry/master/biobb_chemistry/test/data/config/config_babel_convert.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_path\",\n \"required\": true,\n \"description\": \"Path to the input file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/data/babel/babel.smi\",\n \"formats\": [\n \".*\\\\.abinit$\",\n \".*\\\\.acesout$\",\n \".*\\\\.acr$\",\n \".*\\\\.adfout$\",\n \".*\\\\.alc$\",\n \".*\\\\.aoforce$\",\n \".*\\\\.arc$\",\n \".*\\\\.axsf$\",\n \".*\\\\.bgf$\",\n \".*\\\\.box$\",\n \".*\\\\.bs$\",\n \".*\\\\.c09out$\",\n \".*\\\\.c3d2$\",\n \".*\\\\.caccrt$\",\n \".*\\\\.can$\",\n \".*\\\\.car$\",\n \".*\\\\.castep$\",\n \".*\\\\.ccc$\",\n \".*\\\\.cdjson$\",\n \".*\\\\.cdx$\",\n \".*\\\\.cdxml$\",\n \".*\\\\.cif$\",\n \".*\\\\.ck$\",\n \".*\\\\.cml$\",\n \".*\\\\.cmlr$\",\n \".*\\\\.CONFIG$\",\n \".*\\\\.CONTCAR$\",\n \".*\\\\.CONTFF$\",\n \".*\\\\.crk2d$\",\n \".*\\\\.crk3d$\",\n \".*\\\\.ct$\",\n \".*\\\\.cub$\",\n \".*\\\\.cube$\",\n \".*\\\\.dallog$\",\n \".*\\\\.dalmol$\",\n \".*\\\\.dat$\",\n \".*\\\\.dmol$\",\n \".*\\\\.dx$\",\n \".*\\\\.ent$\",\n \".*\\\\.exyz$\",\n \".*\\\\.fa$\",\n \".*\\\\.fasta$\",\n \".*\\\\.fch$\",\n \".*\\\\.fchk$\",\n \".*\\\\.fck$\",\n \".*\\\\.feat$\",\n \".*\\\\.fhiaims$\",\n \".*\\\\.fract$\",\n \".*\\\\.fs$\",\n \".*\\\\.fsa$\",\n \".*\\\\.g03$\",\n \".*\\\\.g09$\",\n \".*\\\\.g92$\",\n \".*\\\\.g94$\",\n \".*\\\\.g98$\",\n \".*\\\\.gal$\",\n \".*\\\\.gam$\",\n \".*\\\\.gamess$\",\n \".*\\\\.gamin$\",\n \".*\\\\.gamout$\",\n \".*\\\\.got$\",\n \".*\\\\.gpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.gukin$\",\n \".*\\\\.gukout$\",\n \".*\\\\.gzmat$\",\n \".*\\\\.hin$\",\n 
\".*\\\\.HISTORY$\",\n \".*\\\\.inchi$\",\n \".*\\\\.inp$\",\n \".*\\\\.ins$\",\n \".*\\\\.jin$\",\n \".*\\\\.jout$\",\n \".*\\\\.log$\",\n \".*\\\\.lpmd$\",\n \".*\\\\.mcdl$\",\n \".*\\\\.mcif$\",\n \".*\\\\.MDFF$\",\n \".*\\\\.mdl$\",\n \".*\\\\.ml2$\",\n \".*\\\\.mmcif$\",\n \".*\\\\.mmd$\",\n \".*\\\\.mmod$\",\n \".*\\\\.mol$\",\n \".*\\\\.mol2$\",\n \".*\\\\.mold$\",\n \".*\\\\.molden$\",\n \".*\\\\.molf$\",\n \".*\\\\.moo$\",\n \".*\\\\.mop$\",\n \".*\\\\.mopcrt$\",\n \".*\\\\.mopin$\",\n \".*\\\\.mopout$\",\n \".*\\\\.mpc$\",\n \".*\\\\.mpo$\",\n \".*\\\\.mpqc$\",\n \".*\\\\.mrv$\",\n \".*\\\\.msi$\",\n \".*\\\\.nwo$\",\n \".*\\\\.orca$\",\n \".*\\\\.out$\",\n \".*\\\\.outmol$\",\n \".*\\\\.output$\",\n \".*\\\\.pc$\",\n \".*\\\\.pcjson$\",\n \".*\\\\.pcm$\",\n \".*\\\\.pdb$\",\n \".*\\\\.pdbqt$\",\n \".*\\\\.png$\",\n \".*\\\\.pos$\",\n \".*\\\\.POSCAR$\",\n \".*\\\\.POSFF$\",\n \".*\\\\.pqr$\",\n \".*\\\\.pqs$\",\n \".*\\\\.prep$\",\n \".*\\\\.pwscf$\",\n \".*\\\\.qcout$\",\n \".*\\\\.res$\",\n \".*\\\\.rsmi$\",\n \".*\\\\.rxn$\",\n \".*\\\\.sd$\",\n \".*\\\\.sdf$\",\n \".*\\\\.siesta$\",\n \".*\\\\.smi$\",\n \".*\\\\.smiles$\",\n \".*\\\\.smy$\",\n \".*\\\\.sy2$\",\n \".*\\\\.t41$\",\n \".*\\\\.tdd$\",\n \".*\\\\.text$\",\n \".*\\\\.therm$\",\n \".*\\\\.tmol$\",\n \".*\\\\.txt$\",\n \".*\\\\.txyz$\",\n \".*\\\\.unixyz$\",\n \".*\\\\.VASP$\",\n \".*\\\\.vmol$\",\n \".*\\\\.xml$\",\n \".*\\\\.xsf$\",\n \".*\\\\.xtc$\",\n \".*\\\\.xyz$\",\n \".*\\\\.yob$\"\n ]\n },\n {\n \"id\": \"output_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/babel/ref_babel.convert.mol2\",\n \"formats\": [\n \".*\\\\.acesin$\",\n \".*\\\\.adf$\",\n \".*\\\\.alc$\",\n \".*\\\\.ascii$\",\n \".*\\\\.bgf$\",\n \".*\\\\.box$\",\n \".*\\\\.bs$\",\n \".*\\\\.c3d1$\",\n \".*\\\\.c3d2$\",\n \".*\\\\.cac$\",\n \".*\\\\.caccrt$\",\n \".*\\\\.cache$\",\n \".*\\\\.cacint$\",\n \".*\\\\.can$\",\n \".*\\\\.cdjson$\",\n \".*\\\\.cdxml$\",\n \".*\\\\.cht$\",\n \".*\\\\.cif$\",\n \".*\\\\.ck$\",\n \".*\\\\.cml$\",\n \".*\\\\.cmlr$\",\n \".*\\\\.com$\",\n \".*\\\\.confabreport$\",\n \".*\\\\.CONFIG$\",\n \".*\\\\.CONTCAR$\",\n \".*\\\\.CONTFF$\",\n \".*\\\\.copy$\",\n \".*\\\\.crk2d$\",\n \".*\\\\.crk3d$\",\n \".*\\\\.csr$\",\n \".*\\\\.cssr$\",\n \".*\\\\.ct$\",\n \".*\\\\.cub$\",\n \".*\\\\.cube$\",\n \".*\\\\.dalmol$\",\n \".*\\\\.dmol$\",\n \".*\\\\.dx$\",\n \".*\\\\.ent$\",\n \".*\\\\.exyz$\",\n \".*\\\\.fa$\",\n \".*\\\\.fasta$\",\n \".*\\\\.feat$\",\n \".*\\\\.fh$\",\n \".*\\\\.fhiaims$\",\n \".*\\\\.fix$\",\n \".*\\\\.fps$\",\n \".*\\\\.fpt$\",\n \".*\\\\.fract$\",\n \".*\\\\.fs$\",\n \".*\\\\.fsa$\",\n \".*\\\\.gamin$\",\n \".*\\\\.gau$\",\n \".*\\\\.gjc$\",\n \".*\\\\.gjf$\",\n \".*\\\\.gpr$\",\n \".*\\\\.gr96$\",\n \".*\\\\.gro$\",\n \".*\\\\.gukin$\",\n \".*\\\\.gukout$\",\n \".*\\\\.gzmat$\",\n \".*\\\\.hin$\",\n \".*\\\\.inchi$\",\n \".*\\\\.inchikey$\",\n \".*\\\\.inp$\",\n \".*\\\\.jin$\",\n \".*\\\\.k$\",\n \".*\\\\.lmpdat$\",\n \".*\\\\.lpmd$\",\n \".*\\\\.mcdl$\",\n \".*\\\\.mcif$\",\n \".*\\\\.MDFF$\",\n \".*\\\\.mdl$\",\n \".*\\\\.ml2$\",\n \".*\\\\.mmcif$\",\n \".*\\\\.mmd$\",\n \".*\\\\.mmod$\",\n \".*\\\\.mna$\",\n \".*\\\\.mol$\",\n \".*\\\\.mol2$\",\n \".*\\\\.mold$\",\n \".*\\\\.molden$\",\n \".*\\\\.molf$\",\n \".*\\\\.molreport$\",\n \".*\\\\.mop$\",\n \".*\\\\.mopcrt$\",\n \".*\\\\.mopin$\",\n \".*\\\\.mp$\",\n \".*\\\\.mpc$\",\n 
\".*\\\\.mpd$\",\n \".*\\\\.mpqcin$\",\n \".*\\\\.mrv$\",\n \".*\\\\.msms$\",\n \".*\\\\.nul$\",\n \".*\\\\.nw$\",\n \".*\\\\.orcainp$\",\n \".*\\\\.outmol$\",\n \".*\\\\.paint$\",\n \".*\\\\.pcjson$\",\n \".*\\\\.pcm$\",\n \".*\\\\.pdb$\",\n \".*\\\\.pdbqt$\",\n \".*\\\\.png$\",\n \".*\\\\.pointcloud$\",\n \".*\\\\.POSCAR$\",\n \".*\\\\.POSFF$\",\n \".*\\\\.pov$\",\n \".*\\\\.pqr$\",\n \".*\\\\.pqs$\",\n \".*\\\\.qcin$\",\n \".*\\\\.report$\",\n \".*\\\\.rsmi$\",\n \".*\\\\.rxn$\",\n \".*\\\\.sd$\",\n \".*\\\\.sdf$\",\n \".*\\\\.smi$\",\n \".*\\\\.smiles$\",\n \".*\\\\.stl$\",\n \".*\\\\.svg$\",\n \".*\\\\.sy2$\",\n \".*\\\\.tdd$\",\n \".*\\\\.text$\",\n \".*\\\\.therm$\",\n \".*\\\\.tmol$\",\n \".*\\\\.txt$\",\n \".*\\\\.txyz$\",\n \".*\\\\.unixyz$\",\n \".*\\\\.VASP$\",\n \".*\\\\.vmol$\",\n \".*\\\\.xed$\",\n \".*\\\\.xyz$\",\n \".*\\\\.yob$\",\n \".*\\\\.zin$\"\n ]\n }\n ]\n },\n {\n \"id\": \"babel_add_hydrogens\",\n \"description\": \"Adds hydrogen atoms to small molecules.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the babel_add_hydrogens tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_chemistry/master/biobb_chemistry/test/data/config/config_babel_add_hydrogens.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_path\",\n \"required\": true,\n \"description\": \"Path to the input file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/data/babel/babel.no.H.pdb\",\n \"formats\": [\n \".*\\\\.abinit$\",\n \".*\\\\.acesout$\",\n \".*\\\\.acr$\",\n \".*\\\\.adfout$\",\n \".*\\\\.alc$\",\n \".*\\\\.aoforce$\",\n \".*\\\\.arc$\",\n \".*\\\\.axsf$\",\n \".*\\\\.bgf$\",\n \".*\\\\.box$\",\n \".*\\\\.bs$\",\n \".*\\\\.c09out$\",\n \".*\\\\.c3d2$\",\n \".*\\\\.caccrt$\",\n \".*\\\\.can$\",\n \".*\\\\.car$\",\n \".*\\\\.castep$\",\n \".*\\\\.ccc$\",\n \".*\\\\.cdjson$\",\n \".*\\\\.cdx$\",\n \".*\\\\.cdxml$\",\n \".*\\\\.cif$\",\n \".*\\\\.ck$\",\n \".*\\\\.cml$\",\n \".*\\\\.cmlr$\",\n \".*\\\\.CONFIG$\",\n \".*\\\\.CONTCAR$\",\n \".*\\\\.CONTFF$\",\n \".*\\\\.crk2d$\",\n \".*\\\\.crk3d$\",\n \".*\\\\.ct$\",\n \".*\\\\.cub$\",\n \".*\\\\.cube$\",\n \".*\\\\.dallog$\",\n \".*\\\\.dalmol$\",\n \".*\\\\.dat$\",\n \".*\\\\.dmol$\",\n \".*\\\\.dx$\",\n \".*\\\\.ent$\",\n \".*\\\\.exyz$\",\n \".*\\\\.fa$\",\n \".*\\\\.fasta$\",\n \".*\\\\.fch$\",\n \".*\\\\.fchk$\",\n \".*\\\\.fck$\",\n \".*\\\\.feat$\",\n \".*\\\\.fhiaims$\",\n \".*\\\\.fract$\",\n \".*\\\\.fs$\",\n \".*\\\\.fsa$\",\n \".*\\\\.g03$\",\n \".*\\\\.g09$\",\n \".*\\\\.g92$\",\n \".*\\\\.g94$\",\n \".*\\\\.g98$\",\n \".*\\\\.gal$\",\n \".*\\\\.gam$\",\n \".*\\\\.gamess$\",\n \".*\\\\.gamin$\",\n \".*\\\\.gamout$\",\n \".*\\\\.got$\",\n \".*\\\\.gpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.gukin$\",\n \".*\\\\.gukout$\",\n \".*\\\\.gzmat$\",\n \".*\\\\.hin$\",\n \".*\\\\.HISTORY$\",\n \".*\\\\.inchi$\",\n \".*\\\\.inp$\",\n \".*\\\\.ins$\",\n \".*\\\\.jin$\",\n \".*\\\\.jout$\",\n \".*\\\\.log$\",\n \".*\\\\.lpmd$\",\n \".*\\\\.mcdl$\",\n \".*\\\\.mcif$\",\n \".*\\\\.MDFF$\",\n \".*\\\\.mdl$\",\n \".*\\\\.ml2$\",\n \".*\\\\.mmcif$\",\n \".*\\\\.mmd$\",\n \".*\\\\.mmod$\",\n \".*\\\\.mol$\",\n \".*\\\\.mol2$\",\n \".*\\\\.mold$\",\n \".*\\\\.molden$\",\n \".*\\\\.molf$\",\n \".*\\\\.moo$\",\n \".*\\\\.mop$\",\n \".*\\\\.mopcrt$\",\n \".*\\\\.mopin$\",\n \".*\\\\.mopout$\",\n \".*\\\\.mpc$\",\n 
\".*\\\\.mpo$\",\n \".*\\\\.mpqc$\",\n \".*\\\\.mrv$\",\n \".*\\\\.msi$\",\n \".*\\\\.nwo$\",\n \".*\\\\.orca$\",\n \".*\\\\.out$\",\n \".*\\\\.outmol$\",\n \".*\\\\.output$\",\n \".*\\\\.pc$\",\n \".*\\\\.pcjson$\",\n \".*\\\\.pcm$\",\n \".*\\\\.pdb$\",\n \".*\\\\.pdbqt$\",\n \".*\\\\.png$\",\n \".*\\\\.pos$\",\n \".*\\\\.POSCAR$\",\n \".*\\\\.POSFF$\",\n \".*\\\\.pqr$\",\n \".*\\\\.pqs$\",\n \".*\\\\.prep$\",\n \".*\\\\.pwscf$\",\n \".*\\\\.qcout$\",\n \".*\\\\.res$\",\n \".*\\\\.rsmi$\",\n \".*\\\\.rxn$\",\n \".*\\\\.sd$\",\n \".*\\\\.sdf$\",\n \".*\\\\.siesta$\",\n \".*\\\\.smi$\",\n \".*\\\\.smiles$\",\n \".*\\\\.smy$\",\n \".*\\\\.sy2$\",\n \".*\\\\.t41$\",\n \".*\\\\.tdd$\",\n \".*\\\\.text$\",\n \".*\\\\.therm$\",\n \".*\\\\.tmol$\",\n \".*\\\\.txt$\",\n \".*\\\\.txyz$\",\n \".*\\\\.unixyz$\",\n \".*\\\\.VASP$\",\n \".*\\\\.vmol$\",\n \".*\\\\.xml$\",\n \".*\\\\.xsf$\",\n \".*\\\\.xtc$\",\n \".*\\\\.xyz$\",\n \".*\\\\.yob$\"\n ]\n },\n {\n \"id\": \"output_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/babel/ref_babel.hydrogens.pdb\",\n \"formats\": [\n \".*\\\\.acesin$\",\n \".*\\\\.adf$\",\n \".*\\\\.alc$\",\n \".*\\\\.ascii$\",\n \".*\\\\.bgf$\",\n \".*\\\\.box$\",\n \".*\\\\.bs$\",\n \".*\\\\.c3d1$\",\n \".*\\\\.c3d2$\",\n \".*\\\\.cac$\",\n \".*\\\\.caccrt$\",\n \".*\\\\.cache$\",\n \".*\\\\.cacint$\",\n \".*\\\\.can$\",\n \".*\\\\.cdjson$\",\n \".*\\\\.cdxml$\",\n \".*\\\\.cht$\",\n \".*\\\\.cif$\",\n \".*\\\\.ck$\",\n \".*\\\\.cml$\",\n \".*\\\\.cmlr$\",\n \".*\\\\.com$\",\n \".*\\\\.confabreport$\",\n \".*\\\\.CONFIG$\",\n \".*\\\\.CONTCAR$\",\n \".*\\\\.CONTFF$\",\n \".*\\\\.copy$\",\n \".*\\\\.crk2d$\",\n \".*\\\\.crk3d$\",\n \".*\\\\.csr$\",\n \".*\\\\.cssr$\",\n \".*\\\\.ct$\",\n \".*\\\\.cub$\",\n \".*\\\\.cube$\",\n \".*\\\\.dalmol$\",\n \".*\\\\.dmol$\",\n \".*\\\\.dx$\",\n \".*\\\\.ent$\",\n \".*\\\\.exyz$\",\n \".*\\\\.fa$\",\n \".*\\\\.fasta$\",\n \".*\\\\.feat$\",\n \".*\\\\.fh$\",\n \".*\\\\.fhiaims$\",\n \".*\\\\.fix$\",\n \".*\\\\.fps$\",\n \".*\\\\.fpt$\",\n \".*\\\\.fract$\",\n \".*\\\\.fs$\",\n \".*\\\\.fsa$\",\n \".*\\\\.gamin$\",\n \".*\\\\.gau$\",\n \".*\\\\.gjc$\",\n \".*\\\\.gjf$\",\n \".*\\\\.gpr$\",\n \".*\\\\.gr96$\",\n \".*\\\\.gro$\",\n \".*\\\\.gukin$\",\n \".*\\\\.gukout$\",\n \".*\\\\.gzmat$\",\n \".*\\\\.hin$\",\n \".*\\\\.inchi$\",\n \".*\\\\.inchikey$\",\n \".*\\\\.inp$\",\n \".*\\\\.jin$\",\n \".*\\\\.k$\",\n \".*\\\\.lmpdat$\",\n \".*\\\\.lpmd$\",\n \".*\\\\.mcdl$\",\n \".*\\\\.mcif$\",\n \".*\\\\.MDFF$\",\n \".*\\\\.mdl$\",\n \".*\\\\.ml2$\",\n \".*\\\\.mmcif$\",\n \".*\\\\.mmd$\",\n \".*\\\\.mmod$\",\n \".*\\\\.mna$\",\n \".*\\\\.mol$\",\n \".*\\\\.mol2$\",\n \".*\\\\.mold$\",\n \".*\\\\.molden$\",\n \".*\\\\.molf$\",\n \".*\\\\.molreport$\",\n \".*\\\\.mop$\",\n \".*\\\\.mopcrt$\",\n \".*\\\\.mopin$\",\n \".*\\\\.mp$\",\n \".*\\\\.mpc$\",\n \".*\\\\.mpd$\",\n \".*\\\\.mpqcin$\",\n \".*\\\\.mrv$\",\n \".*\\\\.msms$\",\n \".*\\\\.nul$\",\n \".*\\\\.nw$\",\n \".*\\\\.orcainp$\",\n \".*\\\\.outmol$\",\n \".*\\\\.paint$\",\n \".*\\\\.pcjson$\",\n \".*\\\\.pcm$\",\n \".*\\\\.pdb$\",\n \".*\\\\.pdbqt$\",\n \".*\\\\.png$\",\n \".*\\\\.pointcloud$\",\n \".*\\\\.POSCAR$\",\n \".*\\\\.POSFF$\",\n \".*\\\\.pov$\",\n \".*\\\\.pqr$\",\n \".*\\\\.pqs$\",\n \".*\\\\.qcin$\",\n \".*\\\\.report$\",\n \".*\\\\.rsmi$\",\n \".*\\\\.rxn$\",\n \".*\\\\.sd$\",\n \".*\\\\.sdf$\",\n \".*\\\\.smi$\",\n 
\".*\\\\.smiles$\",\n \".*\\\\.stl$\",\n \".*\\\\.svg$\",\n \".*\\\\.sy2$\",\n \".*\\\\.tdd$\",\n \".*\\\\.text$\",\n \".*\\\\.therm$\",\n \".*\\\\.tmol$\",\n \".*\\\\.txt$\",\n \".*\\\\.txyz$\",\n \".*\\\\.unixyz$\",\n \".*\\\\.VASP$\",\n \".*\\\\.vmol$\",\n \".*\\\\.xed$\",\n \".*\\\\.xyz$\",\n \".*\\\\.yob$\",\n \".*\\\\.zin$\"\n ]\n }\n ]\n },\n {\n \"id\": \"babel_remove_hydrogens\",\n \"description\": \"Removes hydrogen atoms to small molecules.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the babel_remove_hydrogens tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_chemistry/master/biobb_chemistry/test/data/config/config_babel_remove_hydrogens.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_path\",\n \"required\": true,\n \"description\": \"Path to the input file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/data/babel/babel.H.pdb\",\n \"formats\": [\n \".*\\\\.abinit$\",\n \".*\\\\.acesout$\",\n \".*\\\\.acr$\",\n \".*\\\\.adfout$\",\n \".*\\\\.alc$\",\n \".*\\\\.aoforce$\",\n \".*\\\\.arc$\",\n \".*\\\\.axsf$\",\n \".*\\\\.bgf$\",\n \".*\\\\.box$\",\n \".*\\\\.bs$\",\n \".*\\\\.c09out$\",\n \".*\\\\.c3d2$\",\n \".*\\\\.caccrt$\",\n \".*\\\\.can$\",\n \".*\\\\.car$\",\n \".*\\\\.castep$\",\n \".*\\\\.ccc$\",\n \".*\\\\.cdjson$\",\n \".*\\\\.cdx$\",\n \".*\\\\.cdxml$\",\n \".*\\\\.cif$\",\n \".*\\\\.ck$\",\n \".*\\\\.cml$\",\n \".*\\\\.cmlr$\",\n \".*\\\\.CONFIG$\",\n \".*\\\\.CONTCAR$\",\n \".*\\\\.CONTFF$\",\n \".*\\\\.crk2d$\",\n \".*\\\\.crk3d$\",\n \".*\\\\.ct$\",\n \".*\\\\.cub$\",\n \".*\\\\.cube$\",\n \".*\\\\.dallog$\",\n \".*\\\\.dalmol$\",\n \".*\\\\.dat$\",\n \".*\\\\.dmol$\",\n \".*\\\\.dx$\",\n \".*\\\\.ent$\",\n \".*\\\\.exyz$\",\n \".*\\\\.fa$\",\n \".*\\\\.fasta$\",\n \".*\\\\.fch$\",\n \".*\\\\.fchk$\",\n \".*\\\\.fck$\",\n \".*\\\\.feat$\",\n \".*\\\\.fhiaims$\",\n \".*\\\\.fract$\",\n \".*\\\\.fs$\",\n \".*\\\\.fsa$\",\n \".*\\\\.g03$\",\n \".*\\\\.g09$\",\n \".*\\\\.g92$\",\n \".*\\\\.g94$\",\n \".*\\\\.g98$\",\n \".*\\\\.gal$\",\n \".*\\\\.gam$\",\n \".*\\\\.gamess$\",\n \".*\\\\.gamin$\",\n \".*\\\\.gamout$\",\n \".*\\\\.got$\",\n \".*\\\\.gpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.gukin$\",\n \".*\\\\.gukout$\",\n \".*\\\\.gzmat$\",\n \".*\\\\.hin$\",\n \".*\\\\.HISTORY$\",\n \".*\\\\.inchi$\",\n \".*\\\\.inp$\",\n \".*\\\\.ins$\",\n \".*\\\\.jin$\",\n \".*\\\\.jout$\",\n \".*\\\\.log$\",\n \".*\\\\.lpmd$\",\n \".*\\\\.mcdl$\",\n \".*\\\\.mcif$\",\n \".*\\\\.MDFF$\",\n \".*\\\\.mdl$\",\n \".*\\\\.ml2$\",\n \".*\\\\.mmcif$\",\n \".*\\\\.mmd$\",\n \".*\\\\.mmod$\",\n \".*\\\\.mol$\",\n \".*\\\\.mol2$\",\n \".*\\\\.mold$\",\n \".*\\\\.molden$\",\n \".*\\\\.molf$\",\n \".*\\\\.moo$\",\n \".*\\\\.mop$\",\n \".*\\\\.mopcrt$\",\n \".*\\\\.mopin$\",\n \".*\\\\.mopout$\",\n \".*\\\\.mpc$\",\n \".*\\\\.mpo$\",\n \".*\\\\.mpqc$\",\n \".*\\\\.mrv$\",\n \".*\\\\.msi$\",\n \".*\\\\.nwo$\",\n \".*\\\\.orca$\",\n \".*\\\\.out$\",\n \".*\\\\.outmol$\",\n \".*\\\\.output$\",\n \".*\\\\.pc$\",\n \".*\\\\.pcjson$\",\n \".*\\\\.pcm$\",\n \".*\\\\.pdb$\",\n \".*\\\\.pdbqt$\",\n \".*\\\\.png$\",\n \".*\\\\.pos$\",\n \".*\\\\.POSCAR$\",\n \".*\\\\.POSFF$\",\n \".*\\\\.pqr$\",\n \".*\\\\.pqs$\",\n \".*\\\\.prep$\",\n \".*\\\\.pwscf$\",\n \".*\\\\.qcout$\",\n \".*\\\\.res$\",\n \".*\\\\.rsmi$\",\n \".*\\\\.rxn$\",\n \".*\\\\.sd$\",\n \".*\\\\.sdf$\",\n 
\".*\\\\.siesta$\",\n \".*\\\\.smi$\",\n \".*\\\\.smiles$\",\n \".*\\\\.smy$\",\n \".*\\\\.sy2$\",\n \".*\\\\.t41$\",\n \".*\\\\.tdd$\",\n \".*\\\\.text$\",\n \".*\\\\.therm$\",\n \".*\\\\.tmol$\",\n \".*\\\\.txt$\",\n \".*\\\\.txyz$\",\n \".*\\\\.unixyz$\",\n \".*\\\\.VASP$\",\n \".*\\\\.vmol$\",\n \".*\\\\.xml$\",\n \".*\\\\.xsf$\",\n \".*\\\\.xtc$\",\n \".*\\\\.xyz$\",\n \".*\\\\.yob$\"\n ]\n },\n {\n \"id\": \"output_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/babel/ref_babel.nohydrogens.pdb\",\n \"formats\": [\n \".*\\\\.acesin$\",\n \".*\\\\.adf$\",\n \".*\\\\.alc$\",\n \".*\\\\.ascii$\",\n \".*\\\\.bgf$\",\n \".*\\\\.box$\",\n \".*\\\\.bs$\",\n \".*\\\\.c3d1$\",\n \".*\\\\.c3d2$\",\n \".*\\\\.cac$\",\n \".*\\\\.caccrt$\",\n \".*\\\\.cache$\",\n \".*\\\\.cacint$\",\n \".*\\\\.can$\",\n \".*\\\\.cdjson$\",\n \".*\\\\.cdxml$\",\n \".*\\\\.cht$\",\n \".*\\\\.cif$\",\n \".*\\\\.ck$\",\n \".*\\\\.cml$\",\n \".*\\\\.cmlr$\",\n \".*\\\\.com$\",\n \".*\\\\.confabreport$\",\n \".*\\\\.CONFIG$\",\n \".*\\\\.CONTCAR$\",\n \".*\\\\.CONTFF$\",\n \".*\\\\.copy$\",\n \".*\\\\.crk2d$\",\n \".*\\\\.crk3d$\",\n \".*\\\\.csr$\",\n \".*\\\\.cssr$\",\n \".*\\\\.ct$\",\n \".*\\\\.cub$\",\n \".*\\\\.cube$\",\n \".*\\\\.dalmol$\",\n \".*\\\\.dmol$\",\n \".*\\\\.dx$\",\n \".*\\\\.ent$\",\n \".*\\\\.exyz$\",\n \".*\\\\.fa$\",\n \".*\\\\.fasta$\",\n \".*\\\\.feat$\",\n \".*\\\\.fh$\",\n \".*\\\\.fhiaims$\",\n \".*\\\\.fix$\",\n \".*\\\\.fps$\",\n \".*\\\\.fpt$\",\n \".*\\\\.fract$\",\n \".*\\\\.fs$\",\n \".*\\\\.fsa$\",\n \".*\\\\.gamin$\",\n \".*\\\\.gau$\",\n \".*\\\\.gjc$\",\n \".*\\\\.gjf$\",\n \".*\\\\.gpr$\",\n \".*\\\\.gr96$\",\n \".*\\\\.gro$\",\n \".*\\\\.gukin$\",\n \".*\\\\.gukout$\",\n \".*\\\\.gzmat$\",\n \".*\\\\.hin$\",\n \".*\\\\.inchi$\",\n \".*\\\\.inchikey$\",\n \".*\\\\.inp$\",\n \".*\\\\.jin$\",\n \".*\\\\.k$\",\n \".*\\\\.lmpdat$\",\n \".*\\\\.lpmd$\",\n \".*\\\\.mcdl$\",\n \".*\\\\.mcif$\",\n \".*\\\\.MDFF$\",\n \".*\\\\.mdl$\",\n \".*\\\\.ml2$\",\n \".*\\\\.mmcif$\",\n \".*\\\\.mmd$\",\n \".*\\\\.mmod$\",\n \".*\\\\.mna$\",\n \".*\\\\.mol$\",\n \".*\\\\.mol2$\",\n \".*\\\\.mold$\",\n \".*\\\\.molden$\",\n \".*\\\\.molf$\",\n \".*\\\\.molreport$\",\n \".*\\\\.mop$\",\n \".*\\\\.mopcrt$\",\n \".*\\\\.mopin$\",\n \".*\\\\.mp$\",\n \".*\\\\.mpc$\",\n \".*\\\\.mpd$\",\n \".*\\\\.mpqcin$\",\n \".*\\\\.mrv$\",\n \".*\\\\.msms$\",\n \".*\\\\.nul$\",\n \".*\\\\.nw$\",\n \".*\\\\.orcainp$\",\n \".*\\\\.outmol$\",\n \".*\\\\.paint$\",\n \".*\\\\.pcjson$\",\n \".*\\\\.pcm$\",\n \".*\\\\.pdb$\",\n \".*\\\\.pdbqt$\",\n \".*\\\\.png$\",\n \".*\\\\.pointcloud$\",\n \".*\\\\.POSCAR$\",\n \".*\\\\.POSFF$\",\n \".*\\\\.pov$\",\n \".*\\\\.pqr$\",\n \".*\\\\.pqs$\",\n \".*\\\\.qcin$\",\n \".*\\\\.report$\",\n \".*\\\\.rsmi$\",\n \".*\\\\.rxn$\",\n \".*\\\\.sd$\",\n \".*\\\\.sdf$\",\n \".*\\\\.smi$\",\n \".*\\\\.smiles$\",\n \".*\\\\.stl$\",\n \".*\\\\.svg$\",\n \".*\\\\.sy2$\",\n \".*\\\\.tdd$\",\n \".*\\\\.text$\",\n \".*\\\\.therm$\",\n \".*\\\\.tmol$\",\n \".*\\\\.txt$\",\n \".*\\\\.txyz$\",\n \".*\\\\.unixyz$\",\n \".*\\\\.VASP$\",\n \".*\\\\.vmol$\",\n \".*\\\\.xed$\",\n \".*\\\\.xyz$\",\n \".*\\\\.yob$\",\n \".*\\\\.zin$\"\n ]\n }\n ]\n },\n {\n \"id\": \"babel_minimize\",\n \"description\": \"Energetically minimize small molecules.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration 
file for the babel_minimize tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_chemistry/master/biobb_chemistry/test/data/config/config_babel_minimize.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_path\",\n \"required\": true,\n \"description\": \"Path to the input file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/data/babel/babel.minimize.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\"\n ]\n },\n {\n \"id\": \"output_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/babel/ref_babel.minimize.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\"\n ]\n }\n ]\n },\n {\n \"id\": \"reduce_add_hydrogens\",\n \"description\": \"Adds hydrogen atoms to small molecules.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the reduce_add_hydrogens tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_chemistry/master/biobb_chemistry/test/data/config/config_reduce_add_hydrogens.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_path\",\n \"required\": true,\n \"description\": \"Path to the input file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/data/ambertools/reduce.no.H.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"output_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/ambertools/ref_reduce.add.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n },\n {\n \"id\": \"reduce_remove_hydrogens\",\n \"description\": \"Removes hydrogen atoms to small molecules.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the reduce_remove_hydrogens tool.\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_path\",\n \"required\": true,\n \"description\": \"Path to the input file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/data/ambertools/reduce.H.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"output_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_chemistry/raw/master/biobb_chemistry/test/reference/ambertools/ref_reduce.remove.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n }\n ]\n },\n {\n \"id\": \"biobb_io\",\n \"tools\": [\n {\n \"id\": \"ligand\",\n \"description\": \"Downloads a ligand file from the MMB REST API.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the ligand tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_io/master/biobb_io/test/data/config/config_ligand.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"output_pdb_path\",\n 
\"required\": true,\n \"description\": \"Path to the output PDB ligand file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_io/raw/master/biobb_io/test/reference/api/ligand_12d.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n },\n {\n \"id\": \"pdb\",\n \"description\": \"Downloads a PDB file from the RCSB or MMB REST APIs.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the pdb tool.\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"output_pdb_path\",\n \"required\": true,\n \"description\": \"Path to the output PDB file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_io/raw/master/biobb_io/test/reference/api/pdb_1ubq.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n },\n {\n \"id\": \"pdb_variants\",\n \"description\": \"Creates a text file containing a list of all the variants mapped to a RSCB PDB code from the corresponding UNIPROT entries.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the pdb_variants tool.\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"output_mutations_list_txt\",\n \"required\": true,\n \"description\": \"Path to the TXT file containing an ASCII comma separated values of the mutations\",\n \"filetype\": \"output\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.txt$\"\n ]\n }\n ]\n },\n {\n \"id\": \"pdb_cluster_zip\",\n \"description\": \"Creates a zip file containing all the PDB files in the given sequence similarity cluster percentage of the given RSCB PDB code.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the pdb_cluster_zip tool.\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"output_pdb_zip_path\",\n \"required\": true,\n \"description\": \"Path to the ZIP or PDB file containing the output PDB files\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_io/raw/master/biobb_io/test/reference/api/reference_output_pdb_zip_path.zip\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.zip$\"\n ]\n }\n ]\n }\n ]\n },\n {\n \"id\": \"biobb_md\",\n \"tools\": [\n {\n \"id\": \"pdb2gmx\",\n \"description\": \"Creates a compressed (ZIP) GROMACS topology (TOP and ITP files) from a given PDB file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the pdb2gmx tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_md/master/biobb_md/test/data/config/config_pdb2gmx.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_pdb_path\",\n \"required\": true,\n \"description\": \"Path to the input PDB file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/egfr.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"output_gro_path\",\n \"required\": true,\n \"description\": \"Path to the output GRO file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_pdb2gmx.gro\",\n \"formats\": [\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": 
\"output_top_zip_path\",\n \"required\": true,\n \"description\": \"Path the output TOP topology in zip format\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_pdb2gmx.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n }\n ]\n },\n {\n \"id\": \"editconf\",\n \"description\": \"Creates a GROMACS structure file (GRO) adding the information of the solvent box to the input structure file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the editconf tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_gro_path\",\n \"required\": true,\n \"description\": \"Path to the input GRO file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/editconf.gro\",\n \"formats\": [\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"output_gro_path\",\n \"required\": true,\n \"description\": \"Path to the output GRO file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_editconf.gro\",\n \"formats\": [\n \".*\\\\.gro$\"\n ]\n }\n ]\n },\n {\n \"id\": \"genion\",\n \"description\": \"Creates a new compressed GROMACS topology adding ions until reaching the desired concentration to the input compressed GROMACS topology. \",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the genion tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_tpr_path\",\n \"required\": true,\n \"description\": \"Path to the input portable run input TPR file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/genion.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\"\n ]\n },\n {\n \"id\": \"output_gro_path\",\n \"required\": true,\n \"description\": \"Path to the input structure GRO file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_genion.gro\",\n \"formats\": [\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"input_top_zip_path\",\n \"required\": true,\n \"description\": \"Path the input TOP topology in zip format\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/genion.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"output_top_zip_path\",\n \"required\": true,\n \"description\": \"Path the output topology TOP and ITP files zipball\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_genion.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n }\n ]\n },\n {\n \"id\": \"genrestr\",\n \"description\": \"Creates a new GROMACS compressed topology applying the indicated force restrains to the given input compressed topology.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the genrestr tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input structure PDB, GRO or TPR format\",\n \"filetype\": 
\"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/genrestr.gro\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.gro$\",\n \".*\\\\.tpr$\"\n ]\n },\n {\n \"id\": \"input_ndx_path\",\n \"required\": true,\n \"description\": \"Path to the input GROMACS index file, NDX format\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/genrestr.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_itp_path\",\n \"required\": true,\n \"description\": \"Path the output ITP topology file with restrains\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_genrestr.itp\",\n \"formats\": [\n \".*\\\\.itp$\"\n ]\n }\n ]\n },\n {\n \"id\": \"grompp\",\n \"description\": \"Creates a GROMACS portable binary run input file (TPR) applying the desired properties from the input compressed GROMACS topology.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the grompp tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_md/master/biobb_md/test/data/config/config_grompp.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_gro_path\",\n \"required\": true,\n \"description\": \"Path to the input GROMACS structure GRO file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/grompp.gro\",\n \"formats\": [\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"input_top_zip_path\",\n \"required\": true,\n \"description\": \"Path the input GROMACS topology TOP and ITP files in zip format\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/grompp.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"output_tpr_path\",\n \"required\": true,\n \"description\": \"Path to the output portable binary run file TPR\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_grompp.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\"\n ]\n },\n {\n \"id\": \"input_cpt_path\",\n \"required\": false,\n \"description\": \"Path to the input GROMACS checkpoint file CPT\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.cpt$\"\n ]\n },\n {\n \"id\": \"input_ndx_path\",\n \"required\": false,\n \"description\": \"Path to the input GROMACS index files NDX\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n }\n ]\n },\n {\n \"id\": \"mdrun\",\n \"description\": \"Performs molecular dynamics simulations from an input GROMACS TPR file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the mdrun tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_tpr_path\",\n \"required\": true,\n \"description\": \"Path to the portable binary run input file TPR\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/mdrun.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\"\n ]\n },\n {\n \"id\": \"output_trr_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS uncompressed raw trajectory file TRR\",\n 
\"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_mdrun.trr\",\n \"formats\": [\n \".*\\\\.trr$\"\n ]\n },\n {\n \"id\": \"output_gro_path\",\n \"required\": true,\n \"description\": \"Path to the output GROMACS structure GRO file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_mdrun.gro\",\n \"formats\": [\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"output_edr_path\",\n \"required\": true,\n \"description\": \"Path to the output GROMACS portable energy file EDR\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_mdrun.edr\",\n \"formats\": [\n \".*\\\\.edr$\"\n ]\n },\n {\n \"id\": \"output_log_path\",\n \"required\": true,\n \"description\": \"Path to the output GROMACS trajectory log file LOG\",\n \"filetype\": \"output\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.log$\"\n ]\n },\n {\n \"id\": \"output_xtc_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS compressed trajectory file XTC\",\n \"filetype\": \"output\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.xtc$\"\n ]\n },\n {\n \"id\": \"output_cpt_path\",\n \"required\": false,\n \"description\": \"Path to the output GROMACS checkpoint file CPT\",\n \"filetype\": \"output\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.cpt$\"\n ]\n },\n {\n \"id\": \"output_dhdl_path\",\n \"required\": false,\n \"description\": \"Path to the output dhdl\",\n \"filetype\": \"output\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.xvg$\"\n ]\n }\n ]\n },\n {\n \"id\": \"make_ndx\",\n \"description\": \"Creates a GROMACS index file (NDX) from an input selection and an input GROMACS structure file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the make_ndx tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input GRO/PDB/TPR file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/make_ndx.tpr\",\n \"formats\": [\n \".*\\\\.gro$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tpr$\"\n ]\n },\n {\n \"id\": \"output_ndx_path\",\n \"required\": true,\n \"description\": \"Path to the output index NDX file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_make_ndx.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"input_ndx_path\",\n \"required\": false,\n \"description\": \"Path to the input index NDX file\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n }\n ]\n },\n {\n \"id\": \"select\",\n \"description\": \"Creates a GROMACS index file (NDX) from an input selection and an input GROMACS structure file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the select tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_md/master/biobb_md/test/data/config/config_select.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input GRO/PDB/TPR file\",\n 
\"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/make_ndx.tpr\",\n \"formats\": [\n \".*\\\\.gro$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tpr$\"\n ]\n },\n {\n \"id\": \"output_ndx_path\",\n \"required\": true,\n \"description\": \"Path to the output index NDX file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_select.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"input_ndx_path\",\n \"required\": false,\n \"description\": \"Path to the input index NDX file\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n }\n ]\n },\n {\n \"id\": \"solvate\",\n \"description\": \"Creates a new compressed GROMACS topology file adding solvent molecules to a given input compressed GROMACS topology file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the solvate tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_solute_gro_path\",\n \"required\": true,\n \"description\": \"Path to the input GRO file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/solvate.gro\",\n \"formats\": [\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"output_gro_path\",\n \"required\": true,\n \"description\": \"Path to the output GRO file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_solvate.gro\",\n \"formats\": [\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"input_top_zip_path\",\n \"required\": true,\n \"description\": \"Path the input TOP topology in zip format\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/solvate.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"output_top_zip_path\",\n \"required\": true,\n \"description\": \"Path the output topology in zip format\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_solvate.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n }\n ]\n },\n {\n \"id\": \"ndx2resttop\",\n \"description\": \"Creates a new GROMACS compressed topology applying the force restrains to the input groups in the input index file to the given input compressed topology.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the ndx2resttop tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_md/master/biobb_md/test/data/config/config_ndx2resttop.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_ndx_path\",\n \"required\": true,\n \"description\": \"Path to the input NDX index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs_extra/ndx2resttop.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"input_top_zip_path\",\n \"required\": true,\n \"description\": \"Path the input TOP topology in zip format\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs_extra/ndx2resttop.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": 
\"output_top_zip_path\",\n \"required\": true,\n \"description\": \"Path the output TOP topology in zip format\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs_extra/ref_ndx2resttop.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n }\n ]\n },\n {\n \"id\": \"append_ligand\",\n \"description\": \"Takes a ligand ITP file and inserts it in a topology.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the append_ligand tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_zip_path\",\n \"required\": true,\n \"description\": \"Path the input topology TOP and ITP files zipball\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs_extra/ndx2resttop.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_itp_path\",\n \"required\": true,\n \"description\": \"Path to the ligand ITP file to be inserted in the topology\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs_extra/pep_ligand.itp\",\n \"formats\": [\n \".*\\\\.itp$\"\n ]\n },\n {\n \"id\": \"output_top_zip_path\",\n \"required\": true,\n \"description\": \"Path/Name the output topology TOP and ITP files zipball\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs_extra/ref_appendligand.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n }\n ]\n }\n ]\n },\n {\n \"id\": \"biobb_model\",\n \"tools\": [\n {\n \"id\": \"fix_side_chain\",\n \"description\": \"Reconstructs the missing side chains and heavy atoms of the given PDB file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the fix_side_chain tool.\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_pdb_path\",\n \"required\": true,\n \"description\": \"Input PDB file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_model/raw/master/biobb_model/test/data/model/2ki5.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"output_pdb_path\",\n \"required\": true,\n \"description\": \"Output PDB file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_model/raw/master/biobb_model/test/reference/model/output_pdb_path.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n },\n {\n \"id\": \"mutate\",\n \"description\": \"Creates a new PDB file performing the mutations given in a list of amino acid mutations to the input PDB file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the mutate tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_model/master/biobb_model/test/data/config/config_mutate.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_pdb_path\",\n \"required\": true,\n \"description\": \"Input PDB file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_model/raw/master/biobb_model/test/data/model/2ki5.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"output_pdb_path\",\n \"required\": true,\n 
\"description\": \"Output PDB file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_model/raw/master/biobb_model/test/reference/model/output_mutated_pdb_path.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n }\n ]\n },\n {\n \"id\": \"biobb_pmx\",\n \"tools\": [\n {\n \"id\": \"mutate\",\n \"description\": \"pmx tool to insert mutated residues in structure files for free energy simulations\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the mutate tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_pmx/master/biobb_pmx/test/data/config/config_mutate.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input structure file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_pmx/raw/master/biobb_pmx/test/data/pmx/frame99.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"output_structure_path\",\n \"required\": true,\n \"description\": \"Path to the output structure file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_pmx/raw/master/biobb_pmx/test/reference/pmx/ref_output_structure.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"input_b_structure_path\",\n \"required\": false,\n \"description\": \"Path to the mutated input structure file\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.gro$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gentop\",\n \"description\": \"pmx tool to generate hybrid GROMACS topologies: adding a B state to an .itp or .top file for a hybrid residue\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gentop tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_pmx/master/biobb_pmx/test/data/config/config_gentop.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_zip_path\",\n \"required\": true,\n \"description\": \"Path the input GROMACS topology TOP and ITP files in zip format\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_pmx/raw/master/biobb_pmx/test/data/pmx/topology.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"output_top_zip_path\",\n \"required\": true,\n \"description\": \"Path the output TOP topology in zip format\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_pmx/raw/master/biobb_pmx/test/reference/pmx/ref_output_topology.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n }\n ]\n },\n {\n \"id\": \"analyse\",\n \"description\": \"pmx tool to calculate free energies from fast growth thermodynamic integration simulations.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the analyse tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_pmx/master/biobb_pmx/test/data/config/config_analyse.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_a_xvg_zip_path\",\n \"required\": true,\n \"description\": \"Path the zip file containing the dgdl\",\n \"filetype\": \"input\",\n \"sample\": 
\"https://github.com/bioexcel/biobb_pmx/raw/master/biobb_pmx/test/data/pmx/xvg_A.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_b_xvg_zip_path\",\n \"required\": true,\n \"description\": \"Path the zip file containing the dgdl\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_pmx/raw/master/biobb_pmx/test/data/pmx/xvg_B.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"output_result_path\",\n \"required\": true,\n \"description\": \"Path to the TXT results file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_pmx/raw/master/biobb_pmx/test/reference/pmx/ref_result.txt\",\n \"formats\": [\n \".*\\\\.txt$\"\n ]\n },\n {\n \"id\": \"output_work_plot_path\",\n \"required\": true,\n \"description\": \"Path to the PNG plot results file\",\n \"filetype\": \"output\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.png$\"\n ]\n }\n ]\n }\n ]\n },\n {\n \"id\": \"biobb_structure_utils\",\n \"tools\": [\n {\n \"id\": \"cat_pdb\",\n \"description\": \"Class to concat two PDB structures in a single PDB file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cat_pdb tool.\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure1\",\n \"required\": true,\n \"description\": \"Input structure 1 file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/cat_protein.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"input_structure2\",\n \"required\": true,\n \"description\": \"Input structure 2 file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/cat_ligand.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"output_structure_path\",\n \"required\": true,\n \"description\": \"Output protein file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/ref_cat_pdb.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n },\n {\n \"id\": \"extract_atoms\",\n \"description\": \"Class to extract atoms from a 3D structure.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the extract_atoms tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_structure_utils/master/biobb_structure_utils/test/data/config/config_extract_atoms.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Input structure file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/2vgb.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"output_structure_path\",\n \"required\": true,\n \"description\": \"Output structure file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/OE2_atoms.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.gro$\"\n ]\n }\n ]\n },\n {\n \"id\": \"extract_chain\",\n \"description\": \"Class to 
extract a chain from a 3D structure.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the extract_chain tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_structure_utils/master/biobb_structure_utils/test/data/config/config_extract_chain.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Input structure file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/extract_chain.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"output_structure_path\",\n \"required\": true,\n \"description\": \"Output structure file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/ref_extract_chain.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n },\n {\n \"id\": \"extract_heteroatoms\",\n \"description\": \"Class to extract hetero-atoms from a 3D structure.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the extract_heteroatoms tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_structure_utils/master/biobb_structure_utils/test/data/config/config_extract_heteroatoms.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Input structure file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/extract_heteroatom.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"output_heteroatom_path\",\n \"required\": true,\n \"description\": \"Output heteroatom file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/ref_extract_heteroatom.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n },\n {\n \"id\": \"extract_model\",\n \"description\": \"Class to extract a model from a 3D structure.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the extract_model tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_structure_utils/master/biobb_structure_utils/test/data/config/config_extract_model.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Input structure file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/extract_model.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"output_structure_path\",\n \"required\": true,\n \"description\": \"Output structure file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/ref_extract_model.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n },\n {\n \"id\": \"extract_protein\",\n \"description\": \"Class to extract a protein from a 3D structure.\",\n \"arguments\": [\n {\n \"id\": 
\"config\",\n \"required\": false,\n \"description\": \"Configuration file for the extract_protein tool.\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Input structure file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/extract_protein.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"output_protein_path\",\n \"required\": true,\n \"description\": \"Output protein file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/ref_extract_protein.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n },\n {\n \"id\": \"remove_ligand\",\n \"description\": \"Class to remove the selected ligand atoms from a 3D structure.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the remove_ligand tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_structure_utils/master/biobb_structure_utils/test/data/config/config_remove_ligand.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Input structure file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/WT_aq4_md_1.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"output_structure_path\",\n \"required\": true,\n \"description\": \"Output structure file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/WT_apo_md_1.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.gro$\"\n ]\n }\n ]\n },\n {\n \"id\": \"remove_pdb_water\",\n \"description\": \"Class to remove water molecules from PDB 3D structures.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the remove_pdb_water tool.\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_pdb_path\",\n \"required\": true,\n \"description\": \"Input PDB file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/WT_aq4_md_WAT.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n },\n {\n \"id\": \"output_pdb_path\",\n \"required\": true,\n \"description\": \"Output PDB file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/WT_apo_no_wat.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\"\n ]\n }\n ]\n },\n {\n \"id\": \"renumber_structure\",\n \"description\": \"Class to renumber atomic indexes from a 3D structure.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the renumber_structure tool.\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Input structure file path\",\n 
\"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/cl3.noH.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"output_structure_path\",\n \"required\": true,\n \"description\": \"Output structure file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/renum_cl3_noH.pdb\",\n \"formats\": [\n \".*\\\\.pdb$\",\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"output_mapping_json_path\",\n \"required\": true,\n \"description\": \"Output mapping json file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/cl3_output_mapping_json_path.json\",\n \"formats\": [\n \".*\\\\.json$\"\n ]\n }\n ]\n },\n {\n \"id\": \"sort_gro_residues\",\n \"description\": \"Class to sort the selected residues from a GRO 3D structure.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the sort_gro_residues tool.\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_structure_utils/master/biobb_structure_utils/test/data/config/config_sort_gro_residues.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_gro_path\",\n \"required\": true,\n \"description\": \"Input GRO file path\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/WT_aq4_md_1.gro\",\n \"formats\": [\n \".*\\\\.gro$\"\n ]\n },\n {\n \"id\": \"output_gro_path\",\n \"required\": true,\n \"description\": \"Output sorted GRO file path\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/WT_aq4_md_sorted.gro\",\n \"formats\": [\n \".*\\\\.gro$\"\n ]\n }\n ]\n }\n ]\n }\n ]\n}\n" ] ], [ [ "<a id=\"list_tools_ex\"></a>\n#### List of tools from a specific package\n\nFor more information about this endpoint, please visit the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest#/List%20of%20Services/getToolsList).\n\n##### Endpoint", "_____no_output_____" ], [ "**GET** `https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/{package}`", "_____no_output_____" ], [ "##### Code", "_____no_output_____" ] ], [ [ "package = 'biobb_analysis'\nurl = apiURL + 'launch/' + package\nresponse = get_data(url)\n\nprint(json.dumps(response.json, indent=2))", "{\n \"id\": \"biobb_analysis\",\n \"tools\": [\n {\n \"id\": \"gmx_cluster\",\n \"description\": \"Creates cluster structures from a given GROMACS compatible trajectory\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_cluster tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_cluster.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input structure file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n 
\".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_pdb_path\",\n \"required\": true,\n \"description\": \"Path to the output cluster file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_cluster.pdb\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_rms\",\n \"description\": \"Performs a Root Mean Square deviation (RMSd) analysis from a given GROMACS compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_rms tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_rms.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input structure file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_xvg_path\",\n \"required\": true,\n \"description\": \"Path to the XVG output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_rms.xvg\",\n \"formats\": [\n \".*\\\\.xvg$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_rgyr\",\n \"description\": \"Computes the radius of gyration (Rgyr) of a molecule about the x-, y- and z-axes, as a function of time, from a given GROMACS compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_rgyr tool\",\n \"filetype\": \"input\",\n 
\"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_rgyr.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input structure file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_xvg_path\",\n \"required\": true,\n \"description\": \"Path to the XVG output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_rgyr.xvg\",\n \"formats\": [\n \".*\\\\.xvg$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_energy\",\n \"description\": \"Extracts energy components from a given GROMACS energy file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_energy tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_energy.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_energy_path\",\n \"required\": true,\n \"description\": \"Path to the input EDR file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/energy.edr\",\n \"formats\": [\n \".*\\\\.edr$\"\n ]\n },\n {\n \"id\": \"output_xvg_path\",\n \"required\": true,\n \"description\": \"Path to the XVG output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_energy.xvg\",\n \"formats\": [\n \".*\\\\.xvg$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_image\",\n \"description\": \"Corrects periodicity (image) from a given GROMACS compatible trajectory file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_image tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_image.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n 
\".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS input topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_traj_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_image.xtc\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_trjconv_str\",\n \"description\": \"Converts between GROMACS compatible structure file formats and/or extracts a selection of atoms.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_trjconv_str tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_trjconv_str.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_structure_path\",\n \"required\": true,\n \"description\": \"Path to the input structure file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS input topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_str_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_trjconv.str.pdb\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_trjconv_str_ens\",\n \"description\": \"Extracts an ensemble of frames containing a selection of atoms from GROMACS compatible trajectory files.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": 
\"Configuration file for the gmx_trjconv_str_ens tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_trjconv_str_ens.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS input topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/topology.tpr\",\n \"formats\": [\n \".*\\\\.tpr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.brk$\",\n \".*\\\\.ent$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_str_ens_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_trjconv.str.ens.zip\",\n \"formats\": [\n \".*\\\\.zip$\"\n ]\n }\n ]\n },\n {\n \"id\": \"gmx_trjconv_trj\",\n \"description\": \"Converts between GROMACS compatible trajectory file formats and/or extracts a selection of atoms.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the gmx_trjconv_trj tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_gmx_trjconv_trj.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the GROMACS trajectory file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/trajectory.trr\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.cpt$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n },\n {\n \"id\": \"input_index_path\",\n \"required\": false,\n \"description\": \"Path to the GROMACS index file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/gromacs/index.ndx\",\n \"formats\": [\n \".*\\\\.ndx$\"\n ]\n },\n {\n \"id\": \"output_traj_path\",\n \"required\": true,\n \"description\": \"Path to the output file\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/gromacs/ref_trjconv.trj.xtc\",\n \"formats\": [\n \".*\\\\.xtc$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.g96$\",\n \".*\\\\.pdb$\",\n \".*\\\\.tng$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_average\",\n \"description\": \"Calculates a structure average of a given cpptraj compatible trajectory.\",\n 
\"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_average tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_average.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed structure\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.average.pdb\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_bfactor\",\n \"description\": \"Calculates the Bfactor fluctuations of a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_bfactor tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": 
\"input_exp_path\",\n \"required\": false,\n \"description\": \"Path to the experimental reference file (required if reference = experimental)\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/experimental.1e5t.pdb\",\n \"formats\": null\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed analysis\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.bfactor.first.dat\",\n \"formats\": [\n \".*\\\\.dat$\",\n \".*\\\\.agr$\",\n \".*\\\\.xmgr$\",\n \".*\\\\.gnu$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_rms\",\n \"description\": \"Calculates the Root Mean Square deviation (RMSd) of a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_rms tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"input_exp_path\",\n \"required\": false,\n \"description\": \"Path to the experimental reference file (required if reference = experimental)\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/experimental.1e5t.pdb\",\n \"formats\": null\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed analysis\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.rms.first.dat\",\n \"formats\": [\n \".*\\\\.dat$\",\n \".*\\\\.agr$\",\n \".*\\\\.xmgr$\",\n \".*\\\\.gnu$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_rmsf\",\n \"description\": \"Calculates the Root Mean Square fluctuations (RMSf) of a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_rmsf tool\",\n \"filetype\": \"input\",\n \"sample\": null,\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": 
\"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"input_exp_path\",\n \"required\": false,\n \"description\": \"Path to the experimental reference file (required if reference = experimental)\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/experimental.1e5t.pdb\",\n \"formats\": null\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed analysis\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.rmsf.first.dat\",\n \"formats\": [\n \".*\\\\.dat$\",\n \".*\\\\.agr$\",\n \".*\\\\.xmgr$\",\n \".*\\\\.gnu$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_rgyr\",\n \"description\": \"Computes the radius of gyration (Rgyr) from a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_rgyr tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_rgyr.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output analysis\",\n \"filetype\": \"output\",\n \"sample\": 
\"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.rgyr.dat\",\n \"formats\": [\n \".*\\\\.dat$\",\n \".*\\\\.agr$\",\n \".*\\\\.xmgr$\",\n \".*\\\\.gnu$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_dry\",\n \"description\": \"Dehydrates a given cpptraj compatible trajectory stripping out solvent molecules and ions.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_dry tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_dry.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.dry.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_strip\",\n \"description\": \"Strips a defined set of atoms (mask) from a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_strip tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_strip.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": 
\"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.strip.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_snapshot\",\n \"description\": \"Extracts a particular snapshot from a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_snapshot tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_snapshot.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed structure\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.snapshot.pdb\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_slice\",\n \"description\": \"Extracts a particular trajectory slice from a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_slice tool\",\n 
\"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_slice.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.slice.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_convert\",\n \"description\": \"Converts between cpptraj compatible trajectory file formats and/or extracts a selection of atoms or frames.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_convert tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_convert.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n 
},\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.convert.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_mask\",\n \"description\": \"Extracts a selection of atoms from a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_mask tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_mask.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.mask.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n },\n {\n \"id\": \"cpptraj_image\",\n \"description\": \"Corrects periodicity (image) from a given cpptraj trajectory file.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_image tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_image.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n 
\".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n \".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed trajectory\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.image.netcdf\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n }\n ]\n}\n" ] ], [ [ "<a id=\"tools_prop_ex\"></a>\n#### Tool's properties\n\nFor more information about this endpoint, please visit the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest#/Launch%20Tool/getLaunchTool).\n\n##### Endpoint", "_____no_output_____" ], [ "**GET** `https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/{package}/{tool}`", "_____no_output_____" ], [ "##### Code", "_____no_output_____" ] ], [ [ "package = 'biobb_analysis'\ntool = 'cpptraj_average'\nurl = apiURL + 'launch/' + package + '/' + tool\nresponse = get_data(url)\n\nprint(json.dumps(response.json, indent=2))", "{\n \"id\": \"cpptraj_average\",\n \"description\": \"Calculates a structure average of a given cpptraj compatible trajectory.\",\n \"arguments\": [\n {\n \"id\": \"config\",\n \"required\": false,\n \"description\": \"Configuration file for the cpptraj_average tool\",\n \"filetype\": \"input\",\n \"sample\": \"https://raw.githubusercontent.com/bioexcel/biobb_analysis/master/biobb_analysis/test/data/config/config_cpptraj_average.json\",\n \"formats\": [\n \".*\\\\.json$\",\n \".*\\\\.yml$\"\n ]\n },\n {\n \"id\": \"input_top_path\",\n \"required\": true,\n \"description\": \"Path to the input structure or topology file\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.parm.top\",\n \"formats\": [\n \".*\\\\.top$\",\n \".*\\\\.pdb$\",\n \".*\\\\.prmtop$\",\n \".*\\\\.parmtop$\",\n \".*\\\\.zip$\"\n ]\n },\n {\n \"id\": \"input_traj_path\",\n \"required\": true,\n \"description\": \"Path to the input trajectory to be processed\",\n \"filetype\": \"input\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/data/ambertools/cpptraj.traj.dcd\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.cdf$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.restart$\",\n \".*\\\\.ncrestart$\",\n \".*\\\\.restartnc$\",\n \".*\\\\.dcd$\",\n \".*\\\\.charmm$\",\n \".*\\\\.cor$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.trr$\",\n \".*\\\\.gro$\",\n \".*\\\\.binpos$\",\n \".*\\\\.xtc$\",\n \".*\\\\.cif$\",\n \".*\\\\.arc$\",\n \".*\\\\.sqm$\",\n \".*\\\\.sdf$\",\n 
\".*\\\\.conflib$\"\n ]\n },\n {\n \"id\": \"output_cpptraj_path\",\n \"required\": true,\n \"description\": \"Path to the output processed structure\",\n \"filetype\": \"output\",\n \"sample\": \"https://github.com/bioexcel/biobb_analysis/raw/master/biobb_analysis/test/reference/ambertools/ref_cpptraj.average.pdb\",\n \"formats\": [\n \".*\\\\.crd$\",\n \".*\\\\.netcdf$\",\n \".*\\\\.rst7$\",\n \".*\\\\.ncrst$\",\n \".*\\\\.dcd$\",\n \".*\\\\.pdb$\",\n \".*\\\\.mol2$\",\n \".*\\\\.binpos$\",\n \".*\\\\.trr$\",\n \".*\\\\.xtc$\",\n \".*\\\\.sqm$\"\n ]\n }\n ]\n}\n" ] ], [ [ "<a id=\"launch_tool_ex\"></a>\n### Launch tool\n\nFor more information about this endpoint, please visit the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest#/Launch%20Tool/postLaunchTool). The documentation for all the tools is available in the [BioBB REST API Tools Documentation section](https://mmb.irbbarcelona.org/biobb-api/tools-documentation?docExpansion=none). Interactive examples for all the tools are available in the [BioBB REST API Tools Execution section](https://mmb.irbbarcelona.org/biobb-api/tools-execution).\n\nDefinition of functions needed for launch a job:", "_____no_output_____" ] ], [ [ "from io import BytesIO\nfrom pathlib import Path\n\n# Function used for encode python dictionary to JSON file\ndef encode_config(data):\n jsonData = json.dumps(data)\n binaryData = jsonData.encode()\n return BytesIO(binaryData)\n\n# Launch job\ndef launch_job(url, **kwargs):\n data = {}\n files = {}\n # Fill data (output paths) and files (input files) objects\n for key, value in kwargs.items():\n # Inputs / Outputs\n if type(value) is str:\n if key.startswith('input'):\n files[key] = (value, open(value, 'rb'))\n elif key.startswith('output'):\n data[key] = value\n elif Path(value).is_file():\n files[key] = (value, open(value, 'rb'))\n # Properties (in case properties are provided as a dictionary instead of a file)\n if type(value) is dict:\n files['config'] = ('prop.json', encode_config(value))\n # Request URL with data and files\n response = post_data(url, data, files)\n # Print REST API response\n print(json.dumps(response.json, indent=2))\n # Save token if status == 303\n if response.status == 303:\n token = response.json['token']\n return token", "_____no_output_____" ] ], [ [ "Hereafter we will launch a job on *biobb_analysis.cpptraj_average* tool with the provided *files/* in the files folder of this same repository. The response is a JSON with the status code, the state of the job, a message and a token for checking the job status.\n\n<a id=\"tool_yml_ex\"></a>\n#### Launch job with a YAML file config\n\n##### File config", "_____no_output_____" ], [ "```yaml \nproperties:\n in_parameters:\n start: 1\n end: -1\n step: 1\n mask: c-alpha\n out_parameters:\n format: pdb\n```", "_____no_output_____" ], [ "##### Endpoint", "_____no_output_____" ], [ "**POST** `https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/{package}/{tool}`", "_____no_output_____" ], [ "##### Code", "_____no_output_____" ], [ "The function below sends POST data and files to the *{package}/{tool}* endpoint. The config properties are sent as a YAML file.\n\nThe response is a JSON with the status code, the state of the job, a message and a token that will be used for checking the job status in the next step. 
", "_____no_output_____" ] ], [ [ "# Launch BioBB on REST API with YAML config file\n\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/cpptraj_average', \n config = 'files/config.yml',\n input_top_path = 'files/cpptraj.parm.top',\n input_traj_path = 'files/cpptraj.traj.dcd',\n output_cpptraj_path = 'output.cpptraj.average.pdb')", "{\n \"code\": 303,\n \"state\": \"RUNNING\",\n \"message\": \"The requested job has has been successfully launched, please go to /retrieve/status/{token} for checking job status.\",\n \"token\": \"fe2805760eeeec0d5b8a34fbc40aa6c2a2d68c7ba1663cccb88659b1e149c898a414bbc04e37bb73efc725b7a29de2a93ffb55e6ef85cd6467f3d62a06ea5bfa\"\n}\n" ] ], [ [ "<a id=\"tool_json_ex\"></a>\n#### Launch job with a JSON file config\n\nFile config:", "_____no_output_____" ], [ "```json\n{\n\t\"in_parameters\": {\n\t\t\"start\": 1,\n\t\t\"end\": -1,\n\t\t\"step\": 1,\n\t\t\"mask\": \"c-alpha\"\n\t},\n\t\"out_parameters\": {\n\t\t\"format\": \"pdb\"\n\t}\n}\n```", "_____no_output_____" ], [ "##### Endpoint", "_____no_output_____" ], [ "**POST** `https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/{package}/{tool}`", "_____no_output_____" ], [ "##### Code", "_____no_output_____" ], [ "The function below sends POST data and files to the *{package}/{tool}* endpoint. The config properties are sent as a JSON file.\n\nThe response is a JSON with the status code, the state of the job, a message and a token that will be used for checking the job status in the next step. ", "_____no_output_____" ] ], [ [ "# Launch BioBB on REST API with JSON config file\n\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/cpptraj_average', \n config = 'files/config.json',\n input_top_path = 'files/cpptraj.parm.top',\n input_traj_path = 'files/cpptraj.traj.dcd',\n output_cpptraj_path = 'output.cpptraj.average.pdb')", "{\n \"code\": 303,\n \"state\": \"RUNNING\",\n \"message\": \"The requested job has has been successfully launched, please go to /retrieve/status/{token} for checking job status.\",\n \"token\": \"84ab5ef63d82ab3fa4f120532949905d83f6aff65f101cb1ed5fdd5f05acb00421ddc4560098f877f26a96972a8ea8521ab222a0bb78a5ffa9d213c0ab2618c9\"\n}\n" ] ], [ [ "<a id=\"tool_dict_ex\"></a>\n#### Launch job with a python dictionary config", "_____no_output_____" ], [ "##### Endpoint", "_____no_output_____" ], [ "**POST** `https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/{package}/{tool}`", "_____no_output_____" ], [ "##### Code", "_____no_output_____" ], [ "The function below sends POST data and files to the *{package}/{tool}* endpoint. The config properties are sent as a python dictionary embedded in the code.\n\nThe response is a JSON with the status code, the state of the job, a message and a token that will be used for checking the job status in the next step. 
", "_____no_output_____" ] ], [ [ "# Launch BioBB on REST API with JSON config file\n\nprop = {\n \"in_parameters\" : {\n \"start\": 1,\n \"end\": -1,\n \"step\": 1,\n \"mask\": \"c-alpha\"\n },\n \"out_parameters\" : {\n \"format\": \"pdb\"\n }\n}\n\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/cpptraj_average', \n config = prop,\n input_top_path = 'files/cpptraj.parm.top',\n input_traj_path = 'files/cpptraj.traj.dcd',\n output_cpptraj_path = 'output.cpptraj.average.pdb')", "{\n \"code\": 303,\n \"state\": \"RUNNING\",\n \"message\": \"The requested job has has been successfully launched, please go to /retrieve/status/{token} for checking job status.\",\n \"token\": \"98013d74bef397d5498db3eb1008e5e136702d63903b6ea0cb5a2db44c4a4e0adbcd1ce9999915acd90c444f8749880c052185bbbfc747c1ebc7d67d6d2c84c8\"\n}\n" ] ], [ [ "<a id=\"retrieve_status_ex\"></a>\n### Retrieve status\n\nFor more information about this endpoint, please visit the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest#/Retrieve/getRetrieveStatus).\n\nDefinition of functions needed for retrieve the status of a job:", "_____no_output_____" ] ], [ [ "import datetime\nfrom time import sleep\n\n# Checks status until a provided \"ok\" status is returned by the response\ndef check_status(url, ok, error):\n counter = 0\n while True:\n if counter < 10: slp = 1\n if counter >= 10 and counter < 60: slp = 10\n if counter >= 60: slp = 60\n counter = counter + slp\n sleep(slp)\n r = requests.get(url)\n if r.status_code == ok or r.status_code == error:\n return counter\n break\n\n# Function that checks the status and parses the reponse JSON for saving the output files in a list\ndef check_job(token, apiURL):\n # define retrieve status URL\n url = apiURL + 'retrieve/status/' + token\n # check status until job has finished\n counter = check_status(url, 200, 500)\n # Get content when status = 200\n response = get_data(url)\n # Save id for the generated output_files\n if response.status == 200:\n out_files = []\n for outf in response.json['output_files']:\n item = { 'id': outf['id'], 'name': outf['name'] }\n out_files.append(item)\n\n # Print REST API response\n print(\"Total elapsed time: %s\" % str(datetime.timedelta(seconds=counter)))\n print(\"REST API JSON response:\")\n print(json.dumps(response.json, indent=4))\n \n if response.status == 200: \n return out_files\n else: return None", "_____no_output_____" ] ], [ [ "##### Endpoint", "_____no_output_____" ], [ "**GET** `https://mmb.irbbarcelona.org/biobb-api/rest/v1/retrieve/status/{token}`", "_____no_output_____" ], [ "##### Code", "_____no_output_____" ], [ "The function below checks the status of a job and awaits until the response status is `200`. The response is a JSON with the status code, the state of the job, a message, a list with all the generated output files and the date of the expiration of these files. Additionally, the function also provides the elapsed time since the job has been launched until it has finished. 
", "_____no_output_____" ] ], [ [ "# Check job status\nout_files = check_job(token, apiURL)", "Total elapsed time: 0:00:20\nREST API JSON response:\n{\n \"code\": 200,\n \"state\": \"FINISHED\",\n \"message\": \"The requested job has finished successfully, please go to /retrieve/data/{id} for each output_files.\",\n \"output_files\": [\n {\n \"id\": \"5e42837a40fe75.05757111\",\n \"name\": \"output.cpptraj.average.pdb\",\n \"size\": 77397,\n \"mimetype\": \"text/plain\"\n }\n ],\n \"expiration\": \"February 13, 2020 00:00 GMT+0000\"\n}\n" ] ], [ [ "<a id=\"retrieve_data_ex\"></a>\n### Retrieve data\n\nFor more information about this endpoint, please visit the [BioBB REST API Documentation section](https://mmb.irbbarcelona.org/biobb-api/rest#/Retrieve/getRetrieveData).\n\nDefinition of functions needed for retrieve the output file(s) generated by a job:", "_____no_output_____" ] ], [ [ "# Downloads to disk a file from a given URL\ndef get_file(url, filename):\n r = requests.get(url, allow_redirects=True)\n file = open(filename,'wb') \n file.write(r.content) \n file.close()\n\n# Retrieves all the files provided in the out_files list\ndef retrieve_data(out_files, apiURL):\n if not out_files:\n return \"No files provided\"\n for outf in out_files:\n get_file(apiURL + 'retrieve/data/' + outf['id'], outf['name'])", "_____no_output_____" ] ], [ [ "##### Endpoint", "_____no_output_____" ], [ "**GET** `https://mmb.irbbarcelona.org/biobb-api/rest/v1/retrieve/data/{id}`", "_____no_output_____" ], [ "##### Code", "_____no_output_____" ], [ "The function below makes a single call to the *retrieve/data* endpoint for each output file got in the *retrieve/status* endpoint and save the generated file(s) to disk.", "_____no_output_____" ] ], [ [ "# Save generated file(s) to disk\n\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"practical_cases\"></a>\n## Practical cases", "_____no_output_____" ], [ "Now we will execute some Bioexcel Building Blocks through the BioBB REST API and with the results we will do some interactions with other python libraries such as [plotly](https://plot.ly/python/offline/) or [nglview](http://nglviewer.org/#nglview).", "_____no_output_____" ], [ "<a id=\"example1\"></a>\n### Example 1: download PDB file from RSCB database", "_____no_output_____" ], [ "Launch the *biobb_io.pdb* job that downloads a PDB file from the RSCB database:", "_____no_output_____" ] ], [ [ "# Downloading desired PDB file\n\n# Create properties dict and inputs/outputs\ndownloaded_pdb = '3EBP.pdb'\nprop = {\n 'pdb_code': '3EBP',\n 'filter': False\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_io/pdb', \n config = prop,\n output_pdb_path = downloaded_pdb)\n", "{\n \"code\": 303,\n \"state\": \"RUNNING\",\n \"message\": \"The requested job has has been successfully launched, please go to /retrieve/status/{token} for checking job status.\",\n \"token\": \"af60d733db949a71167f3aa6a7a793fc520b5a4176b57a770bed4798654a79be2a47b81e6a77de4eb285de84f9b768b119004f0bfbbe4be9e5ff1ffe31b81fd9\"\n}\n" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "Total elapsed time: 0:00:06\nREST API JSON response:\n{\n \"code\": 200,\n \"state\": \"FINISHED\",\n \"message\": \"The requested job has finished successfully, please go to /retrieve/data/{id} for each output_files.\",\n \"output_files\": [\n {\n \"id\": \"5e428389eeafa3.49051362\",\n \"name\": \"3EBP.pdb\",\n \"size\": 609120,\n \"mimetype\": \"text/plain\"\n }\n ],\n \"expiration\": \"February 13, 
2020 00:00 GMT+0000\"\n}\n" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "Visualize downloaded PDB in NGLView:", "_____no_output_____" ] ], [ [ "import nglview\n\n# Show protein\nview = nglview.show_structure_file(downloaded_pdb)\nview.add_representation(repr_type='ball+stick', selection='het')\nview._remote_call('setSize', target='Widget', args=['','600px'])\nview", "_____no_output_____" ], [ "view.render_image()\nview.download_image(filename='ngl1.png')", "_____no_output_____" ] ], [ [ "<img src='ngl1.png'></img>", "_____no_output_____" ], [ "<a id=\"example2\"></a>\n### Example 2: extract heteroatom from a given structure", "_____no_output_____" ], [ "Launch the *biobb_structure_utils.extract_heteroatoms* job that extracts a heteroatom from a PDB file.", "_____no_output_____" ] ], [ [ "# Extracting heteroatom from a given structure\n\n# Create properties dict and inputs/outputs\nheteroatom = 'CPB.pdb'\nprop = {\n 'heteroatoms': [{\n 'name': 'CPB'\n }]\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_structure_utils/extract_heteroatoms', \n config = prop,\n input_structure_path = downloaded_pdb,\n output_heteroatom_path = heteroatom)\n", "{\n \"code\": 303,\n \"state\": \"RUNNING\",\n \"message\": \"The requested job has has been successfully launched, please go to /retrieve/status/{token} for checking job status.\",\n \"token\": \"740c9ac30767ba996e445e4ed05c151ee903fed2234c0cc7ace6ec3ba4e1fa8bdcd5a3c6835c7f1038530eb81c4cc319674f235cf55b38863903181dce09a8d1\"\n}\n" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "Total elapsed time: 0:00:20\nREST API JSON response:\n{\n \"code\": 200,\n \"state\": \"FINISHED\",\n \"message\": \"The requested job has finished successfully, please go to /retrieve/data/{id} for each output_files.\",\n \"output_files\": [\n {\n \"id\": \"5e4283986555a0.86371712\",\n \"name\": \"CPB.pdb\",\n \"size\": 2268,\n \"mimetype\": \"text/plain\"\n }\n ],\n \"expiration\": \"February 13, 2020 00:00 GMT+0000\"\n}\n" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "Visualize generated extracted heteroatom in NGLView:", "_____no_output_____" ] ], [ [ "# Show protein\nview = nglview.show_structure_file(heteroatom)\nview.add_representation(repr_type='ball+stick', selection='het')\nview._remote_call('setSize', target='Widget', args=['','600px'])\nview", "_____no_output_____" ], [ "view.render_image()\nview.download_image(filename='ngl2.png')", "_____no_output_____" ] ], [ [ "<img src='ngl2.png'></img>", "_____no_output_____" ], [ "<a id=\"example3\"></a>\n### Example 3: extract energy components from a given GROMACS energy file", "_____no_output_____" ] ], [ [ "# GMXEnergy: Getting system energy by time \n\n# Create prop dict and inputs/outputs\noutput_min_ene_xvg ='file_min_ene.xvg'\noutput_min_edr = 'files/1AKI_min.edr'\nprop = {\n 'terms': [\"Potential\"]\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/gmx_energy',\n config = prop,\n input_energy_path = output_min_edr,\n output_xvg_path = output_min_ene_xvg)", "{\n \"code\": 303,\n \"state\": \"RUNNING\",\n \"message\": \"The requested job has has been successfully launched, please go to /retrieve/status/{token} for checking job status.\",\n \"token\": \"170e8e2645d179eaa40e2de652f3e6dec909ef1df4642526ba789ed21806bab917bb4ce7f9fb730dfe635155f52ac9f3869c4429afcc767bd190f01337a8a718\"\n}\n" ], [ "# Check job status\nout_files 
= check_job(token, apiURL)", "Total elapsed time: 0:00:08\nREST API JSON response:\n{\n \"code\": 200,\n \"state\": \"FINISHED\",\n \"message\": \"The requested job has finished successfully, please go to /retrieve/data/{id} for each output_files.\",\n \"output_files\": [\n {\n \"id\": \"5e4283a6c70143.38956052\",\n \"name\": \"file_min_ene.xvg\",\n \"size\": 54143,\n \"mimetype\": \"text/plain\"\n }\n ],\n \"expiration\": \"February 13, 2020 00:00 GMT+0000\"\n}\n" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "Visualize generated energy file in plotly:", "_____no_output_____" ] ], [ [ "import plotly\nimport plotly.graph_objs as go\n\n#Read data from file and filter energy values higher than 1000 Kj/mol^-1\nwith open(output_min_ene_xvg,'r') as energy_file:\n x,y = map(\n list,\n zip(*[\n (float(line.split()[0]),float(line.split()[1]))\n for line in energy_file \n if not line.startswith((\"#\",\"@\")) \n if float(line.split()[1]) < 1000 \n ])\n )\n\nplotly.offline.init_notebook_mode(connected=True)\n\nfig = {\n \"data\": [go.Scatter(x=x, y=y)],\n \"layout\": go.Layout(title=\"Energy Minimization\",\n xaxis=dict(title = \"Energy Minimization Step\"),\n yaxis=dict(title = \"Potential Energy KJ/mol-1\")\n )\n}\n\nplotly.offline.iplot(fig)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d0baaba7e59883938e17866b4b206fbfa45ce504
10,271
ipynb
Jupyter Notebook
_posts/scikit/multidimensional-scaling/Multi-Dimensional Scaling.ipynb
bmb804/documentation
57826d25e0afea7fff6a8da9abab8be2f7a4b48c
[ "CC-BY-3.0" ]
2
2019-06-24T23:55:53.000Z
2019-07-08T12:22:56.000Z
_posts/scikit/multidimensional-scaling/Multi-Dimensional Scaling.ipynb
bmb804/documentation
57826d25e0afea7fff6a8da9abab8be2f7a4b48c
[ "CC-BY-3.0" ]
15
2020-06-30T21:21:30.000Z
2021-08-02T21:16:33.000Z
_posts/scikit/multidimensional-scaling/Multi-Dimensional Scaling.ipynb
bmb804/documentation
57826d25e0afea7fff6a8da9abab8be2f7a4b48c
[ "CC-BY-3.0" ]
1
2019-11-10T04:01:48.000Z
2019-11-10T04:01:48.000Z
29.771014
316
0.52877
[ [ [ "An illustration of the metric and non-metric MDS on generated noisy data.\n\nThe reconstructed points using the metric MDS and non metric MDS are slightly shifted to avoid overlapping.", "_____no_output_____" ], [ "#### New to Plotly?\nPlotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).\n<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).\n<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!", "_____no_output_____" ], [ "### Version", "_____no_output_____" ] ], [ [ "import sklearn\nsklearn.__version__", "_____no_output_____" ] ], [ [ "### Imports", "_____no_output_____" ] ], [ [ "print(__doc__)\n\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n\nimport numpy as np\n\nfrom sklearn import manifold\nfrom sklearn.metrics import euclidean_distances\nfrom sklearn.decomposition import PCA", "Automatically created module for IPython interactive environment\n" ] ], [ [ "### Calculations", "_____no_output_____" ] ], [ [ "n_samples = 20\nseed = np.random.RandomState(seed=3)\nX_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)\nX_true = X_true.reshape((n_samples, 2))\n# Center the data\nX_true -= X_true.mean()\n\nsimilarities = euclidean_distances(X_true)\n\n# Add noise to the similarities\nnoise = np.random.rand(n_samples, n_samples)\nnoise = noise + noise.T\nnoise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0\nsimilarities += noise\n\nmds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,\n dissimilarity=\"precomputed\", n_jobs=1)\npos = mds.fit(similarities).embedding_\n\nnmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,\n dissimilarity=\"precomputed\", random_state=seed, n_jobs=1,\n n_init=1)\nnpos = nmds.fit_transform(similarities, init=pos)\n\n# Rescale the data\npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())\nnpos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())\n\n# Rotate the data\nclf = PCA(n_components=2)\nX_true = clf.fit_transform(X_true)\n\npos = clf.fit_transform(pos)\n\nnpos = clf.fit_transform(npos)", "_____no_output_____" ] ], [ [ "### Plot Results", "_____no_output_____" ] ], [ [ "data = []\np1 = go.Scatter(x=X_true[:, 0], y=X_true[:, 1], \n mode='markers+lines',\n marker=dict(color='navy', size=10),\n line=dict(width=1),\n name='True Position')\ndata.append(p1)\np2 = go.Scatter(x=pos[:, 0], y=pos[:, 1],\n mode='markers+lines',\n marker=dict(color='turquoise', size=10),\n line=dict(width=1),\n name='MDS')\ndata.append(p2)\np3 = go.Scatter(x=npos[:, 0], y=npos[:, 1], \n mode='markers+lines',\n marker=dict(color='orange', size=10),\n line=dict(width=1),\n name='NMDS')\ndata.append(p3)\n\nsimilarities = similarities.max() / similarities * 100\nsimilarities[np.isinf(similarities)] = 0\n\n# Plot the edges\nstart_idx, end_idx = np.where(pos)\n# a sequence of (*line0*, *line1*, *line2*), where::\n# linen = (x0, y0), (x1, y1), ... 
(xm, ym)\nsegments = [[X_true[i, :], X_true[j, :]]\n for i in range(len(pos)) for j in range(len(pos))]\nvalues = np.abs(similarities)\nfor i in range(len(segments)):\n p4 = go.Scatter(x=[segments[i][0][0],segments[i][1][0]],\n y=[segments[i][0][1],segments[i][1][1]],\n mode = 'lines',\n showlegend=False,\n line = dict(\n color = 'lightblue',\n width = 0.5))\n data.append(p4)\n \nlayout = go.Layout(xaxis=dict(zeroline=False, showgrid=False,\n ticks='', showticklabels=False),\n yaxis=dict(zeroline=False, showgrid=False,\n ticks='', showticklabels=False),\n height=900, hovermode='closest')\nfig = go.Figure(data=data, layout=layout)", "_____no_output_____" ], [ "py.iplot(fig)", "_____no_output_____" ] ], [ [ "### License", "_____no_output_____" ], [ "Author: \n \n Nelle Varoquaux <[email protected]>\n\nLicense:\n \n BSD\n", "_____no_output_____" ] ], [ [ "from IPython.display import display, HTML\n\ndisplay(HTML('<link href=\"//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700\" rel=\"stylesheet\" type=\"text/css\" />'))\ndisplay(HTML('<link rel=\"stylesheet\" type=\"text/css\" href=\"http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css\">'))\n\n! pip install git+https://github.com/plotly/publisher.git --upgrade\nimport publisher\npublisher.publish(\n 'Multi-Dimensional Scaling.ipynb', 'scikit-learn/plot-mds/', 'Multi-Dimensional Scaling | plotly',\n '',\n title = 'Multi-Dimensional Scaling | plotly',\n name = 'Multi-Dimensional Scaling',\n has_thumbnail='true', thumbnail='thumbnail/mds.jpg', \n language='scikit-learn', page_type='example_index',\n display_as='manifold_learning', order=2,\n ipynb= '~Diksha_Gabha/3320')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d0babb137726ac4e03b5a6d84fb85faf71e2d513
32,011
ipynb
Jupyter Notebook
wcs-1.0.0-example.ipynb
ethanfahy/wcs-1.0.0-example-notebook
44826a6f5491ce2014b7e50eadcca2304ef965ef
[ "MIT" ]
null
null
null
wcs-1.0.0-example.ipynb
ethanfahy/wcs-1.0.0-example-notebook
44826a6f5491ce2014b7e50eadcca2304ef965ef
[ "MIT" ]
null
null
null
wcs-1.0.0-example.ipynb
ethanfahy/wcs-1.0.0-example-notebook
44826a6f5491ce2014b7e50eadcca2304ef965ef
[ "MIT" ]
null
null
null
49.096626
507
0.564087
[ [ [ "# Web Coverage Service (WCS) Download Example\n## Introduction\nWe'll demonstrate how to download a GeoTIFF data file from a public WCS service using Python 3. \n### WCS Data Service\nFor this demonstration we'll use Landfire (LF_1.4.0): https://www.landfire.gov/data_access.php\nFor Landfire LF_1.4.0 we see that the base URL is https://landfire.cr.usgs.gov/arcgis/services/Landfire/US_140/MapServer/WCSServer\n## WCS Requests\nThere are three types of WCS requests:\n- GetCapabilities\n- DescribeCoverage\n- GetCoverage\n\nGenerally, you first do a GetCapabilities request to obtain high-level information about what data you can ask for from the WCS server. \nYou then perform a DescribeCoverage request to get information specific to the coverage you want to get data from. \nFinally, you perform a GetCoverage to obtain the data itself.\n### GetCapabilities\nLet's perform a GetCapabilities request on the Landfire WCS server to see what the service can do.", "_____no_output_____" ] ], [ [ "import requests\n\n# base WCS server URL\nwcs_base_url = \"https://landfire.cr.usgs.gov/arcgis/services/Landfire/US_140/MapServer/WCSServer\"\n\n# add on to the base WCS server URL to define GetCapabilities URL\nwcs_get_capabilities_url = wcs_base_url + \"?REQUEST=GetCapabilities&SERVICE=WCS\"\n\n# perform an HTTP GET request with the GetCapabilities URL\nwcs_get_capabilities_response = requests.get(wcs_get_capabilities_url)\n\n# show the resulting body of the GetCapabilities request\nprint(\"GetCapabilities Response:\")\nprint(wcs_get_capabilities_response.text)", "GetCapabilities Response:\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Capabilities xmlns=\"http://www.opengis.net/wcs/1.1\"\n xmlns:ows=\"http://www.opengis.net/ows/1.1\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xsi:schemaLocation=\"http://www.opengis.net/wcs/1.1 http://schemas.opengis.net/wcs/1.1/wcsGetCapabilities.xsd http://www.opengis.net/ows/1.1/ http://schemas.opengis.net/ows/1.1.0/owsAll.xsd\" version=\"1.1.2\">\n <ows:ServiceIdentification>\n <ows:Title>US_140</ows:Title>\n <ows:ServiceType>WCS</ows:ServiceType>\n <ows:ServiceTypeVersion>1.0.0</ows:ServiceTypeVersion>\n <ows:ServiceTypeVersion>1.1.0</ows:ServiceTypeVersion>\n <ows:ServiceTypeVersion>1.1.1</ows:ServiceTypeVersion>\n <ows:ServiceTypeVersion>1.1.2</ows:ServiceTypeVersion>\n <ows:Fees>NONE</ows:Fees>\n <ows:AccessConstraints>NONE</ows:AccessConstraints>\n </ows:ServiceIdentification>\n <ows:ServiceProvider>\n <ows:ProviderName></ows:ProviderName>\n <ows:ServiceContact>\n <ows:ContactInfo>\n <ows:Phone>\n </ows:Phone>\n <ows:Address>\n </ows:Address>\n <ows:OnlineResource xlink:href=\"https://landfire.cr.usgs.gov/arcgis/services/Landfire/US_140/MapServer/WCSServer\"/>\n </ows:ContactInfo>\n </ows:ServiceContact>\n </ows:ServiceProvider>\n <ows:OperationsMetadata>\n <ows:Operation name=\"GetCapabilities\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"https://landfire.cr.usgs.gov/arcgis/services/Landfire/US_140/MapServer/WCSServer\"/>\n <ows:Post xlink:href=\"https://landfire.cr.usgs.gov/arcgis/services/Landfire/US_140/MapServer/WCSServer\"/>\n </ows:HTTP>\n </ows:DCP>\n <ows:Parameter name=\"service\">\n <ows:AllowedValues>\n <ows:Value>WCS</ows:Value>\n </ows:AllowedValues>\n </ows:Parameter>\n <ows:Parameter name=\"AcceptVersions\">\n <ows:AllowedValues>\n <ows:Value>1.0.0</ows:Value>\n <ows:Value>1.1.0</ows:Value>\n <ows:Value>1.1.1</ows:Value>\n <ows:Value>1.1.2</ows:Value>\n </ows:AllowedValues>\n 
</ows:Parameter>\n </ows:Operation>\n <ows:Operation name=\"DescribeCoverage\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"https://landfire.cr.usgs.gov/arcgis/services/Landfire/US_140/MapServer/WCSServer\"/>\n <ows:Post xlink:href=\"https://landfire.cr.usgs.gov/arcgis/services/Landfire/US_140/MapServer/WCSServer\"/>\n </ows:HTTP>\n </ows:DCP>\n <ows:Parameter name=\"service\">\n <ows:AllowedValues>\n <ows:Value>WCS</ows:Value>\n </ows:AllowedValues>\n </ows:Parameter>\n <ows:Parameter name=\"version\">\n <ows:AllowedValues>\n <ows:Value>1.0.0</ows:Value>\n <ows:Value>1.1.0</ows:Value>\n <ows:Value>1.1.1</ows:Value>\n <ows:Value>1.1.2</ows:Value>\n </ows:AllowedValues>\n </ows:Parameter>\n <ows:Parameter name=\"Identifier\">\n <ows:AllowedValues>\n <ows:Value>1</ows:Value>\n <ows:Value>2</ows:Value>\n <ows:Value>3</ows:Value>\n <ows:Value>4</ows:Value>\n <ows:Value>5</ows:Value>\n <ows:Value>6</ows:Value>\n <ows:Value>7</ows:Value>\n <ows:Value>8</ows:Value>\n <ows:Value>9</ows:Value>\n <ows:Value>10</ows:Value>\n <ows:Value>11</ows:Value>\n <ows:Value>12</ows:Value>\n <ows:Value>13</ows:Value>\n <ows:Value>14</ows:Value>\n <ows:Value>15</ows:Value>\n <ows:Value>16</ows:Value>\n <ows:Value>17</ows:Value>\n <ows:Value>18</ows:Value>\n <ows:Value>19</ows:Value>\n <ows:Value>20</ows:Value>\n <ows:Value>21</ows:Value>\n <ows:Value>22</ows:Value>\n <ows:Value>23</ows:Value>\n <ows:Value>24</ows:Value>\n <ows:Value>25</ows:Value>\n </ows:AllowedValues>\n </ows:Parameter>\n </ows:Operation>\n <ows:Operation name=\"GetCoverage\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"https://landfire.cr.usgs.gov/arcgis/services/Landfire/US_140/MapServer/WCSServer\"/>\n <ows:Post xlink:href=\"https://landfire.cr.usgs.gov/arcgis/services/Landfire/US_140/MapServer/WCSServer\"/>\n </ows:HTTP>\n </ows:DCP>\n <ows:Parameter name=\"service\">\n <ows:AllowedValues>\n <ows:Value>WCS</ows:Value>\n </ows:AllowedValues>\n </ows:Parameter>\n <ows:Parameter name=\"version\">\n <ows:AllowedValues>\n <ows:Value>1.0.0</ows:Value>\n <ows:Value>1.1.0</ows:Value>\n <ows:Value>1.1.1</ows:Value>\n <ows:Value>1.1.2</ows:Value>\n </ows:AllowedValues>\n </ows:Parameter>\n <ows:Parameter name=\"Identifier\">\n <ows:AllowedValues>\n <ows:Value>1</ows:Value>\n <ows:Value>2</ows:Value>\n <ows:Value>3</ows:Value>\n <ows:Value>4</ows:Value>\n <ows:Value>5</ows:Value>\n <ows:Value>6</ows:Value>\n <ows:Value>7</ows:Value>\n <ows:Value>8</ows:Value>\n <ows:Value>9</ows:Value>\n <ows:Value>10</ows:Value>\n <ows:Value>11</ows:Value>\n <ows:Value>12</ows:Value>\n <ows:Value>13</ows:Value>\n <ows:Value>14</ows:Value>\n <ows:Value>15</ows:Value>\n <ows:Value>16</ows:Value>\n <ows:Value>17</ows:Value>\n <ows:Value>18</ows:Value>\n <ows:Value>19</ows:Value>\n <ows:Value>20</ows:Value>\n <ows:Value>21</ows:Value>\n <ows:Value>22</ows:Value>\n <ows:Value>23</ows:Value>\n <ows:Value>24</ows:Value>\n <ows:Value>25</ows:Value>\n </ows:AllowedValues>\n </ows:Parameter>\n <ows:Parameter name=\"InterpolationType\">\n <ows:AllowedValues>\n <ows:Value>nearest</ows:Value>\n <ows:Value>bilinear</ows:Value>\n <ows:Value>bicubic</ows:Value>\n </ows:AllowedValues>\n </ows:Parameter>\n <ows:Parameter name=\"format\">\n <ows:AllowedValues>\n <ows:Value>image/GeoTIFF</ows:Value>\n <ows:Value>image/NITF</ows:Value>\n <ows:Value>image/JPEG</ows:Value>\n <ows:Value>image/PNG</ows:Value>\n <ows:Value>image/JPEG2000</ows:Value>\n <ows:Value>image/HDF</ows:Value>\n </ows:AllowedValues>\n </ows:Parameter>\n <ows:Parameter name=\"store\">\n <ows:AllowedValues>\n 
<ows:Value>False</ows:Value>\n <ows:Value>True</ows:Value>\n </ows:AllowedValues>\n </ows:Parameter>\n </ows:Operation>\n </ows:OperationsMetadata>\n <Contents>\n <CoverageSummary>\n <ows:Title>US_DIST2013</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_DIST2013</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_DIST2014</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_DIST2014</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140CBD</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140CBD</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140CBH</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140CBH</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140CH</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140CH</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140EVH</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140EVH</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140VDEP</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140VDEP</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140FBFM40</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140FBFM40</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140FBFM13</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140FBFM13</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140MFRI</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98737642604233 
22.765512000364211</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681016233757</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140MFRI</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140FRG</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98737642604233 22.765512000364211</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681016233757</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140FRG</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140ESP</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140ESP</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140BPS</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98737642604233 22.765512000364211</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681016233757</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140BPS</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140SCLASS</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140SCLASS</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140CC</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140CC</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140EVC</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140EVC</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140EVT</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140EVT</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140PRS</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98737642604233 22.765512000364211</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681016233757</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140PRS</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140PMS</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98737642604233 22.765512000364211</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681016233757</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140PMS</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140PLS</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n 
<ows:LowerCorner>-127.98737642604233 22.765512000364211</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681016233757</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140PLS</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_VDIST2014</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_VDIST2014</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_VTM2014</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_VTM2014</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_FDIST2014</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_FDIST2014</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140VCC</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765446426860603</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140VCC</Identifier>\n </CoverageSummary>\n <CoverageSummary>\n <ows:Title>US_140FCCS</ows:Title>\n <ows:Abstract>tempname</ows:Abstract>\n <ows:WGS84BoundingBox>\n <ows:LowerCorner>-127.98775263969655 22.765714837805092</ows:LowerCorner>\n <ows:UpperCorner>-65.254445466369276 51.649681015029245</ows:UpperCorner>\n </ows:WGS84BoundingBox>\n <Identifier>US_140FCCS</Identifier>\n </CoverageSummary>\n <SupportedCRS>urn:ogc:def:crs:EPSG::4326</SupportedCRS>\n <SupportedCRS>urn:ogc:def:crs:EPSG::102039</SupportedCRS>\n <SupportedFormat>image/GeoTIFF</SupportedFormat>\n <SupportedFormat>image/NITF</SupportedFormat>\n <SupportedFormat>image/JPEG</SupportedFormat>\n <SupportedFormat>image/PNG</SupportedFormat>\n <SupportedFormat>image/JPEG2000</SupportedFormat>\n <SupportedFormat>image/HDF</SupportedFormat>\n </Contents>\n</Capabilities>\n\n" ] ], [ [ "#### GetCapabilities results\nThe GetCapabilities request returns a lot of information for us. \n\nWe can see that the WCS server supports WCS version `1.0.0`, `1.1.0`, `1.1.1`, and `1.1.2` . \nFor the purposes of this guide, we're going to stick to WCS 1.0.0.\n\nWithin the contents section, we can see all of the available coverages. A coverage is just another word for a data layer.\nFor this guide, let's pick an arbitrary data layer: `US_VDIST2014`\n\n### DescribeCoverage\n\nWe'll use the results we got from GetCoverage to perform a DescribeCoverage request to get more specific information about how to ultimately perform the GetCoverage Operation. 
We'll use WCS version `1.0.0` with coverage `US_VDIST2014` to perform the request.", "_____no_output_____" ] ], [ [ "import requests\n\n# base WCS server URL\nwcs_base_url = \"https://landfire.cr.usgs.gov/arcgis/services/Landfire/US_140/MapServer/WCSServer\"\n\n# add on to the base WCS server URL to define DescribeCoverage URL\nwcs_describe_coverage_url = wcs_base_url + \"?SERVICE=WCS&VERSION=1.0.0&REQUEST=DescribeCoverage&COVERAGE=US_VDIST2014\"\n\n# perform an HTTP GET request with the DescribeCoverage URL\nwcs_describe_coverage_response = requests.get(wcs_describe_coverage_url)\n\n# show the resulting body of the DescribeCoverage request\nprint(\"DescribeCoverage Response:\")\nprint(wcs_describe_coverage_response.text)", "DescribeCoverage Response:\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<CoverageDescription xmlns=\"http://www.opengis.net/wcs\"\n xmlns:gml=\"http://www.opengis.net/gml\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.opengis.net/wcs http://schemas.opengeospatial.net/wcs/1.0.0/describeCoverage.xsd\" version=\"1.0.0\">\n<CoverageOffering>\n <description>tempname</description>\n <name>US_VDIST2014</name>\n <label>US_VDIST2014</label>\n <lonLatEnvelope srsName=\"urn:ogc:def:crs:OGC:1.3:CRS84\">\n <gml:pos dimension=\"2\">-127.98775263969655 22.765446426860603</gml:pos>\n <gml:pos dimension=\"2\">-65.254445466369276 51.649681015029245</gml:pos>\n </lonLatEnvelope>\n <domainSet>\n <spatialDomain>\n <gml:Envelope srsName=\"EPSG:102039\">\n <gml:pos dimension=\"2\">-2362425 258915</gml:pos>\n <gml:pos dimension=\"2\">2263815 3177435</gml:pos>\n </gml:Envelope>\n <gml:RectifiedGrid srsName=\"EPSG:102039\" dimension=\"2\">\n <gml:limits>\n <gml:GridEnvelope>\n <gml:low>0 0</gml:low> \n <gml:high>154207 97283</gml:high>\n </gml:GridEnvelope>\n </gml:limits>\n <gml:axisName>Raster_Pixel_Columns(X-axis)</gml:axisName>\n <gml:axisName>Raster_Pixel_Rows(Y-axis)</gml:axisName>\n <gml:origin>\n <gml:pos>-2362410 3177420</gml:pos>\n </gml:origin>\n <gml:offsetVector>30 0</gml:offsetVector>\n <gml:offsetVector>0 -30</gml:offsetVector>\n </gml:RectifiedGrid>\n </spatialDomain>\n </domainSet>\n <rangeSet>\n <RangeSet>\n <name>RangeSet_21</name>\n <label>US_VDIST2014 RangeSet</label>\n <axisDescription>\n <AxisDescription>\n <name>Band</name>\n <label>Band Numbers</label>\n <values>\n <singleValue>1</singleValue>\n </values>\n </AxisDescription>\n </axisDescription>\n </RangeSet>\n </rangeSet>\n <supportedCRSs>\n <requestResponseCRSs>EPSG:4326</requestResponseCRSs>\n <requestResponseCRSs>EPSG:102039</requestResponseCRSs>\n <nativeCRSs>EPSG:102039</nativeCRSs>\n </supportedCRSs>\n <supportedFormats nativeFormat=\"GeoTIFF\">\n <formats>GeoTIFF</formats>\n <formats>NITF</formats>\n <formats>HDF</formats>\n <formats>JPEG2000</formats>\n </supportedFormats>\n <supportedInterpolations default=\"nearest neighbor\">\n <interpolationMethod>nearest neighbor</interpolationMethod>\n <interpolationMethod>bilinear</interpolationMethod>\n <interpolationMethod>bicubic</interpolationMethod>\n </supportedInterpolations>\n</CoverageOffering>\n</CoverageDescription>\n\n" ] ], [ [ "#### DescribeCoverage results\nFrom the DescribeCoverage response, we can see information about the available bounding box for the layer, the Coordinate Reference System (CRS), the download data types (e.g. GeoTiff), and more. 
We'll use this information to help formulate a successful WCS GetCoverage request to download some actual data.\n\n### GetCoverage\nWith the results from DescribeCoverage, we now know everything we need to know in order to download some data using a GetCoverage request.\nOne unfortunate aspect of WCS version 1.0.0 is that you must provide the resolution or number of pixels of your downloaded output file; you cannot omit this and simply ask for the download file to be in the native resolution of the source data. This means that you may need to perform some additional math to calculate the native resolution on your own before downloading the file. For the purposes of this tutorial, we'll simply download a 500 by 100 pixel image of the continental United States. ", "_____no_output_____" ] ], [ [ "import requests\n\n# base WCS server URL\nwcs_base_url = \"https://landfire.cr.usgs.gov/arcgis/services/Landfire/US_140/MapServer/WCSServer\"\n\n# add on to the base WCS server URL to define GetCoverage URL\nwcs_get_coverage_url = wcs_base_url + \"?SERVICE=WCS&VERSION=1.0.0&REQUEST=GetCoverage&FORMAT=GeoTIFF&COVERAGE=US_VDIST2014&BBOX=-127.98775263969655,22.765446426860603,-65.254445466369276,51.649681015029245&CRS=EPSG:4326&WIDTH=500&HEIGHT=100\"\n\n# perform an HTTP GET request with the DescribeCoverage URL\nwcs_get_coverage_response = requests.get(wcs_get_coverage_url)\n\n# download the resulting response image to disk\nif wcs_get_coverage_response.status_code == 200:\n with open(\"wcs-example.tif\", 'wb') as f:\n f.write(wcs_get_coverage_response.content)", "_____no_output_____" ] ], [ [ "#### GetCoverage results\nAt this point you should have a downloaded GeoTiff file called `wcs-example.tif`. Once you use the GetCapabilities and DescribeCoverage requests to dial in the types of GetCoverage requests you want to perform, you can simply tweak your GetCoverage requests to perform these types of requests in an automated fashion on your code.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0babd5865f2b57c5693b9a5b045ad2e4c3a7621
64,065
ipynb
Jupyter Notebook
Untitled.ipynb
williamcottrell72/williamcottrell72.github.io
09494ba3bc0e0d7c8070b3c7f6f22d688ca54d7a
[ "MIT" ]
null
null
null
Untitled.ipynb
williamcottrell72/williamcottrell72.github.io
09494ba3bc0e0d7c8070b3c7f6f22d688ca54d7a
[ "MIT" ]
null
null
null
Untitled.ipynb
williamcottrell72/williamcottrell72.github.io
09494ba3bc0e0d7c8070b3c7f6f22d688ca54d7a
[ "MIT" ]
null
null
null
121.565465
29,620
0.882291
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Method-I:-For-Loop\" data-toc-modified-id=\"Method-I:-For-Loop-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Method I: For Loop</a></span></li><li><span><a href=\"#Method-II:-List-Comprehension\" data-toc-modified-id=\"Method-II:-List-Comprehension-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Method II: List Comprehension</a></span></li><li><span><a href=\"#Method-III:-Combining\" data-toc-modified-id=\"Method-III:-Combining-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Method III: Combining</a></span></li></ul></div>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport os\nimport random\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "arr=[1,2,3,4,5,6,6,7,7,7,7,8,5,3,2]\nnp_arr=np.array(arr)", "_____no_output_____" ], [ "np_arr", "_____no_output_____" ] ], [ [ "Note: () for functions, [] for calling elements of a list or array.", "_____no_output_____" ] ], [ [ "def my_function(x):\n return x**2", "_____no_output_____" ], [ "my_function(10)", "_____no_output_____" ] ], [ [ "# Method I: For Loop", "_____no_output_____" ] ], [ [ "arr1=[]\nfor i in range(100):\n arr1.append(i**(1/2)+1+i**1.2)\n ", "_____no_output_____" ] ], [ [ "plt.plot(Xvalues,Yvalues,....properties)\n\nplt.show()", "_____no_output_____" ] ], [ [ "plt.plot(range(100),arr1)\nplt.show()", "_____no_output_____" ], [ "arr2=[]\nfor i in range(1000):\n arr2.append(random.random())", "_____no_output_____" ], [ "plt.plot(range(1000),arr2)\nplt.show()", "_____no_output_____" ] ], [ [ "# Method II: List Comprehension", "_____no_output_____" ] ], [ [ "arr3=[i**(1/2) for i in range(100)]", "_____no_output_____" ], [ "plt.plot(range(100),arr3)", "_____no_output_____" ], [ "arr4=[random.random() for i in range(100)]", "_____no_output_____" ], [ "arr4", "_____no_output_____" ] ], [ [ "# Method III: Combining", "_____no_output_____" ] ], [ [ "len(arr3+arr4), len(arr3),len(arr4)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d0bac22acd5177f129f05bc143413107e226be74
17,668
ipynb
Jupyter Notebook
jupyter_notebooks/pandas/mastering_data_analysis/07. Time Series/06. Python datetime module.ipynb
manual123/Nacho-Jupyter-Notebooks
e75523434b1a90313a6b44e32b056f63de8a7135
[ "MIT" ]
2
2021-02-13T05:52:05.000Z
2022-02-08T09:52:35.000Z
pandas/mastering_data_analysis/07. Time Series/06. Python datetime module.ipynb
manual123/Nacho-Jupyter-Notebooks
e75523434b1a90313a6b44e32b056f63de8a7135
[ "MIT" ]
null
null
null
pandas/mastering_data_analysis/07. Time Series/06. Python datetime module.ipynb
manual123/Nacho-Jupyter-Notebooks
e75523434b1a90313a6b44e32b056f63de8a7135
[ "MIT" ]
null
null
null
23.843455
375
0.566221
[ [ [ "# Python datetime module\n\nWe will look at an important standard library, the [datetime library][1] which contains many powerful functions to support date, time and datetime manipulation. Pandas does not rely on this object and instead creates its own, a `Timestamp`, discussed in other notebooks.\n\nThe datetime library is part of the standard library, so it comes shipped along with every Python installation. Let's get started by importing it into our namespace.\n\n[1]: https://docs.python.org/3/library/datetime.html", "_____no_output_____" ] ], [ [ "import datetime", "_____no_output_____" ] ], [ [ "## Create a date, a time and a datetime\nThe datetime module provides three separate objects for dates, times, and datetimes. Let's use the `date` type to construct a date. It takes three integers, the year, month and day. Here we create the date April 11, 2016.", "_____no_output_____" ] ], [ [ "my_date = datetime.date(2016, 4, 11)\nmy_date", "_____no_output_____" ], [ "type(my_date)", "_____no_output_____" ] ], [ [ "Use the `time` type to construct a time. It takes 4 integers - hours, minutes, seconds, and microseconds (one millionth of a second). Here we create the time 10:54:32.034512", "_____no_output_____" ] ], [ [ "my_time = datetime.time(10, 54, 32, 34512)\nmy_time", "_____no_output_____" ], [ "type(my_time)", "_____no_output_____" ] ], [ [ "Only the hour component is mandatory. For instance, we can create the time 5:44 with this:", "_____no_output_____" ] ], [ [ "datetime.time(5, 44)", "_____no_output_____" ] ], [ [ "Or you can specify just a particular component of time.", "_____no_output_____" ] ], [ [ "datetime.time(second=34)", "_____no_output_____" ] ], [ [ "Finally, we can construct a datetime with the `datetime` type, which takes up to 7 parameters - three for the date, and four for the time.", "_____no_output_____" ] ], [ [ "my_datetime = datetime.datetime(2016, 4, 11, 10, 54, 32, 34512)\nmy_datetime", "_____no_output_____" ], [ "type(my_datetime)", "_____no_output_____" ] ], [ [ "### Format changes when printed to the screen\nPrinting the objects from above to the screen provides a more readable view.", "_____no_output_____" ] ], [ [ "print(my_date)\nprint(my_time)\nprint(my_datetime)", "_____no_output_____" ] ], [ [ "## Attributes of date, time, and datetimes\nEach individual component of the date, time, and datetime is available as an attribute.", "_____no_output_____" ] ], [ [ "my_date.year", "_____no_output_____" ], [ "my_date.month", "_____no_output_____" ], [ "my_date.day", "_____no_output_____" ], [ "my_time.hour", "_____no_output_____" ], [ "my_time.minute", "_____no_output_____" ], [ "my_time.second", "_____no_output_____" ], [ "my_datetime.day", "_____no_output_____" ], [ "my_datetime.microsecond", "_____no_output_____" ], [ "my_date.weekday()", "_____no_output_____" ] ], [ [ "## Methods of date, time, and datetimes\nSeveral methods exist for each of these objects. The methods that begin with ISO represent the [International Standards Organization][1] formatting rules for dates, times, and datetimes. The particular standard here is [ISO 8601][2]. 
Python will return according to this standard.\n\n[1]: https://www.iso.org/home.html\n[2]: https://en.wikipedia.org/wiki/ISO_8601", "_____no_output_____" ] ], [ [ "my_date.weekday()", "_____no_output_____" ], [ "my_date.isoformat()", "_____no_output_____" ], [ "my_date.isocalendar()", "_____no_output_____" ], [ "my_time.isoformat()", "_____no_output_____" ], [ "my_datetime.isoformat()", "_____no_output_____" ], [ "# get the date from a datetime\nmy_datetime.date()", "_____no_output_____" ], [ "# get the time from a datetime\nmy_datetime.time()", "_____no_output_____" ] ], [ [ "## Alternate Constructors\nYou can create dates and datetimes from a single integer which represents the number of seconds since the Unix epoch, January 1, 1970 UTC. UTC is the timezone, [Coordinated Universal Time][1] and is 0 degrees longitude or 5 hours ahead of Easter Standard Time.\n\nPassing the integer 0 to the `fromtimestamp` datetime constructor will return a datetime at the Unix epoch adjusted to your local timezone. If you are located in EST, then you will get returned December 31, 1969 7 p.m.\n\n[1]: https://en.wikipedia.org/wiki/Coordinated_Universal_Time", "_____no_output_____" ] ], [ [ "datetime.datetime.fromtimestamp(0)", "_____no_output_____" ], [ "# 1 billion seconds from the unix epoch\ndatetime.datetime.fromtimestamp(10 ** 9)", "_____no_output_____" ] ], [ [ "The date type also has this constructor, but not time.", "_____no_output_____" ] ], [ [ "# also works for date\ndatetime.date.fromtimestamp(10 ** 9)", "_____no_output_____" ] ], [ [ "Can get todays date or datetime:", "_____no_output_____" ] ], [ [ "datetime.date.today()", "_____no_output_____" ], [ "datetime.datetime.now()", "_____no_output_____" ] ], [ [ "### Constructing from strings\nThe `strptime` alternate datetime constructor has the ability to convert a string into a datetime. In addition to the string, you must pass it a specific **format** to alert the constructor which part of the string corresponds to which component of the datetime. There are special character codes called **directives** which must be used to form this correspondence.\n\n## Directives\nAll the directives can be found in the official [Python documentation for the datetime module][1]. Below are some common ones.\n\n* **%y** - two digit year\n* **%Y** - four digit year\n* **%m** - Month\n* **%d** - Day of the month \n* **%H** - Hour (24-hour clock)\n* **%I** - Hour (12-hour clock)\n* **%M** - Minute\n\n[1]: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior", "_____no_output_____" ], [ "### Examples of string parsing to datetimes\nThe `strptime` alternate constructor stands for string parse time (though it only parses datetimes). You must create a string with the correct directives that represents the format of the date string you are trying to convert to a datetime. 
For instance, the string '2016-10-22' can use the format '%Y-%m-%d' to parse it correctly.", "_____no_output_____" ] ], [ [ "s = '2016-10-22'\nfmt = '%Y-%m-%d'\ndatetime.datetime.strptime(s, fmt)", "_____no_output_____" ], [ "s = '2016/1/22 5:32:44'\nfmt = '%Y/%m/%d %H:%M:%S'\ndatetime.datetime.strptime(s, fmt)", "_____no_output_____" ], [ "s = 'January 23, 2019 5:22 PM'\nfmt = '%B %d, %Y %H:%M %p'\ndatetime.datetime.strptime(s, fmt)", "_____no_output_____" ], [ "s = 'On January the 23rd 2019 at 5:22 PM'\nfmt = 'On %B the %drd %Y at %H:%M %p'\ndatetime.datetime.strptime(s, fmt)", "_____no_output_____" ] ], [ [ "### Converting datetimes to string\nThe **strftime** method converts a date, time, or datetime to a string. It stands for **string format time**. Begin with a date, time, or datetime and use a string with directives to make the conversion.", "_____no_output_____" ] ], [ [ "# Convert directly into a string of your choice. Lookup directives online\nmy_date.strftime(\"%Y-%m-%d\")", "_____no_output_____" ], [ "# Another more involved directive\nmy_date.strftime(\"Remembering back to %A, %B %d, %Y.... What a fantastic day that was.\")", "_____no_output_____" ] ], [ [ "## Date and Datetime addition\nIt's possible to add an amount of time to a date or datetime object using the timedelta function. timedelta simply produces some amount of time measured in days, seconds and microseconds. You can then use this object to add to date or datetime objects.\n\n**`timedelta`** objects are constructed with the following definition: \n\n**`timedelta(days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0)`**", "_____no_output_____" ] ], [ [ "my_timedelta = datetime.timedelta(seconds=5000) \nmy_timedelta", "_____no_output_____" ], [ "type(my_timedelta)", "_____no_output_____" ], [ "# add to datetime\nmy_datetime + my_timedelta", "_____no_output_____" ], [ "# original\nmy_datetime", "_____no_output_____" ], [ "# add to date\nmy_date + my_timedelta", "_____no_output_____" ], [ "# original date. Nothing changed since 5000 seconds wasn't long enough to make an extra day\nmy_date", "_____no_output_____" ], [ "# now there is a change\nmy_date + datetime.timedelta(days = 5)", "_____no_output_____" ], [ "# add weeks\na = my_datetime + datetime.timedelta(weeks=72, days=4, hours=44)", "_____no_output_____" ], [ "# the difference between the underlying string representation and the print function\nprint(a.__repr__())\nprint(a)", "_____no_output_____" ], [ "datetime.timedelta(weeks=72, days=4, hours=44)", "_____no_output_____" ] ], [ [ "## Third-Party library `dateutil`\nFor improved datetime handling, you can use [dateutil][1], a more advanced third-party library. Pandas actually uses this library to for its complex date handling. Two of it's most useful features are string parsing and datetime addition.\n\n### Advanced string handling\n\nThe `parse` function handles a wide variety of strings. It returns the same datetime type from above. 
See [many more examples][2] in the documentation.\n\n[1]: https://dateutil.readthedocs.io/en/stable/\n[2]: https://dateutil.readthedocs.io/en/stable/examples.html#parse-examples", "_____no_output_____" ] ], [ [ "from dateutil.parser import parse", "_____no_output_____" ], [ "parse('Jan 3, 2003 and 5:22')", "_____no_output_____" ] ], [ [ "Pandas uses this under the hood.", "_____no_output_____" ] ], [ [ "import pandas as pd\npd.Timestamp('Jan 3, 2003 and 5:22')", "_____no_output_____" ] ], [ [ "### Advanced datetime addition\nAn upgrade to the **`timedelta`** class exists with the **`relativedelta`** class. Check [this stackoverflow][1] post for more detail or see the [documentation for examples][2].\n\n[1]: http://stackoverflow.com/questions/12433233/what-is-the-difference-between-datetime-timedelta-and-dateutil-relativedelta\n[2]: https://dateutil.readthedocs.io/en/stable/relativedelta.html#examples", "_____no_output_____" ] ], [ [ "from dateutil.relativedelta import relativedelta", "_____no_output_____" ] ], [ [ "There are two ways to use it. First, you can pass it two datetimes to find the difference between the two.", "_____no_output_____" ] ], [ [ "dt1 = datetime.datetime(2016, 1, 20, 5, 33)\ndt2 = datetime.datetime(2018, 3, 20, 6, 22)\nrelativedelta(dt1, dt2)", "_____no_output_____" ] ], [ [ "Second, create an amount of time with the parameters years, months, weeks, days, etc... and then add that to a datetime.", "_____no_output_____" ] ], [ [ "rd = relativedelta(months=3)", "_____no_output_____" ], [ "dt1 + rd", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0bad0d8971080631d8ae7df29f3374360485c44
237,722
ipynb
Jupyter Notebook
clustering.ipynb
DOANDUYDAT/graph-clustering
f5408b05bb3439b7cbb900e6d845f7d45a0ebefb
[ "MIT" ]
2
2020-05-30T07:14:47.000Z
2020-05-31T02:23:24.000Z
clustering.ipynb
DOANDUYDAT/graph-clustering
f5408b05bb3439b7cbb900e6d845f7d45a0ebefb
[ "MIT" ]
1
2021-08-23T20:46:13.000Z
2021-08-23T20:46:13.000Z
clustering.ipynb
DOANDUYDAT/graph-clustering
f5408b05bb3439b7cbb900e6d845f7d45a0ebefb
[ "MIT" ]
null
null
null
402.918644
73,403
0.737513
[ [ [ "import numpy as np\nimport pandas as pd\nimport time\nimport os\n\nfrom pyspark.ml.clustering import KMeans\nfrom pyspark.ml.evaluation import ClusteringEvaluator\nfrom pyspark.ml.linalg import Vectors\n\nfrom matplotlib import pyplot as plt\n\nfrom pyspark.sql import SparkSession\n# from pyspark.ml.clustering import KMeans, KMeansModel\n\nimport networkx as nx # thư viện để tạo, thao tác, học cấu trúc.... của các mạng phức tạp\nimport seaborn as sns # thư viện để trực quan hóa dữ liệu\n\nsns.set_style('darkgrid', {'axes.facecolor': '.9'})\nsns.set_palette(palette='deep')\nsns_c = sns.color_palette(palette='deep')\n\nfloat_formatter = lambda x: \"%.6f\" % x\n\nnp.set_printoptions(formatter={'float_kind':float_formatter})\n", "_____no_output_____" ], [ "def draw_graph(G):\n pos = nx.spring_layout(G)\n nx.draw_networkx_nodes(G, pos, node_size=10)\n # nx.draw_networkx_labels(G, pos)\n nx.draw_networkx_edges(G, pos, width=0.1, alpha=0.5)\n\n\ndef draw_graph_cluster(G, labels):\n pos = nx.spring_layout(G)\n nx.draw(\n G,\n pos,\n node_color=node_colors,\n node_size=10,\n width=0.1,\n alpha=0.5,\n with_labels=False,\n )\n\ndef get_node_color(label):\n switcher = {\n 0: 'red',\n 1: 'blue',\n 2: 'orange',\n 3: 'gray',\n 4: 'violet',\n 5: 'pink',\n 6: 'purple',\n 7: 'brown',\n 8: 'yellow',\n 9: 'lime',\n 10: 'cyan'\n }\n return switcher.get(label, 'Invalid label')", "_____no_output_____" ], [ "spark = SparkSession.builder \\\n .master(\"local\") \\\n .appName(\"CLustering\") \\\n .config(\"spark.some.config.option\", \"some-value\") \\\n .getOrCreate()\nspark", "_____no_output_____" ], [ "base_path = os.getcwd()\n# file_input = base_path + \"/facebook_combined.txt\" \nfile_input = base_path + \"/ChG-Miner_miner-chem-gene.tsv\"\nfile_input", "_____no_output_____" ], [ "pdf = pd.read_table(file_input, sep='\\t', names=['src', 'dst'])\npdf.head()", "_____no_output_____" ], [ "pdf = pdf.to_numpy()", "_____no_output_____" ], [ "G = nx.Graph()\nG.add_edges_from(pdf)", "_____no_output_____" ], [ "len(G.nodes())", "_____no_output_____" ], [ "len(G.edges())", "_____no_output_____" ], [ "# adjacency matrix\nW = nx.adjacency_matrix(G)\nprint(W.todense())", "[[0 1 0 ... 0 0 0]\n [1 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n ...\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n" ], [ "# degree matrix\nD = np.diag(np.sum(np.array(W.todense()), axis=1))\nprint(D)", "[[ 5 0 0 ... 0 0 0]\n [ 0 15 0 ... 0 0 0]\n [ 0 0 3 ... 0 0 0]\n ...\n [ 0 0 0 ... 1 0 0]\n [ 0 0 0 ... 0 1 0]\n [ 0 0 0 ... 0 0 1]]\n" ], [ "# Laplacian matrix\nL = D - W\nprint(L)", "[[ 5 -1 0 ... 0 0 0]\n [-1 15 0 ... 0 0 0]\n [ 0 0 3 ... 0 0 0]\n ...\n [ 0 0 0 ... 1 0 0]\n [ 0 0 0 ... 0 1 0]\n [ 0 0 0 ... 
0 0 1]]\n" ], [ "# eigenvalues, eigenvector\neigenvals, eigenvcts = np.linalg.eigh(L)", "_____no_output_____" ], [ "eigenvcts", "_____no_output_____" ], [ "eigenvals", "_____no_output_____" ], [ "eigenvals_sorted_indices = np.argsort(eigenvals)\neigenvals_sorted = eigenvals[eigenvals_sorted_indices]", "_____no_output_____" ], [ "eigenvals_sorted_indices", "_____no_output_____" ], [ "eigenvals_sorted", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(10, 6))\nsns.lineplot(x=range(1, eigenvals_sorted_indices.size + 1), y=eigenvals_sorted, ax=ax)\nax.set(title='Sorted Eigenvalues Graph Laplacian', xlabel='index', ylabel=r'$\\lambda$')", "_____no_output_____" ], [ "index_lim = 250\n\nfig, ax = plt.subplots(figsize=(10, 6))\nsns.scatterplot(x=range(1, eigenvals_sorted_indices[: index_lim].size + 1), y=eigenvals_sorted[: index_lim], s=80, ax=ax)\nsns.lineplot(x=range(1, eigenvals_sorted_indices[: index_lim].size + 1), y=eigenvals_sorted[: index_lim], alpha=0.5, ax=ax)\nax.axvline(x=1, color=sns_c[3], label='zero eigenvalues', linestyle='--')\nax.legend()\nax.set(title=f'Sorted Eigenvalues Graph Laplacian (First {index_lim})', xlabel='index', ylabel=r'$\\lambda$')", "_____no_output_____" ], [ "zero_eigenvals_index = np.argwhere(abs(eigenvals) < 0.02)\nzero_eigenvals_index.squeeze()", "_____no_output_____" ], [ "proj_df = pd.DataFrame(eigenvcts[:, zero_eigenvals_index.squeeze()[206]])\n# proj_df = proj_df.transpose()\nproj_df = proj_df.rename(columns={0: 'features'})\nproj_df", "_____no_output_____" ], [ "U = []\nfor x in proj_df['features']:\n U.append(Vectors.dense(x))\npdf_train = pd.DataFrame(U, columns=['features'])\ndf = spark.createDataFrame(pdf_train)\ndisplay(df)", "_____no_output_____" ], [ "# train k-means model\ncost = np.zeros(15)\nfor i in range(2,15):\n kmeans = KMeans(k=i, seed=1)\n model = kmeans.fit(df)\n cost[i] = model.computeCost(df) # requires Spark 2.0 or later", "_____no_output_____" ], [ "fig, ax = plt.subplots(1,1, figsize =(8,6))\nax.plot(range(2,15),cost[2:15])\nax.set_xlabel('k')\nax.set_ylabel('cost')\nplt.show()", "_____no_output_____" ], [ "# train\nkmeans = KMeans(k=9, seed=1)\nmodel = kmeans.fit(df)\n\n# Make predictions\npredictions = model.transform(df)\nrows = predictions.select(\"features\",\"prediction\").collect()\n\n# Evaluate clustering by computing Silhouette score\nevaluator = ClusteringEvaluator()\nsilhouette = evaluator.evaluate(predictions)\nsilhouette", "_____no_output_____" ], [ "rows", "_____no_output_____" ], [ "node_colors = []\nfor label in rows:\n node_colors.append(get_node_color(label.prediction))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0bae3543d8fd37e74e601b5c00981a9e1fcb7a7
916
ipynb
Jupyter Notebook
experiments/lista.ipynb
ksteensig/channel-estimation
30b1133ade2527b772a51dc5294cd4be71abd5b0
[ "MIT" ]
6
2020-12-15T11:25:01.000Z
2022-03-27T06:16:52.000Z
experiments/lista.ipynb
ksteensig/channel-estimation
30b1133ade2527b772a51dc5294cd4be71abd5b0
[ "MIT" ]
null
null
null
experiments/lista.ipynb
ksteensig/channel-estimation
30b1133ade2527b772a51dc5294cd4be71abd5b0
[ "MIT" ]
3
2021-04-01T18:04:04.000Z
2022-01-27T16:42:38.000Z
19.083333
61
0.546943
[ [ [ "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' \n\nfrom tensorflow import keras\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
d0bae44589820d9c444b049de2f76fa29e47e27f
25,161
ipynb
Jupyter Notebook
VacationPy/VacationPy.ipynb
jfaccioli/python-api-challenge
f5205796b1ce4af9377b7131c9cf0ef565145f7e
[ "ADSL" ]
null
null
null
VacationPy/VacationPy.ipynb
jfaccioli/python-api-challenge
f5205796b1ce4af9377b7131c9cf0ef565145f7e
[ "ADSL" ]
null
null
null
VacationPy/VacationPy.ipynb
jfaccioli/python-api-challenge
f5205796b1ce4af9377b7131c9cf0ef565145f7e
[ "ADSL" ]
null
null
null
31.333748
160
0.382934
[ [ [ "# VacationPy\n----\n\n#### Note\n* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.\n\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.", "_____no_output_____" ] ], [ [ "# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport requests\nimport gmaps\nimport os\n\n# Import API key\nfrom api_keys import g_key", "_____no_output_____" ] ], [ [ "### Store Part I results into DataFrame\n* Load the csv exported in Part I to a DataFrame", "_____no_output_____" ] ], [ [ "# Import World Happiness Report Data 2021\ncity_data = pd.read_csv(\"../WeatherPy/city_data.csv\")\ncity_data\n", "_____no_output_____" ] ], [ [ "### Humidity Heatmap\n* Configure gmaps.\n* Use the Lat and Lng as locations and Humidity as the weight.\n* Add Heatmap layer to map.", "_____no_output_____" ] ], [ [ "# Configure gmaps\ngmaps.configure(api_key=g_key)\n\n# Store 'Lat' and 'Lng' into locations \nlocations = city_data[[\"Lat\", \"Lng\"]].astype(float)\n\nhumidity = city_data['Humidity']", "_____no_output_____" ], [ "# Create a Humidity Heatmap\nfig = gmaps.figure(center=(0, 0), zoom_level=2)\n\nhumidity_heatmap = gmaps.heatmap_layer(locations, weights=humidity, \n dissipating=False, max_intensity=100,\n point_radius = 1)\n\n\nfig.add_layer(humidity_heatmap)\n\nfig", "_____no_output_____" ] ], [ [ "### Create new DataFrame fitting weather criteria\n* Narrow down the cities to fit weather conditions.\n* Drop any rows will null values.", "_____no_output_____" ] ], [ [ "# Dropna in city_data\ncity_data_narrowed = city_data.dropna()\n\n# Narrow down the cities to fit weather conditions\ncity_data_narrowed = city_data_narrowed[(city_data_narrowed['Max Temp'] > 70) & (city_data_narrowed['Max Temp'] < 80)]\ncity_data_narrowed = city_data_narrowed[city_data_narrowed['Wind Speed'] <= 10]\ncity_data_narrowed = city_data_narrowed[city_data_narrowed['Cloudiness'] == 0]\n\ncity_data_narrowed", "_____no_output_____" ] ], [ [ "### Hotel Map\n* Store into variable named `hotel_df`.\n* Add a \"Hotel Name\" column to the DataFrame.\n* Set parameters to search for hotels with 5000 meters.\n* Hit the Google Places API for each city's coordinates.\n* Store the first Hotel result into the DataFrame.\n* Plot markers on top of the heatmap.", "_____no_output_____" ] ], [ [ "# Store into variable named hotel_df\nhotel_df = city_data_narrowed\n\n# Add \"Hotel Name\" column\nhotel_df['Hotel Name'] = \"\"\n\nhotel_df", "_____no_output_____" ], [ "# Set parameters to search for hotels with 5000 meters\nbase_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?\"\n\nparams = {\"type\" : \"hotel\",\n \"keyword\" : \"hotel\",\n \"radius\" : 5000,\n \"key\" : g_key}\n", "_____no_output_____" ], [ "# Hit the Google Places API for each city's coordinates\n\n# Import time function \nimport time\n\n# Print header\nprint('Beginning Data Retrieval')\nprint('------------------------------------------------------------')\n\n# Loop through the hotel_df Dataframe with the iterrows function\nfor index, row in hotel_df.iterrows():\n lat = row['Lat']\n lng = row['Lng']\n city_name = row['City']\n \n params['location'] = f'{lat},{lng}'\n \n print('Processing Record for {} (Index {}):'.format(city_name, index))\n \n response = requests.get(base_url, params=params).json()\n results = 
response['results']\n \n try:\n hotel_df.loc[index, 'Hotel Name'] = results[0]['name']\n print('The closest hotel in {} is {}'.format(city_name, results[0]['name']))\n \n except (KeyError, IndexError):\n print('City not found. Skipping...')\n print(\"------------------------------------------------------------\")\n \n # Limit waiting time\n time.sleep(1)\n \n pass\n \n print(\"------------------------------------------------------------\")\n \n# Print footer\nprint('------------------------------------------------------------')\nprint('Data Retrieval Complete ')\nprint('------------------------------------------------------------')", "Beginning Data Retrieval\n------------------------------------------------------------\nProcessing Record for Boueni (Index 90):\nThe closest hotel in Boueni is LE CHISSIOUA\n------------------------------------------------------------\nProcessing Record for Barkhan (Index 335):\nThe closest hotel in Barkhan is National Bank of Pakistan (NBP)\n------------------------------------------------------------\nProcessing Record for Riyadh (Index 339):\nThe closest hotel in Riyadh is Four Seasons Hotel Riyadh At Kingdom Center\n------------------------------------------------------------\nProcessing Record for Suzhou (Index 381):\nThe closest hotel in Suzhou is Suzhou Marriott Hotel\n------------------------------------------------------------\nProcessing Record for Algiers (Index 396):\nThe closest hotel in Algiers is Sofitel Algiers Hamma Garden\n------------------------------------------------------------\n------------------------------------------------------------\nData Retrieval Complete \n------------------------------------------------------------\n" ], [ "# NOTE: Do not change any of the code in this cell\n\n# Using the template add the hotel marks to the heatmap\ninfo_box_template = \"\"\"\n<dl>\n<dt>Name</dt><dd>{Hotel Name}</dd>\n<dt>City</dt><dd>{City}</dd>\n<dt>Country</dt><dd>{Country}</dd>\n</dl>\n\"\"\"\n# Store the DataFrame Row\n# NOTE: be sure to update with your DataFrame name\nhotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]\nlocations = hotel_df[[\"Lat\", \"Lng\"]]", "_____no_output_____" ], [ "# Add marker layer ontop of heat map\nmarkers = gmaps.marker_layer(locations, info_box_content = hotel_info)\nfig.add_layer(markers)\n\n# Display figure\nfig\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d0bae5167dc40faf193861c2dcd36f4ea34f210d
45,763
ipynb
Jupyter Notebook
notebooks/recommendation_systems/labs/3_als_bqml_hybrid.ipynb
jfesteban/Google-ASL
8e991a437e348b1950cdc351dba39e2d40a6b08f
[ "Apache-2.0" ]
null
null
null
notebooks/recommendation_systems/labs/3_als_bqml_hybrid.ipynb
jfesteban/Google-ASL
8e991a437e348b1950cdc351dba39e2d40a6b08f
[ "Apache-2.0" ]
null
null
null
notebooks/recommendation_systems/labs/3_als_bqml_hybrid.ipynb
jfesteban/Google-ASL
8e991a437e348b1950cdc351dba39e2d40a6b08f
[ "Apache-2.0" ]
null
null
null
38.914116
1,494
0.517383
[ [ [ "# Hybrid Recommendations with the Movie Lens Dataset", "_____no_output_____" ], [ "__Note:__ It is recommended that you complete the companion __als_bqml.ipynb__ notebook before continuing with this __als_bqml_hybrid.ipynb__ notebook. This is, however, not a requirement for this lab as you have the option to bring over the dataset + trained model. If you already have the movielens dataset and trained model you can skip the \"Import the dataset and trained model\" section.\n\n## Learning Objectives\n1. Know extract user and product factors from a BigQuery Matrix Factorizarion Model\n2. Know how to format inputs for a BigQuery Hybrid Recommendation Model\n", "_____no_output_____" ] ], [ [ "import os\nimport tensorflow as tf\nPROJECT = \"qwiklabs-gcp-04-8722038efd75\" # REPLACE WITH YOUR PROJECT ID\n\n# Do not change these\nos.environ[\"PROJECT\"] = PROJECT\nos.environ[\"TFVERSION\"] = '2.1'", "_____no_output_____" ] ], [ [ "## Import the dataset and trained model\nIn the previous notebook, you imported 20 million movie recommendations and trained an ALS model with BigQuery ML\n\nTo save you the steps of having to do so again (if this is a new environment) you can run the below commands to copy over the clean data and trained model.", "_____no_output_____" ], [ "First create the BigQuery dataset and copy over the data", "_____no_output_____" ] ], [ [ "!bq mk movielens", "BigQuery error in mk operation: Dataset 'qwiklabs-gcp-04-8722038efd75:movielens'\nalready exists.\n" ], [ "%%bigquery\nCREATE OR REPLACE TABLE movielens.ratings AS\nSELECT * FROM `cloud-training-demos`.movielens.ratings;\n\nCREATE OR REPLACE TABLE movielens.movies AS \nSELECT * FROM `cloud-training-demos`.movielens.movies;", "Executing query with job ID: 5796f872-80ab-436a-9761-47a4960e7621\nQuery executing: 0.33s" ] ], [ [ "Next, copy over the trained recommendation model. Note that if you're project is in the EU you will need to change the location from US to EU below. Note that as of the time of writing you cannot copy models across regions with `bq cp`.", "_____no_output_____" ] ], [ [ "%%bash\nbq --location=US cp \\\ncloud-training-demos:movielens.recommender_16 \\\nmovielens.recommender_16", "Table 'cloud-training-demos:movielens.recommender_16' successfully copied to 'qwiklabs-gcp-04-8722038efd75:movielens.recommender_16'\n" ] ], [ [ "Next, ensure the model still works by invoking predictions for movie recommendations:", "_____no_output_____" ] ], [ [ "%%bigquery --project $PROJECT\nSELECT * FROM\nML.PREDICT(MODEL `movielens.recommender_16`, (\n SELECT \n movieId, title, 903 AS userId\n FROM movielens.movies, UNNEST(genres) g\n WHERE g = 'Comedy'\n))\nORDER BY predicted_rating DESC\nLIMIT 5", "_____no_output_____" ] ], [ [ "### Incorporating user and movie information \nThe matrix factorization approach does not use any information about users or movies beyond what is available from the ratings matrix. However, we will often have user information (such as the city they live, their annual income, their annual expenditure, etc.) and we will almost always have more information about the products in our catalog. How do we incorporate this information in our recommendation model?\n\nThe answer lies in recognizing that the user factors and product factors that result from the matrix factorization approach end up being a concise representation of the information about users and products available from the ratings matrix. 
We can concatenate this information with other information we have available and train a regression model to predict the rating.\n### Obtaining user and product factors\nWe can get the user factors or product factors from ML.WEIGHTS. For example to get the product factors for movieId=96481 and user factors for userId=54192, we would do:", "_____no_output_____" ] ], [ [ "%%bigquery --project $PROJECT\nSELECT \n processed_input,\n feature,\n TO_JSON_STRING(factor_weights),\n intercept\nFROM ML.WEIGHTS(MODEL movielens.recommender_16)\nWHERE\n (processed_input = 'movieId' AND feature = '96481')\n OR (processed_input = 'userId' AND feature = '54192')", "_____no_output_____" ] ], [ [ "Multiplying these weights and adding the intercept is how we get the predicted rating for this combination of movieId and userId in the matrix factorization approach.\n\nThese weights also serve as a low-dimensional representation of the movie and user behavior. We can create a regression model to predict the rating given the user factors, product factors, and any other information we know about our users and products.\n### Creating input features\nThe MovieLens dataset does not have any user information, and has very little information about the movies themselves. To illustrate the concept, therefore, let’s create some synthetic information about users:\n", "_____no_output_____" ] ], [ [ "%%bigquery --project $PROJECT\nCREATE OR REPLACE TABLE movielens.users AS\nSELECT\n userId,\n RAND() * COUNT(rating) AS loyalty,\n CONCAT(SUBSTR(CAST(userId AS STRING), 0, 2)) AS postcode\nFROM\n movielens.ratings\nGROUP BY userId", "_____no_output_____" ] ], [ [ "Input features about users can be obtained by joining the user table with the ML weights and selecting all the user information and the user factors from the weights array.\n", "_____no_output_____" ] ], [ [ "%%bigquery --project $PROJECT\nWITH userFeatures AS (\n SELECT \n u.*,\n (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights)) AS user_factors\n FROM movielens.users u\n JOIN ML.WEIGHTS(MODEL movielens.recommender_16) w\n ON processed_input = 'userId' AND feature = CAST(u.userId AS STRING)\n)\n\nSELECT * FROM userFeatures\nLIMIT 5", "_____no_output_____" ] ], [ [ "Similarly, we can get product features for the movies data, except that we have to decide how to handle the genre since a movie could have more than one genre. 
If we decide to create a separate training row for each genre, then we can construct the product features using.", "_____no_output_____" ] ], [ [ "%%bigquery --project $PROJECT\nWITH productFeatures AS (\n SELECT \n p.* EXCEPT(genres),\n g, (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights))\n AS product_factors\n FROM movielens.movies p, UNNEST(genres) g\n JOIN ML.WEIGHTS(MODEL movielens.recommender_16) w\n ON processed_input = 'movieId' AND feature = CAST(p.movieId AS STRING)\n)\n\nSELECT * FROM productFeatures\nLIMIT 5", "_____no_output_____" ] ], [ [ "Combining these two WITH clauses and pulling in the rating corresponding the movieId-userId combination (if it exists in the ratings table), we can create the training dataset.\n\n**TODO 1**: Combine the above two queries to get the user factors and product factor for each rating.", "_____no_output_____" ] ], [ [ "%%bigquery --project $PROJECT\nCREATE OR REPLACE TABLE movielens.hybrid_dataset AS\n\n WITH userFeatures AS (\n SELECT \n u.*,\n (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights)) AS user_factors\n FROM movielens.users u\n JOIN ML.WEIGHTS(MODEL movielens.recommender_16) w\n ON processed_input = 'userId' AND feature = CAST(u.userId AS STRING)\n # TODO: Place the user features query here\n ),\n\n productFeatures AS (\n SELECT \n p.* EXCEPT(genres),\n g, (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights))\n AS product_factors\n FROM movielens.movies p, UNNEST(genres) g\n JOIN ML.WEIGHTS(MODEL movielens.recommender_16) w\n ON processed_input = 'movieId' AND feature = CAST(p.movieId AS STRING)\n # TODO: Place the product features query here\n )\n\n SELECT\n p.* EXCEPT(movieId),\n u.* EXCEPT(userId),\n rating \n FROM productFeatures p, userFeatures u\n JOIN movielens.ratings r\n ON r.movieId = p.movieId AND r.userId = u.userId", "Executing query with job ID: 5fa26fe2-8850-4f84-813e-753541f2c6f4\nQuery executing: 72.47s" ] ], [ [ "One of the rows of this table looks like this:", "_____no_output_____" ] ], [ [ "%%bigquery --project $PROJECT\nSELECT *\nFROM movielens.hybrid_dataset\nLIMIT 1", "_____no_output_____" ] ], [ [ "Essentially, we have a couple of attributes about the movie, the product factors array corresponding to the movie, a couple of attributes about the user, and the user factors array corresponding to the user. These form the inputs to our “hybrid” recommendations model that builds off the matrix factorization model and adds in metadata about users and movies.\n### Training hybrid recommendation model\nAt the time of writing, BigQuery ML can not handle arrays as inputs to a regression model. 
Let’s, therefore, define a function to convert arrays to a struct where the array elements are its fields:\n", "_____no_output_____" ] ], [ [ "%%bigquery --project $PROJECT\nCREATE OR REPLACE FUNCTION movielens.arr_to_input_16_users(u ARRAY<FLOAT64>)\nRETURNS \n STRUCT<\n u1 FLOAT64,\n u2 FLOAT64,\n u3 FLOAT64,\n u4 FLOAT64,\n u5 FLOAT64,\n u6 FLOAT64,\n u7 FLOAT64,\n u8 FLOAT64,\n u9 FLOAT64,\n u10 FLOAT64,\n u11 FLOAT64,\n u12 FLOAT64,\n u13 FLOAT64,\n u14 FLOAT64,\n u15 FLOAT64,\n u16 FLOAT64\n > AS (STRUCT(\n u[OFFSET(0)],\n u[OFFSET(1)],\n u[OFFSET(2)],\n u[OFFSET(3)],\n u[OFFSET(4)],\n u[OFFSET(5)],\n u[OFFSET(6)],\n u[OFFSET(7)],\n u[OFFSET(8)],\n u[OFFSET(9)],\n u[OFFSET(10)],\n u[OFFSET(11)],\n u[OFFSET(12)],\n u[OFFSET(13)],\n u[OFFSET(14)],\n u[OFFSET(15)]\n));", "_____no_output_____" ] ], [ [ "which gives:", "_____no_output_____" ] ], [ [ "%%bigquery --project $PROJECT\nSELECT movielens.arr_to_input_16_users(u).*\nFROM (SELECT\n [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.] AS u)", "_____no_output_____" ] ], [ [ "We can create a similar function named movielens.arr_to_input_16_products to convert the product factor array into named columns.\n\n**TODO 2**: Create a function that returns named columns from a size 16 product factor array.", "_____no_output_____" ] ], [ [ "%%bigquery --project $PROJECT\nCREATE OR REPLACE FUNCTION movielens.arr_to_input_16_products(p ARRAY<FLOAT64>)\nRETURNS \n STRUCT<\n p1 FLOAT64,\n p2 FLOAT64,\n p3 FLOAT64,\n p4 FLOAT64,\n p5 FLOAT64,\n p6 FLOAT64,\n p7 FLOAT64,\n p8 FLOAT64,\n p9 FLOAT64,\n p10 FLOAT64,\n p11 FLOAT64,\n p12 FLOAT64,\n p13 FLOAT64,\n p14 FLOAT64,\n p15 FLOAT64,\n p16 FLOAT64\n # TODO: Finish building this struct\n > AS (STRUCT(\n p[OFFSET(0)],\n p[OFFSET(1)],\n p[OFFSET(2)],\n p[OFFSET(3)],\n p[OFFSET(4)],\n p[OFFSET(5)],\n p[OFFSET(6)],\n p[OFFSET(7)],\n p[OFFSET(8)],\n p[OFFSET(9)],\n p[OFFSET(10)],\n p[OFFSET(11)],\n p[OFFSET(12)],\n p[OFFSET(13)],\n p[OFFSET(14)],\n p[OFFSET(15)]\n # TODO: Finish building this struct\n));", "_____no_output_____" ] ], [ [ "Then, we can tie together metadata about users and products with the user factors and product factors obtained from the matrix factorization approach to create a regression model to predict the rating:", "_____no_output_____" ] ], [ [ "%%bigquery --project $PROJECT\nCREATE OR REPLACE MODEL movielens.recommender_hybrid \nOPTIONS(model_type='linear_reg', input_label_cols=['rating'])\nAS\n\nSELECT\n * EXCEPT(user_factors, product_factors),\n movielens.arr_to_input_16_users(user_factors).*,\n movielens.arr_to_input_16_products(product_factors).*\nFROM\n movielens.hybrid_dataset", "_____no_output_____" ] ], [ [ "There is no point looking at the evaluation metrics of this model because the user information we used to create the training dataset was fake (not the RAND() in the creation of the loyalty column) -- we did this exercise in order to demonstrate how it could be done. And of course, we could train a dnn_regressor model and optimize the hyperparameters if we want a more sophisticated model. But if we are going to go that far, it might be better to consider using Auto ML tables, covered in the next section.\n", "_____no_output_____" ], [ "Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0bae6539a4789764c3198ae2374a0fba85e5d20
9,066
ipynb
Jupyter Notebook
examples/mortgage/notebooks/python/cv-mortgage-gpu.ipynb
wbo4958/spark-rapids-examples
4074d473e7bf82abeace01da4aac8a1ea312c472
[ "Apache-2.0" ]
null
null
null
examples/mortgage/notebooks/python/cv-mortgage-gpu.ipynb
wbo4958/spark-rapids-examples
4074d473e7bf82abeace01da4aac8a1ea312c472
[ "Apache-2.0" ]
null
null
null
examples/mortgage/notebooks/python/cv-mortgage-gpu.ipynb
wbo4958/spark-rapids-examples
4074d473e7bf82abeace01da4aac8a1ea312c472
[ "Apache-2.0" ]
null
null
null
30.019868
204
0.546217
[ [ [ "# Introduction to XGBoost-Spark Cross Validation with GPU\n\nThe goal of this notebook is to show you how to levarage GPU to accelerate XGBoost spark cross validatoin for hyperparameter tuning. The best model for the given hyperparameters will be returned.\n\nHere takes the application 'Mortgage' as an example.\n\nA few libraries are required for this notebook:\n 1. NumPy\n 2. cudf jar\n 2. xgboost4j jar\n 3. xgboost4j-spark jar", "_____no_output_____" ], [ "#### Import the Required Libraries", "_____no_output_____" ] ], [ [ "from ml.dmlc.xgboost4j.scala.spark import XGBoostClassificationModel, XGBoostClassifier\nfrom ml.dmlc.xgboost4j.scala.spark.rapids import CrossValidator\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\nfrom pyspark.ml.tuning import ParamGridBuilder\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import FloatType, IntegerType, StructField, StructType\nfrom time import time\nimport os", "_____no_output_____" ] ], [ [ "As shown above, here `CrossValidator` is imported from package `ml.dmlc.xgboost4j.scala.spark.rapids`, not the spark's `tuning.CrossValidator`.", "_____no_output_____" ], [ "#### Create a Spark Session", "_____no_output_____" ] ], [ [ "spark = SparkSession.builder.appName(\"mortgage-cv-gpu-python\").getOrCreate()", "_____no_output_____" ] ], [ [ "#### Specify the Data Schema and Load the Data", "_____no_output_____" ] ], [ [ "label = 'delinquency_12'\nschema = StructType([\n StructField('orig_channel', FloatType()),\n StructField('first_home_buyer', FloatType()),\n StructField('loan_purpose', FloatType()),\n StructField('property_type', FloatType()),\n StructField('occupancy_status', FloatType()),\n StructField('property_state', FloatType()),\n StructField('product_type', FloatType()),\n StructField('relocation_mortgage_indicator', FloatType()),\n StructField('seller_name', FloatType()),\n StructField('mod_flag', FloatType()),\n StructField('orig_interest_rate', FloatType()),\n StructField('orig_upb', IntegerType()),\n StructField('orig_loan_term', IntegerType()),\n StructField('orig_ltv', FloatType()),\n StructField('orig_cltv', FloatType()),\n StructField('num_borrowers', FloatType()),\n StructField('dti', FloatType()),\n StructField('borrower_credit_score', FloatType()),\n StructField('num_units', IntegerType()),\n StructField('zip', IntegerType()),\n StructField('mortgage_insurance_percent', FloatType()),\n StructField('current_loan_delinquency_status', IntegerType()),\n StructField('current_actual_upb', FloatType()),\n StructField('interest_rate', FloatType()),\n StructField('loan_age', FloatType()),\n StructField('msa', FloatType()),\n StructField('non_interest_bearing_upb', FloatType()),\n StructField(label, IntegerType()),\n])\nfeatures = [ x.name for x in schema if x.name != label ]\n\n# You need to update them to your real paths!\ndataRoot = os.getenv(\"DATA_ROOT\", \"/data\")\ntrain_data = spark.read.parquet(dataRoot + '/mortgage/parquet/train')\ntrans_data = spark.read.parquet(dataRoot + '/mortgage/parquet/eval')", "_____no_output_____" ] ], [ [ "#### Build a XGBoost-Spark CrossValidator", "_____no_output_____" ] ], [ [ "# First build a classifier of GPU version using *setFeaturesCols* to set feature columns\nparams = { \n 'eta': 0.1,\n 'gamma': 0.1,\n 'missing': 0.0,\n 'treeMethod': 'gpu_hist',\n 'maxDepth': 10, \n 'maxLeaves': 256,\n 'growPolicy': 'depthwise',\n 'objective': 'binary:logistic',\n 'minChildWeight': 30.0,\n 'lambda_': 1.0,\n 'scalePosWeight': 2.0,\n 'subsample': 1.0,\n 'nthread': 1,\n 
'numRound': 100,\n 'numWorkers': 1,\n}\nclassifier = XGBoostClassifier(**params).setLabelCol(label).setFeaturesCols(features)\n# Then build the evaluator and the hyperparameters\nevaluator = (MulticlassClassificationEvaluator()\n .setLabelCol(label))\nparam_grid = (ParamGridBuilder()\n .addGrid(classifier.maxDepth, [3, 6])\n .addGrid(classifier.numRound, [100, 200])\n .build())\n# Finally the corss validator\ncross_validator = (CrossValidator()\n .setEstimator(classifier)\n .setEvaluator(evaluator)\n .setEstimatorParamMaps(param_grid)\n .setNumFolds(3))", "_____no_output_____" ] ], [ [ "#### Start Cross Validation by Fitting Data to CrossValidator", "_____no_output_____" ] ], [ [ "def with_benchmark(phrase, action):\n start = time()\n result = action()\n end = time()\n print('{} takes {} seconds'.format(phrase, round(end - start, 2)))\n return result\nmodel = with_benchmark('Cross-Validation', lambda: cross_validator.fit(train_data)).bestModel", "Cross-Validation takes 88.53 seconds\n" ] ], [ [ "#### Transform On the Best Model", "_____no_output_____" ] ], [ [ "def transform():\n result = model.transform(trans_data).cache()\n result.foreachPartition(lambda _: None)\n return result\nresult = with_benchmark('Transforming', transform)\nresult.select(label, 'rawPrediction', 'probability', 'prediction').show(5)", "Transforming takes 3.13 seconds\n+--------------+--------------------+--------------------+----------+\n|delinquency_12| rawPrediction| probability|prediction|\n+--------------+--------------------+--------------------+----------+\n| 0|[2.57163572311401...|[0.92901364713907...| 0.0|\n| 0|[2.63977861404418...|[0.93337820470333...| 0.0|\n| 0|[2.50156974792480...|[0.92425179481506...| 0.0|\n| 0|[2.63977861404418...|[0.93337820470333...| 0.0|\n| 0|[2.09173870086669...|[0.89009761810302...| 0.0|\n+--------------+--------------------+--------------------+----------+\nonly showing top 5 rows\n\n" ] ], [ [ "#### Evaluation", "_____no_output_____" ] ], [ [ "accuracy = with_benchmark(\n 'Evaluation',\n lambda: MulticlassClassificationEvaluator().setLabelCol(label).evaluate(result))\nprint('Accuracy is ' + str(accuracy))", "Evaluation takes 0.29 seconds\nAccuracy is 0.9868033296704449\n" ], [ "spark.stop()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0baec1be0cd1d90b2be21551dfeefb58e52248d
491,065
ipynb
Jupyter Notebook
QuantImmunoSubtraction.ipynb
ryanlandvater/qIS
fe4f2eb9242d71552fdeaf344a0ef1b1644d4f4a
[ "RSA-MD" ]
null
null
null
QuantImmunoSubtraction.ipynb
ryanlandvater/qIS
fe4f2eb9242d71552fdeaf344a0ef1b1644d4f4a
[ "RSA-MD" ]
null
null
null
QuantImmunoSubtraction.ipynb
ryanlandvater/qIS
fe4f2eb9242d71552fdeaf344a0ef1b1644d4f4a
[ "RSA-MD" ]
null
null
null
560.576484
100,784
0.655771
[ [ [ "<a href=\"https://colab.research.google.com/github/ryanlandvater/qIS/blob/main/QuantImmunoSubtraction.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Quantitative Immuno-Subtraction Project\n---\n", "_____no_output_____" ] ], [ [ "# %matplotlib notebook\nimport sys as sys\nimport pandas as pd #import pandas for file reading\nimport matplotlib as mpl #import matplotlib for graphing\nimport matplotlib.pyplot as plt #import plot module\nimport numpy as np #import numpy for arithmetic fns\n\nimport IPython #import IPhython for settings\nfrom IPython import display as dsp #import IPython desplay\n# from dsp import clear_output #import clear output for dynamics\ndsp.set_matplotlib_formats('svg') # Create vector plots\n\nprint(\"Using Pandas to import data version:\\t\" + pd.__version__);\n# print(\"Plotting with MatPlotLib version:\\t\" + mpl.__version__);\nprint(\"Using numpy version:\\t\\t\\t\" + np.__version__);\n\n", "Using Pandas to import data version:\t1.1.5\nUsing numpy version:\t\t\t1.19.5\n" ] ], [ [ "## Ryan's Curve Fit Playground\n---\nBegin by defining the relevant functions for non-linear least-squared curve fitting using the Newton-Gauss Method.\n\n\n\n\n", "_____no_output_____" ], [ "### Defining the Partial Differential Equations\n1. Normal / Gaussian distribution ($\\rho$):\n$$ \\rho = \\frac{\\alpha e^{\\frac{-(x-\\mu)^2}{2\\sigma^2}}}{\\sqrt{2\\pi}\\sigma} \\ \\text{where} \\ \n\\begin{aligned} \n& \\alpha = \\text{amplitude} \\\\\n& \\mu = \\text{mean} \\\\\n& \\sigma = \\text{standard deviation}\n\\end{aligned}$$ \n2. Residuals $r$ for $y$ discrete readings $\\left[0\\dots N\\right]$ taken at locations $x$ ($y_x$)\n$$ r_x = \\frac{1}{2}\\left( y_x - \\frac{\\alpha e^{\\frac{-(x-\\mu)^2}{2\\sigma^2}}}{\\sqrt{2\\pi}\\sigma}\\right)^2\\\\\nr_x = \\left[r_0,r_1,r_2,\\dots,r_N\\right]$$ \n3. Differentials of residuals with respect to normal distribution parameters:\n 1. Partial with respect to the amplitude\n$$ \n\\frac{\\partial r_x}{\\partial \\alpha} = \n\\frac{\\partial r_x}{\\partial \\rho_x}\\frac{\\partial \\rho_x}{\\partial \\alpha} = \n-\\frac{e^{\\frac{-(x-\\mu)^2}{2\\sigma^2}}}{\\sqrt{2\\pi}\\sigma}\\cdot \n\\left(y_x-\\frac{\\alpha e^{\\frac{-(x-\\mu)^2}{2\\sigma^2}}}{\\sqrt{2\\pi}\\sigma}\\right) \\\\\n\\frac{\\partial r_x}{\\partial \\alpha} = \\left[\\frac{\\partial r_0}{\\partial \\alpha},\\frac{\\partial r_1}{\\partial \\alpha},\\frac{\\partial r_2}{\\partial \\alpha},\\dots,\\frac{\\partial r_N}{\\partial \\alpha}\\right] $$\n 2. Partial with respect to the mean\n$$\n\\frac{\\partial r_x}{\\partial \\mu} = \\frac{\\partial r_x}{\\partial \\rho_x}\\frac{\\partial \\rho_x}{\\partial \\mu} = -\\frac{\\alpha(x-\\mu)e^{\\frac{-(x-\\mu)^2}{2\\sigma^2}}}{\\sqrt{2\\pi}\\sigma^3}\\cdot \n\\left(y_x-\\frac{\\alpha e^{\\frac{-(x-\\mu)^2}{2\\sigma^2}}}{\\sqrt{2\\pi}\\sigma}\\right) \\\\\n\\frac{\\partial r_x}{\\partial \\mu} = \\left[\\frac{\\partial r_0}{\\partial \\mu},\\frac{\\partial r_1}{\\partial \\mu},\\frac{\\partial r_2}{\\partial \\mu},\\dots,\\frac{\\partial r_N}{\\partial \\mu}\\right] $$\n 3. 
Partial with respect to the standard deviation\n$$\n\\frac{\\partial r_x}{\\partial \\sigma} = \\frac{\\partial r_x}{\\partial \\rho_x}\\frac{\\partial \\rho_x}{\\partial \\sigma} =\n\\left(\\frac{\\alpha e^{\\frac{-(x-\\mu)^2}{2\\sigma^2}}}{\\sqrt{2\\pi}\\sigma^2}-\\frac{a(x-m)^2 e^{\\frac{-(x-\\mu)^2}{2\\sigma^2}}}{\\sqrt{2\\pi}\\sigma^4}\\right) \\cdot \n\\left(y_x-\\frac{\\alpha e^{\\frac{-(x-\\mu)^2}{2\\sigma^2}}}{\\sqrt{2\\pi}\\sigma}\\right) \\\\\n\\frac{\\partial r_x}{\\partial \\sigma} = \\left[\\frac{\\partial r_0}{\\partial \\sigma},\\frac{\\partial r_1}{\\partial \\sigma},\\frac{\\partial r_2}{\\partial \\sigma},\\dots,\\frac{\\partial r_N}{\\partial \\sigma}\\right] $$\n", "_____no_output_____" ] ], [ [ "# param[0] = AMPLITUDE\n# param[1] = MEAN\n# param[2] = STANDARD DEVIATION\ndef conv_norm_dist(x,p):\n result = [0] * len(x)\n for p_ in p:\n result += p_[0]*np.exp(-pow((x-p_[1]),2)/(2*pow(p_[2],2))) / (np.sqrt(2*np.pi)*p_[2])\n return result\ndef norm_dist(x, p):\n result = []\n for p_ in p:\n result.append(p_[0]*np.exp(-pow((x-p_[1]),2)/(2*pow(p_[2],2))) / (np.sqrt(2*np.pi)*p_[2]))\n return result\ndef d_norm_dist_d_amp(x, p):\n result=[]\n for p_ in p:\n result.append(np.exp(-pow((x-p_[1]),2)/(2*pow(p_[2],2))) / (np.sqrt(2*np.pi)*p_[2]))\n return result\ndef d_norm_dist_d_mean(x, p):\n result=[]\n for p_ in p:\n result.append(p_[0]*(x-p_[1])*np.exp(-pow(x-p_[1],2)/(2*pow(p_[2],2)))/(np.sqrt(2*np.pi)*pow(p_[2],3)))\n return result\ndef d_norm_dist_d_sd(x, p):\n result=[]\n for p_ in p:\n result.append((p_[0]*pow(x-p_[1],2)*np.exp(-pow(x-p_[1],2)/(2*pow(p_[2],2)))/(np.sqrt(2*np.pi)*pow(p_[2],4)))\n -(p_[0]*np.exp(-pow(x-p_[1],2)/(2*pow(p_[2],2))) / (np.sqrt(2*np.pi)*pow(p_[2],2))))\n return result\n# Residuals return two \ndef residuals(y, x, p):\n return 1/2 * pow(y - conv_norm_dist(x,p),2)\n# dR/dRho returns \ndef d_r_d_rho(y,x,params = [3]):\n return y - norm_dist(x, params)\ndef d_r_d_amp(y,x, params = [3]):\n return -1 * d_r_d_rho(y,x,params) * d_norm_dist_d_amp(x,params)\ndef d_r_d_mean(y,x, params = [3]):\n return -1 * d_r_d_rho(y,x,params) * d_norm_dist_d_mean(x,params)\ndef d_r_d_sd(y,x,params = [3]):\n return -1 * d_r_d_rho(y,x, params) * d_norm_dist_d_sd(x,params)\n\n# NUMERICAL DERIVATIVES\ndef num_d_r_d_amp(y,x, params = [3]):\n step = params\n results = []\n for i in range(len(step)):\n step[i][0] += 1\n results.append(residuals(y,x,[step[i]]) - residuals(y,x,[params[i]]))\n return results\ndef num_d_r_d_mean(y,x, params = [3]):\n return residuals(y,x,amp,mean+1,sd) - residuals(y,x,params)\ndef num_d_r_d_sd(y,x, params = [3]):\n return residuals(y,x,amp,mean,sd+1) - residuals(y,x,params)", "_____no_output_____" ] ], [ [ "### Class Declarations\n---\nThe following code incapsulates the above functions in an object-oriented manner to provide greater ease of use.", "_____no_output_____" ] ], [ [ "class curve:\n amp = 1.0\n mean = 1.0\n sd = 1.0\n def __init__(self, init_amp, init_mean, init_sd):\n self.amp = init_amp\n self.mean = init_mean\n self.sd = init_sd\n # Generate a normal distribution using the curves parameters\n def normal_dist(self, x):\n return self.amp*np.exp(-pow((x-self.mean),2)/(2*pow(self.sd,2))) / (np.sqrt(2*np.pi)*self.sd)\n # Mathematical differentials\n def dR_dRho (self, x, y):\n return y - self.normal_dist(x)\n def dNorm_dAmp (self, x):\n return np.exp(-pow((x-self.mean),2)/(2*pow(self.sd,2))) / (np.sqrt(2*np.pi)*self.sd)\n def dNorm_dMean (self, x):\n return 
self.amp*(x-self.mean)*np.exp(-pow(x-self.mean,2)/(2*pow(self.sd,2)))/(np.sqrt(2*np.pi)*pow(self.sd,3))\n def dNorm_dSD (self, x):\n return (self.amp*pow(x-self.mean,2)*np.exp(-pow(x-self.mean,2)/(2*pow(self.sd,2)))/(np.sqrt(2*np.pi)*pow(self.sd,4)))\n -(self.amp*np.exp(-pow(x-self.mean,2)/(2*pow(self.sd,2))) / (np.sqrt(2*np.pi)*pow(self.sd,2)))\n def dR_dAmp (self, x, y):\n return -1 * self.dR_dRho(x, y) * self.dNorm_dAmp(x)\n def dR_dMean (self, x, y):\n return -1 * self.dR_dRho(x, y) * self.dNorm_dMean(x)\n def dR_dSD (self, x, y):\n return -1 * self.dR_dRho(x,y) * self.dNorm_dSD(x)\n #Numerical differentials\n def ndAmp (self, step, x):\n return (self.amp+step)*np.exp(-pow((x-self.mean),2)/(2*pow(self.sd,2))) / (np.sqrt(2*np.pi)*self.sd)\n def ndMean (self, step, x):\n return self.amp*np.exp(-pow((x-(self.mean+step)),2)/(2*pow(self.sd,2))) / (np.sqrt(2*np.pi)*self.sd)\n def ndSD (self, step, x):\n return self.amp*np.exp(-pow((x-self.mean),2)/(2*pow((self.sd+step),2))) / (np.sqrt(2*np.pi)*(self.sd+step))\n def ndR_ndAmp (self, step, x, y):\n return np.power(y - self.ndAmp(step,x),2)/2 - np.power(y - self.normal_dist(x),2)/2\n def ndR_ndMean (self, step, x, y):\n return np.power(y - self.ndMean(step,x),2)/2 - np.power(y - self.normal_dist(x),2)/2\n def ndR_ndSD (self, step, x, y):\n return np.power(y - self.ndSD(step,x),2)/2 - np.power(y - self.normal_dist(x),2)/2\n \nclass curve_list:\n x = 0\n y = 0\n n_curves = 0 # Number of curves within the list\n curves = [] # List of curves\n def __init__(self):\n n_curves = 0\n self.curves = []\n def __getitem__(self, _index_) :\n return self.curves[_index_]\n def set_x_array(self, x):\n self.x = x.copy()\n def set_y_array(self, y):\n self.y = y.copy()\n def add_blank_curve(self) :\n self.curves.append(curve(1,1,1))\n self.n_curves += 1;\n def add_curve(self, _curve_) :\n self.curves.append(_curve_);\n self.n_curves += 1;\n def front(self):\n return self[0]\n def back(self):\n return self[self.n_curves-1]\n \n def get_residuals (self):\n result = y.copy()\n for curve in self.curves:\n result -= curve.normal_dist(self.x)\n result = 1/2 * np.power(result,2)\n return result\n def get_jacobian (self):\n jacobian = []\n for C in self.curves:\n y = self.y.copy()\n for C_ in self.curves:\n if (C_ != C):\n y -= C_.normal_dist(self.x)\n jacobian.append(C.dR_dAmp(self.x,y))\n jacobian.append(C.dR_dMean(self.x,y))\n jacobian.append(C.dR_dSD(self.x,y))\n return np.array(jacobian)\n def get_nJacobian (self, step):\n jacobian = []\n for C in self.curves:\n y = self.y.copy()\n for C_ in self.curves:\n if (C_ != C):\n y -= C_.normal_dist(self.x)\n jacobian.append(C.ndR_ndAmp(step,self.x,y))\n jacobian.append(C.ndR_ndMean(step,self.x,y))\n jacobian.append(C.ndR_ndSD(step,self.x,y))\n return np.array(jacobian)\n def get_sub_jacobian (self, index):\n jacobian = []\n jacobian.append(self.curves[index].dR_dAmp(self.x,self.y))\n jacobian.append(self.curves[index].dR_dMean(self.x,self.y))\n jacobian.append(self.curves[index].dR_dSD(self.x,self.y))\n return np.array(jacobian)\n \n def update_curves(self, deltas):\n if (len(deltas)%3 != 0):\n raise \"Error, attempting to update curve parameters with an inappropriate number of features\"\n c_r = self.get_residuals();\n for index in range(0,len(deltas),3):\n C = self.curves[int(index/3)]\n C.amp -= deltas[index]\n C.mean -= deltas[index+1]\n C.sd -= deltas[index+2]\n u_r = self.get_residuals();\n if (np.sum(u_r) > np.sum(c_r)):\n for index in range(0,len(deltas),3):\n C = self.curves[int(index/3)]\n C.amp += 
deltas[index]\n C.mean += deltas[index+1]\n C.sd += deltas[index+2]\n return False\n return True\n def update_sub_curve(self, index, deltas):\n if (len(deltas) != 3):\n raise \"Error, this is only for a single curve\"\n c_r = self.get_residuals();\n C = self.curves[index]\n C.amp -= deltas[0]\n C.mean -= deltas[1]\n C.sd -= deltas[2]\n # u_r = self.get_residuals();\n # if (np.sum(u_r) > np.sum(c_r)):\n # C = self.curves[index]\n # C.amp += deltas[0]\n # C.mean += deltas[1]\n # C.sd += deltas[2]\n # return False\n # return True\n\n def iterator (self):\n return range(self.n_curves)\n", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "x = np.linspace(0,20,300)\n# param = [[5,10,4],[2,4,2]]\n# param = [[5,10,2],[3,4,1]]\nparam = [[3,3,0.5],[2,11,1],[2,14,2],[3,6,1.5,]]\ny = conv_norm_dist(x,param);\ndy_da = d_norm_dist_d_amp(x,param);\ndy_dm = d_norm_dist_d_mean(x,param);\ndy_ds = d_norm_dist_d_sd(x,param);", "_____no_output_____" ] ], [ [ "Below, a QC test to ensure the proper functioning of the above defined partial differential equations ", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots();\n# styles = ['-','--','-.']\nax.plot(x, y, '-');\nfor d_amp in dy_da:\n ax.plot(x,d_amp,'-', label='$\\partial\\ rho / \\partial\\ amp$', color = 'green');\nfor d_mean in dy_dm:\n ax.plot(x,d_mean,'-', label='$\\partial\\ rho / \\partial\\ mean$', color = 'red');\nfor d_sd in dy_ds:\n ax.plot(x,d_sd,'-', label='$\\partial\\ rho / \\partial\\ sd$', color = 'purple');\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.set_title(\"Test functions\")\nax.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "### Initial Estimates and Peak Finding\n\nRoot finding methods require initial estimates in most instances. The role of the **peakFinder** class described below is to identify inflection points using the second derivative of the trace.", "_____no_output_____" ] ], [ [ "class peakFinder:\n class peak:\n index = 0\n estimate = 1.0\n def __init__(self, _index_, _estimate_):\n self.index = _index_\n self.estimate = _estimate_\n peaks = []\n inflections = []\n def find_peaks (self, x):\n index = 0\n d_x = x[1:len(x-1)]\n d_x2 = d_x[1:len(x-1)]\n d_x3 = d_x2[1:len(x-1)]\n d_y = np.diff(y)\n d_y2 = np.diff(d_y)\n d_y3 = np.diff(d_y2)\n while index != (len(d_y3)-1):\n if d_y3[index] < 0 and d_y3[index+1] > 0:\n self.peaks.append(self.peak(index,d_x3[index]))\n index+=1\n self.inflections = d_y2.copy();\n def get_peaks (self):\n return self.peaks\n def get_inflections(self):\n return self.inflections", "_____no_output_____" ], [ "PF = peakFinder()\nPF.find_peaks(x)\n\nd_x = x[1:len(x-1)]\nd_x2 = d_x[1:len(x-1)]\nd_x3 = d_x2[1:len(x-1)]\nd_y = np.diff(y)\nd_y2 = np.diff(d_y)\nd_y3 = np.diff(d_y2)\n\nmean_est = []\nmean_inx = []\nindex = 0\n\n# while index != len(d_y2):\n# if d_y2[index] < 0:\n# start = index\n# while d_y2[index] < 0:\n# index+=1\n# min_index = start;\n# for sub_i in range(start,index):\n# if d_y2[sub_i] < d_y2[min_index]:\n# min_index = sub_i;\n# mean_inx.append(min_index)\n# mean_est.append(d_x2[min_index]);\n# else:\n# index+=1\n#ALTERNATIVE METHODS\nwhile index != (len(d_y3)-1):\n if d_y3[index] < 0 and d_y3[index+1] > 0:\n mean_inx.append(index)\n mean_est.append(d_x3[index])\n index+=1\n\nfig, ax = plt.subplots();\nax.plot(x, y, '-');\n# ax.plot(d_x, d_y,'-', label = 'dy_dx');\nax2 = plt.twinx()\nax2.plot(d_x2, [0]*d_x2,'--', color = 'green')\nax2.plot(d_x2, d_y2, '-', color = 'red', label = 'inflections');\nax.plot(mean_est, y[mean_inx] , 'o', color = 
'orange')\nax2.plot(mean_est, d_y3[mean_inx] , 'o', color = 'red')\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.set_title(\"Curve Enumeration / Initial Guess\")\nax2.legend()\nplt.show()", "_____no_output_____" ], [ "y_rn = y.copy()#+np.random.normal(0,.02,300)\nCL = curve_list()\nCL.set_x_array(x)\nCL.set_y_array(y_rn)\nfor index in range(0,len(mean_est)) : \n CL.add_blank_curve()\n C = CL.back()\n C.mean = mean_est[index]\n C.amp = y_rn[mean_inx[index]]/conv_norm_dist(x,[[1.0,mean_est[index],1.0]])[mean_inx[index]]\ny_est = [0] * len(x)\nresiduals = y_rn.copy()\nfig, ax = plt.subplots();\nax.plot(x, y, '-');\nfor C in CL:\n ax.plot(x, C.normal_dist(x),'--', label = 'init estimate');\nax.plot(x, CL.get_residuals(), '-', label = 'residuals');\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.set_title(\"Test Residuals\")\nax.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "### Defining the Jacobian\n\nThe Jacobian matrix is defined as follows:\n$$ J = \\left[\n\\begin{aligned}\n&\\frac{\\partial r_0}{\\partial \\alpha_1},\\frac{\\partial r_1}{\\partial \\alpha_1},\\frac{\\partial r_2}{\\partial \\alpha_1},\\dots,&\\frac{\\partial r_N}{\\partial \\alpha_1} \\\\\n&\\frac{\\partial r_0}{\\partial \\mu_1},\\frac{\\partial r_1}{\\partial \\mu_1},\\frac{\\partial r_2}{\\partial \\mu_1},\\dots, &\\frac{\\partial r_N}{\\partial \\mu_1}\\\\\n&\\frac{\\partial r_0}{\\partial \\sigma_1},\\frac{\\partial r_1}{\\partial \\sigma_1},\\frac{\\partial r_2}{\\partial \\sigma_1},\\dots, &\\frac{\\partial r_N}{\\partial \\sigma_1}\\\\\n&\\vdots &\\vdots\\\\\n&\\frac{\\partial r_0}{\\partial \\sigma_M},\\frac{\\partial r_1}{\\partial \\sigma_M},\\frac{\\partial r_2}{\\partial \\sigma_M},\\dots, &\\frac{\\partial r_N}{\\partial \\sigma_M}\n\\end{aligned}\n\\right]\n $$\n\n For $M$ curves and $M\\cdot3$ vertical entries. The total dimensions of the Jacobian are $[N x 3M]$ where $M*3 \\leq N$.\n ", "_____no_output_____" ] ], [ [ "# residuals(y,x,(param_est[0][0],param_est[0][1],param_est[0][2]))\nnp.sum(CL.get_residuals())\nfig, ax = plt.subplots(CL.n_curves);\nax[0].set_title(\"Numerical Jacobians for \"+str(len(CL.curves))+\" Curves\");\nax[CL.n_curves-1].set_xlabel(\"x\")\n# J = CL.get_jacobian()\nJ = CL.get_nJacobian(1.0)\nfor index in range(0,CL.n_curves):\n ax_ = ax[index]\n ax_.plot(x, J[index*3], '-', label = 'var = amplitude', color = 'green');\n ax_.plot(x, J[index*3+1], '-', label = 'var = mean', color = 'red');\n ax_.plot(x, J[index*3+2], '-', label = 'var = standard dev', color = 'purple')\n ax_.set_ylabel(\"$\\partial$ error / $\\partial var$\")\nax[0].legend()\nplt.show()", "_____no_output_____" ] ], [ [ "The Newton Gauss Method iteratively finds the root of the derivative of the error ($\\ ^{dr}/_{d\\rho} = 0$) by estimating the change needed in the gaussian ($\\Delta\\rho$), which in and of itself is a function of $\\Delta\\alpha$, $\\Delta\\mu$, and $\\Delta\\sigma$. The Jacobian is convolved with its transpose ($J^TJ$) to acount for the fact we have calculated partial derivatives, and the Jacobian, multiplied by the residuals (i.e. the gradient $\\nabla = J^T\\cdot r$), is divided by this matrix. 
", "_____no_output_____" ] ], [ [ "import time\nr = CL.get_residuals()\nerror = [np.sum(r)];\nmu = error[0]\nstep = 1.0\n\n# Figure preparation\nfig, ax = plt.subplots();\n_disp = dsp.display(\"\", display_id=True)\nax.plot(x, y, '-k');\ncomb = [0]*len(x)\nfor C in CL:\n comb += C.normal_dist(x)\n ax.plot(x, C.normal_dist(x),'--', label = 'Curve Fit');\nax.plot(x, CL.get_residuals(), '-', label = 'Residuals');\nax.plot(x, comb, '-.k')\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.set_title(\"Curve Fit Solution\")\nax.legend()\nax2 = plt.twinx()\nax2.plot(np.linspace(0,x.max(),len(error)),np.log(error), '-.', color = 'red', label = 'Error')\nax2.set_ylabel(\"Log Cumulative Error\")\nax2.legend()\n_disp.update(fig)\n\nwhile True:\n _e = error[len(error)-1]\n error.append(np.sum(CL.get_residuals()));\n J = CL.get_nJacobian(step)\n # J = CL.get_jacobian()\n JtJ = np.matmul(J,J.transpose())\n JtJ_i = np.linalg.inv(JtJ+mu*np.identity(len(JtJ))) # Get psudo-Hessian inverse matrix\n g_r = np.matmul(J,r) # Get the gradient\n deltas = np.matmul(g_r,JtJ_i) # Multiply the inverse Hessian by gradient\n # deltas\n if (CL.update_curves(deltas)):\n r = CL.get_residuals()\n error.append(np.sum(r))\n # mu = error[len(error)-1]*0.1\n # mu = error[len(error)-2] - error[len(error)-1]\n mu /= 3.0\n if (step < 1):\n step *= 2\n \n total = [0]*len(x)\n for CI in range (0,len(CL.curves)):\n trace = CL[CI].normal_dist(x)\n total += trace\n ax.lines[CI+1].set_ydata(trace)\n ax.lines[len(CL.curves)+1].set_ydata(CL.get_residuals())\n ax.lines[len(CL.curves)+2].set_ydata(total)\n ax2.lines[0].set_xdata(np.linspace(0,x.max(),len(error)))\n ax2.lines[0].set_ydata(np.log(error))\n ax2.set_ylim(np.log(error[len(error)-1])*1.1,np.log(error[0])*1.1)\n _disp.update(fig)\n else:\n mu *= 2 \n step /= 2\n if (step < 1E-15):\n break", "_____no_output_____" ], [ "fig, ax = plt.subplots();\n_disp = dsp.display(\"\", display_id=True)\nax.plot(x, y, '-', color = 'black');\nfor C in CL:\n ax.plot(x, C.normal_dist(x),'--', label = 'Curve Fit');\n ax.fill_between(x, C.normal_dist(x), alpha = 0.4)\n\nax.plot(x, CL.get_residuals(), '-', label = 'residuals');\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.set_ylim(0,y_rn.max())\nax.set_title(\"Fit Results\")\nax.legend()\nplt.show()\n\nfor index in range(0, len(CL.curves)):\n C = CL[index]\n print(\"Curve \"+ str(index)+\" Result:\"+\n \"\\n\\tAmplitude:\\t\"+str(C.amp)+\n \"\\n\\tMean:\\t\\t\"+str(C.mean)+\n \"\\n\\tStdDev:\\t\\t\"+str(C.sd)+\"\\n\\n\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0baed35e6ded22ebf33b3fa3e4cb68b559cd482
24,196
ipynb
Jupyter Notebook
homework_5.ipynb
pavanalikana/Visit-the-Wikipedia-hyperlinks-graph-
3780ad54e01b68151e3505c97f4a93d85afcd0b6
[ "MIT" ]
null
null
null
homework_5.ipynb
pavanalikana/Visit-the-Wikipedia-hyperlinks-graph-
3780ad54e01b68151e3505c97f4a93d85afcd0b6
[ "MIT" ]
null
null
null
homework_5.ipynb
pavanalikana/Visit-the-Wikipedia-hyperlinks-graph-
3780ad54e01b68151e3505c97f4a93d85afcd0b6
[ "MIT" ]
null
null
null
31.963012
635
0.575591
[ [ [ "<div>\n <h1 style=\"margin-top: 50px; font-size: 33px; text-align: center\"> Homework 5 - Visit the Wikipedia hyperlinks graph! </h1>\n <br>\n <div style=\"font-weight:200; font-size: 20px; padding-bottom: 15px; width: 100%; text-align: center;\">\n <right>Maria Luisa Croci, Livia Lilli, Pavan Kumar Alikana</right>\n <br>\n </div>\n <hr>\n</div>", "_____no_output_____" ], [ "# RQ1", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport json\nimport pickle\nfrom tqdm import tqdm\n\nfrom collections import defaultdict\nfrom heapq import *\nimport numpy as np\nimport collections\nimport networkx as nx", "_____no_output_____" ] ], [ [ "For our first requests, we can use 2 different approaches:\n\n* We can start from the file building a dictionary that describe our graph; we do it because we will need this dictionary for the request 2;\n\n* Or, better, we can use an easy command <b>nx.info</b>, to get all the informations we need.\n\nSo let's see!\n", "_____no_output_____" ], [ "## Approach 1", "_____no_output_____" ], [ "Let's start downloading <a href=\"https://drive.google.com/file/d/1ghPJ4g6XMCUDFQ2JPqAVveLyytG8gBfL/view\">Wikicat hyperlink graph</a> . \n\nIt is a reduced version of the one we can find on SNAP. \n\nEvery row is an <b>edge</b>. The two elements of each row are the <b>nodes</b>, in particular the first is the <b> source</b>, the second represents the <b>destination</b>.\n\nSo, our first goal is open and read the file with python, and split each line with the new line charachter.\nThen we take all the <i>source nodes</i> for each row, and we put them as keys into our <b>graph</b> dictionary. The values will be all the correspondent destination nodes.\n\nBut we have not done! Infact our scope is to analyze the graph, in particular discovering the following informations:\n\n* If it is direct or not;\n\n* The number of nodes;\n\n* The number of edges;\n\n* The average node degree. Is the graph dense?\n\nTo do this we want that our dictionary has as keys <u>all the nodes</u>, sources and destinations, and for now we have just the first ones. So we add as new keys all the nodes that are just destinations, putting as values empty lists.\n\n\nNow we have the dictionary with all the nodes of our graph as keys, and as values there are all the eventual destinations!", "_____no_output_____" ] ], [ [ "F = open('wiki-topcats-reduced.txt','r') \nall_rows = F.read().split('\\n')\n\ngraph = {}\nfor row in all_rows:\n row = row.split(\"\\t\")\n if row[0] not in graph:\n try:\n graph[row[0]] = [row[1]]\n except:\n pass\n else:\n graph[row[0]].append(row[1])\n \n ", "_____no_output_____" ], [ "lista = []\nfor l in graph.values():\n lista+= l\n ", "_____no_output_____" ], [ "for node in lista:\n if node not in graph:\n graph[node] = []\n else:\n pass", "_____no_output_____" ] ], [ [ "So, what can we say?\n\n* We are in a case of <b>page ranking</b>. So for definition we have nodes representing sources and destinations, with edges with a particular direction. In other words, our graph has a set of edges which are <i>ordered pairs</i> of nodes, and for the graph theory we have a <b>directed graph</b>.\n\n\n* The number of nodes is <u>461193</u>. It's just the number of keys of our dictionary.\n\n\n* The number of edges is <u>2645247</u> and it's computed looking at all the lenghts of the <b>adjacency list</b>.\n\n\n* In graph theory, the <b>degree</b> (or <i>valency</i>) of a vertex of a graph is the number of edges incident to the vertex. 
We need the <b>average node degree</b>, so we compute the ratio between the number of edges and the number of nodes. It is <u>5.735661642739591</u>.", "_____no_output_____" ], [ "#### Number of nodes", "_____no_output_____" ] ], [ [ "V = list(graph.keys())\nn_nodes = len(V)\nn_nodes", "_____no_output_____" ] ], [ [ "#### Number of edges", "_____no_output_____" ] ], [ [ "n_edges = 0\nfor l in graph.values():\n    n_edges += len(l)\nn_edges ", "_____no_output_____" ] ], [ [ "#### Average node degree", "_____no_output_____" ] ], [ [ "avg_degree = n_edges/n_nodes\navg_degree", "_____no_output_____" ] ], [ [ "## Approach 2", "_____no_output_____" ], [ "Alternatively, since we need the average in and out degree because our graph is directed, we can use the simple command nx.info as follows, in order to obtain the basic information.\n\nFirst we import the graph from the reduced edge list file, indicating with nx.DiGraph that what we want is a directed graph.", "_____no_output_____" ] ], [ [ "graph = nx.read_edgelist(\"wiki-topcats-reduced.txt\", delimiter=\"\\t\", create_using=nx.DiGraph())\nprint(nx.info(graph))", "_____no_output_____" ] ], [ [ "**Is the graph dense?**\n\nWith the following formula $D=\\frac{E}{N(N-1)}$ we obtain a value that can go from 0 up to 1. It measures the probability that any pair of vertices is connected, so technically if the density is close to 1 the number of edges is close to the maximal number of edges, and vice versa if the density is close to 0 we have a graph with only a few edges (called a sparse graph).\n\n", "_____no_output_____" ] ], [ [ "nx.density(graph)", "_____no_output_____" ] ], [ [ "As we could expect, according to the number of nodes and edges that we already know, the density is very low, which means that our graph is sparse.", "_____no_output_____" ], [ "# RQ2", "_____no_output_____" ], [ "Let's start by creating a dictionary called <b>categories</b> where, for every category taken from the <i>wiki-topcats-categories.txt</i> file, we have the list of all its articles. But attention! We must take into account all the categories that have a number of articles greater than <b>3500</b>. So we filter our dictionary, considering the categories with more than 3500 articles. Moreover, we take just the articles that are the result of the intersection between the set of articles of the category and the articles of our <b>graph</b>; in other words, we don't consider those nodes that are in our graph but not in our categories!\n\n\nWe also create a dictionary called <b>inv_dic</b> that shows, for every node (article), the set of all its associated categories. \n", "_____no_output_____" ] ], [ [ "C = open('wiki-topcats-categories.txt','r') ", "_____no_output_____" ], [ "categories = {}\nfor line in C.readlines():\n    l = line.split(' ')\n    cat = l[0].replace(\"Category:\",\"\").replace(\";\", \"\")\n    art = l[1:]\n    art[-1] = art[-1].replace(\"\\n\",\"\")\n    if len(art) >= 3500:\n        categories[cat]= set(art).intersection(set(V))\n\n", "_____no_output_____" ], [ "all_set = categories.values()\nall_nodes = []\nfor s in all_set:\n    all_nodes += s\ninv_dic = {}\nfor node in all_nodes:\n    for cat in categories:\n        if node in categories[cat] and node not in inv_dic:\n            inv_dic[node] = [cat]\n        elif node in categories[cat] and node in inv_dic and cat not in inv_dic[node]:\n            inv_dic[node].append(cat)\n        else:\n            pass\n", "_____no_output_____" ] ], [ [ "## Block Ranking ", "_____no_output_____" ], [ "Our scope now is to take in input a category $C_0 = \\{article_1, article_2, \\dots \\}$. 
Then we want to rank all of the nodes according to the following criterion:\n\nObtain a <b>block-ranking</b>, where the blocks are represented by the categories.\nThe first category of the rank, $C_0$, always corresponds to the input category. The order of the remaining categories is given by: $$distance(C_0, C_i) = median(ShortestPath(C_0, C_i))$$", "_____no_output_____" ], [ "How do we do that? At first we create the functions we need.\n\nOur input category is 'Year_of_birth_unknown' by convention, because it is the one with the smallest number of nodes.\n\n* The first function we write is <b>ShortestPath</b>, which takes as input a node (of the input category) and our graph. It computes the distances using a breadth-first visit of the graph. For this we apply the <b><i>BFS</i></b> algorithm, which is a strategy for searching graph data structures. It starts at the <i>tree root</i> (or some arbitrary node of a graph called the <i>search key</i>), and it explores all of the neighbor nodes at the present depth prior to moving on to the nodes at the next depth level. The gif below shows this concept.\n\nSo the ShortestPath function creates a dictionary where the keys are the nodes (including the input node) and their values are the distances from the node of the input category. \n\nThe distance from the node of the input category to itself is written as zero. The others are initialized as -1, and then eventually updated.\n\n\n* Now it's the turn of the <b>createDistancesDict</b> function, which takes 4 elements as input: the input category, the graph, the <i>categories</i> dictionary and finally the <i>inv_dic</i>. In simple words, it applies the ShortestPath function to every node of the input category, creating a dictionary where each key is one of these nodes, and the value is a dictionary where for every other node of the graph there is the distance from the starting node of C0.\n\n\n* Now we create the <b>dictDistanceCi</b> dictionary, where we want to show for each category a list of all the distances of the corresponding nodes from the nodes of the input category. Of course we don't need the distances among the nodes of the input category, so we don't consider them.\n\n\n* At the end of our process, we compute for each category (taken from the preceding dictionary) the <b>median</b> of the corresponding distances. Then we add to an Ordered Dictionary called <b>rank</b> each category with its median value. 
So we obtain our <b>BLOCK RANKING</b>.\n\n\n", "_____no_output_____" ], [ "<img src=\"https://upload.wikimedia.org/wikipedia/commons/5/5d/Breadth-First-Search-Algorithm.gif\">", "_____no_output_____" ] ], [ [ "input_category = input()\n", "_____no_output_____" ], [ "def ShortestPath(c0, graph):\n    queue = []\n    queue.append(c0)\n    \n    distanceDict = dict()\n    for node in graph:\n        distanceDict[node] = -1\n    distanceDict[c0] = 0\n\n    while queue:\n        vertex = queue.pop(0)\n        for i in graph[vertex]:\n            if distanceDict[i] == -1:\n                queue.append(i)\n                distanceDict[i] = distanceDict[vertex] + 1\n    return distanceDict\n    ", "_____no_output_____" ], [ "def calculateMedian(lista):\n    lung = len(lista)\n    #ordinata = sorted(lista)\n    ordinata = lista\n    if(lung % 2 != 0):\n        return ordinata[lung/2]\n    else:\n        return (ordinata[lung/2]) + (ordinata[lung/2 + 1]) /2 ", "_____no_output_____" ], [ "from collections import OrderedDict\nimport pickle\n\ndef createDistancesDict(c0, graph, dizionarioCatNodi, listNode):\n    \n    #listNode is a dictionary <article, [categories]>\n    \n    #Take the list of nodes of category 0 as Category0\n    Category0 = dizionarioCatNodi[c0]\n    \n    #Dictionary where for each key (article in C0) there is a dictionary (node, distance) with the distance to all the other nodes \n    dictDistances = dict()\n    \n    for node in tqdm(Category0):\n        try:\n            dictDistances[node] = ShortestPath(node, graph)\n        except Exception as e: print(e)\n    \n    with open(\"distance_dict.p\", 'wb') as handle:\n        pickle.dump(dictDistances, handle, protocol=pickle.HIGHEST_PROTOCOL)", "_____no_output_____" ], [ "createDistancesDict(input_category, graph, categories, inv_dic)", "_____no_output_____" ], [ "with open(\"distance_dict.p\", 'rb') as handle:\n    dist_dict = pickle.load(handle)", "_____no_output_____" ], [ "dictDistanceCi = dict()\n#initialize the distances from C0 for every category to an empty list\nfor cat in categories:\n    dictDistanceCi[cat] = []", "_____no_output_____" ], [ "#for every cat the distances of its nodes from nodes of C0\nfor node in dist_dict:\n    for node2 in dist_dict[node]:\n        for cat in inv_dic[node2]:\n            if cat != inv_dic[node]:\n                dictDistanceCi[cat].append(dist_dict[node][node2])\n\nwith open(\"dictDistanceCi.p\", 'ab') as handle:\n    pickle.dump(dictDistanceCi, handle, protocol=pickle.HIGHEST_PROTOCOL)", "_____no_output_____" ], [ "with open(\"dictDistanceCi.p\", 'rb') as handle:\n    dictDistanceCi = pickle.load(handle)", "_____no_output_____" ], [ "rank = OrderedDict()\nfor cat in tqdm(dictDistanceCi):\n    distance = np.median(dictDistanceCi[cat])\n    rank[cat] = distance\n\nrank['Year_of_birth_unknown'] = 0", "_____no_output_____" ], [ "block_rank = {}\nfor tupla in rank:\n    block_rank[tupla[0]] = tupla[1]", "_____no_output_____" ], [ "for el in block_rank:\n    if block_rank[el] == -1.0:\n        block_rank[el] = 10000.0\nblock_rank = sorted(block_rank.items(), key=lambda x: x[1])", "_____no_output_____" ], [ "block_rank", "_____no_output_____" ] ], [ [ "Having obtained the Ordered Dictionary <b>rank</b>, we notice that there are some categories with median equal to -1. This means that these categories are not reachable from the input category, and so the distance values between their nodes and the ones of the input category didn't change the initial value of -1 assigned during the initialization in the BFS. 
For this reason we give them a big value, for example 10000, so that in the block rank they will stay at the end.", "_____no_output_____" ], [ "## Sorting nodes of category", "_____no_output_____" ], [ "Once we obtain the <i>block ranking vector</i>, we want to sort the nodes in each category. The way we should sort them is the following...", "_____no_output_____" ], [ " We have to compute the subgraph induced by $C_0$. Then, for each node, we compute the sum of the weights of the <b>in-edges</b>. The nodes will be ordered by this score.\n The following image explains how to do it for each step.\n \n In other words, we have to consider a category, and for that category we must compute for each node the number of in-edges, but considering just those that have their source in the same category! For example, in the first image, the B node of the category \"0\" has got 2 in-edges, but only one is from a node of the same category.", "_____no_output_____" ], [ "<img src=\"https://raw.githubusercontent.com/CriMenghini/ADM-2018/master/Homework_5/imgs/algorithm.PNG\">", "_____no_output_____" ], [ "For this scope we have created a function called <b>in_edges</b> that implements our idea of sorting, given as input a category. \n\nSo we apply this function to each category, saving the corresponding dictionary in a <i>pickle</i> file, naming it <i>\"cat_i.p\"</i> where i is the index of the i-th category. To keep track of the index-category correspondence, we create a dictionary where for each category we have its index; we call it <b>indexing</b>. ", "_____no_output_____" ], [ "What does our <i>in_edges()</i> function do exactly? \n\nWell, we can see that for a node <i>n1</i> of the chosen category, it starts a counter and for every node <i>n2</i> of our graph checks two important things:\n\n* if there is an edge from <i>n2</i> to <i>n1</i>;\n\n* if <i>n2</i>, the source node, is in the same category as <i>n1</i>;\n\nIf these 2 points are respected, then it increments the counter of <i>n1</i>. \n\nAt the end, it saves in a dictionary each node n1 and its counter, or in other words, the number of its in-edges.\nBut it's not finished! We want to sort the nodes in the dictionary by their values, so we just do it. Now the output is ready!\n\n\nWe have reported as examples some of our dictionaries saved on pickle. 
In particular you can see the one for \"category 7\" (which in our block ranking corresponds to the <b>Category0</b>).", "_____no_output_____" ] ], [ [ "all_cat = list(categories.keys())", "_____no_output_____" ], [ "def in_edges(cat, graph):\n    n_cat = categories[cat]\n    d = {}\n    for n1 in tqdm(n_cat):\n        count = 0\n        for n2 in graph:\n            if n1 in graph[n2] and n2 in n_cat:\n                count += 1\n        d[n1] = count\n    d = sorted(d.items(), key=lambda x: x[1])\n    return d\n    ", "_____no_output_____" ], [ "for i in range(len(all_cat)):\n    dd = in_edges(all_cat[i], graph)\n    \n    #pickle.dump(dd, open( \"cat\"+str(i)+\".p\", \"wb\" ) )\n    with open(\"cat\"+str(i)+\".p\", 'wb') as handle:\n        pickle.dump(dd, handle, protocol=pickle.HIGHEST_PROTOCOL)", "_____no_output_____" ], [ "indexing = {}\nfor i in range(len(all_cat)):\n    indexing[all_cat[i]] = i\n    ", "_____no_output_____" ], [ "indexing", "_____no_output_____" ] ], [ [ "Here is the indexing dictionary, which allows us to find the in_edges dictionary of a particular category, starting from its index.", "_____no_output_____" ], [ "Here, as promised, we have the dictionary for the category 0 of our block ranking, or in other words, the category7 of our indexing.\n\nFor convenience we print just a portion of it, in particular a part where we can see the moment where the score changes.", "_____no_output_____" ] ], [ [ "with open(\"cat\"+str(7)+\".p\", 'rb') as handle:\n    dd7 = pickle.load(handle)", "_____no_output_____" ], [ "print(dd7[1600:1700])\n", "_____no_output_____" ] ], [ [ "<img src=\"http://scalar.usc.edu/works/querying-social-media-with-nodexl/media/Network_theoryarticlenetworkonWikipedia1point5deg.jpg\" height=\"200\" width=\"400\">", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d0bafd03d9c7c52438815ce408d8b03f5538275b
7,247
ipynb
Jupyter Notebook
sparse/repos/rodluger/starry/binder/Basics 5 - Multi-wavelength maps.ipynb
yuvipanda/mybinder.org-analytics
7b654e3e21dea790505c626d688aa15640ea5808
[ "BSD-3-Clause" ]
1
2021-03-18T23:33:35.000Z
2021-03-18T23:33:35.000Z
sparse/repos/rodluger/starry/binder/Basics 5 - Multi-wavelength maps.ipynb
yuvipanda/mybinder.org-analytics
7b654e3e21dea790505c626d688aa15640ea5808
[ "BSD-3-Clause" ]
17
2020-01-28T22:33:27.000Z
2021-06-10T21:05:49.000Z
sparse/repos/rodluger/starry/binder/Basics 5 - Multi-wavelength maps.ipynb
yuvipanda/mybinder.org-analytics
7b654e3e21dea790505c626d688aa15640ea5808
[ "BSD-3-Clause" ]
1
2021-07-17T12:55:22.000Z
2021-07-17T12:55:22.000Z
24.400673
377
0.551401
[ [ [ "# Multi-wavelength maps\nNew in version `0.2.1` is the ability for users to instantiate wavelength-dependent maps. Nearly all of the computational overhead in `starry` comes from computing rotation matrices and integrals of the Green's basis functions, which makes it **really** fast to compute light curves at different wavelengths if we simply recycle the results of all of these operations.\n\nBy \"wavelength-dependent map\" we mean a map whose spherical harmonic coefficients are a function of wavelength. Specifically, instead of setting the coefficient at $l, m$ to a scalar value, we can set it to a vector, where each element corresponds to the coefficient in a particular wavelength bin. Let's look at some examples.", "_____no_output_____" ], [ "## Instantiating multi-wavelength maps", "_____no_output_____" ], [ "The key is to pass the `nwav` keyword when instantiating a `starry` object. For simplicity, let's do `nwav=3`, corresponding to three wavelength bins.", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "from starry import Map\nmap = Map(lmax=2, nwav=3)", "_____no_output_____" ] ], [ [ "Recall that the map coefficients are now *vectors*. Here's what the coefficient *matrix* now looks like:", "_____no_output_____" ] ], [ [ "map.y", "_____no_output_____" ] ], [ [ "Each row corresponds to a given spherical harmonic, and each column to a given wavelength bin. Let's set the $Y_{1,0}$ coefficient:", "_____no_output_____" ] ], [ [ "map[1, 0] = [0.3, 0.4, 0.5]", "_____no_output_____" ] ], [ [ "Here's our new map vector:", "_____no_output_____" ] ], [ [ "map.y", "_____no_output_____" ] ], [ [ "To visualize the map, we can call `map.show()` as usual, but now we actually get an *animation* showing us what the map looks like at each wavelength.", "_____no_output_____" ] ], [ [ "map.show()", "_____no_output_____" ] ], [ [ "(*Caveat: the* `map.animate()` *routine is disabled for multi-wavelength maps.*)", "_____no_output_____" ], [ "Let's set a few more coefficients:", "_____no_output_____" ] ], [ [ "map[1, -1] = [0, 0.1, -0.1]\nmap[2, -1] = [-0.1, -0.2, -0.1]\nmap[2, 2] = [0.3, 0.2, 0.1]", "_____no_output_____" ], [ "map.show()", "_____no_output_____" ] ], [ [ "OK, our map now has some interesting wavelength-dependent features. Let's compute some light curves! First, a simple phase curve:", "_____no_output_____" ] ], [ [ "import numpy as np\ntheta = np.linspace(0, 360, 1000)\nmap.axis = [0, 1, 0]\nphase_curve = map.flux(theta=theta)", "_____no_output_____" ] ], [ [ "Let's plot it. The blue line is the first wavelength bin, the orange line is the second bin, and the green line is the third:", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as pl\n%matplotlib inline\n\nfig, ax = pl.subplots(1, figsize=(14, 6))\nax.plot(theta, phase_curve);\nax.set_xlabel(r'$\\theta$ (degrees)', fontsize=16)\nax.set_ylabel('Flux', fontsize=16);", "_____no_output_____" ] ], [ [ "We can also compute an occultation light curve:", "_____no_output_____" ] ], [ [ "xo = np.linspace(-1.5, 1.5, 1000)\nlight_curve = map.flux(xo=xo, yo=0.2, ro=0.1)", "_____no_output_____" ] ], [ [ "Let's plot it. 
This time we normalize the light curve by the baseline for better plotting, since the map has a different total flux at each wavelength:", "_____no_output_____" ] ], [ [ "fig, ax = pl.subplots(1, figsize=(14, 6))\nax.plot(theta, light_curve / light_curve[0]);\nax.set_xlabel('Occultor position', fontsize=16)\nax.set_ylabel('Flux', fontsize=16);", "_____no_output_____" ] ], [ [ "As we mentioned above, there's not that much overhead to computing light curves in many different wavelength bins. Check it out:", "_____no_output_____" ] ], [ [ "import time\nnp.random.seed(1234)\ndef runtime(nwav, N=10):\n total_time = 0\n xo = np.linspace(-1.5, 1.5, 1000)\n for n in range(N):\n map = Map(lmax=2, nwav=nwav)\n map[:, :] = np.random.randn(9, nwav)\n tstart = time.time()\n map.flux(xo=xo, yo=0.2, ro=0.1)\n total_time += time.time() - tstart\n return total_time / N", "_____no_output_____" ], [ "nwav = np.arange(1, 50)\nt = [runtime(n) for n in nwav]", "_____no_output_____" ], [ "fig, ax = pl.subplots(1, figsize=(14, 7))\nax.plot(nwav, t, '.')\nax.plot(nwav, t, '-', color='C0', lw=1, alpha=0.3)\nax.set_xlabel('nwav', fontsize=16)\nax.set_ylabel('time (s)', fontsize=16);\nax.set_ylim(0, 0.003);", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0bb00cb00aa5470f32fd50ea254a389f3dc6598
65,357
ipynb
Jupyter Notebook
Models/random_forest.ipynb
fereshtehaghaei/Machine-Learning-Exoplanets
2730ed27d1b85f8951523e5b53f3d194765819c4
[ "BSD-4-Clause-UC" ]
null
null
null
Models/random_forest.ipynb
fereshtehaghaei/Machine-Learning-Exoplanets
2730ed27d1b85f8951523e5b53f3d194765819c4
[ "BSD-4-Clause-UC" ]
null
null
null
Models/random_forest.ipynb
fereshtehaghaei/Machine-Learning-Exoplanets
2730ed27d1b85f8951523e5b53f3d194765819c4
[ "BSD-4-Clause-UC" ]
null
null
null
39.51451
1,370
0.446808
[ [ [ "## Machine Learning- Exoplanet Exploration", "_____no_output_____" ], [ "#### Extensive Data Dictionary: https://exoplanetarchive.ipac.caltech.edu/docs/API_kepcandidate_columns.html\n\nHighlightable columns of note are:\n\n* kepoi_name: A KOI is a target identified by the Kepler Project that displays at least one transit-like sequence within Kepler time-series photometry that appears to be of astrophysical origin and initially consistent with a planetary transit hypothesis\n\n* kepler_name: [These names] are intended to clearly indicate a class of objects that have been confirmed or validated as planets—a step up from the planet candidate designation.\n\n* koi_disposition: The disposition in the literature towards this exoplanet candidate. One of CANDIDATE, FALSE POSITIVE, NOT DISPOSITIONED or CONFIRMED.\n\n* koi_pdisposition: The disposition Kepler data analysis has towards this exoplanet candidate. One of FALSE POSITIVE, NOT DISPOSITIONED, and CANDIDATE.\n\n* koi_score: A value between 0 and 1 that indicates the confidence in the KOI disposition. For CANDIDATEs, a higher value indicates more confidence in its disposition, while for FALSE POSITIVEs, a higher value indicates less confidence in that disposition.\n", "_____no_output_____" ] ], [ [ "# # Update sklearn to prevent version mismatches\n# !pip install sklearn --upgrade\n# # install joblib\n# !pip install joblib", "_____no_output_____" ] ], [ [ "### Import Dependencies ", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n# Hide warning messages in notebook\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "# Read the CSV and Perform Basic Data Cleaning", "_____no_output_____" ] ], [ [ "# Read/Load CSV file\ndf = pd.read_csv(\"exoplanet_data.csv\")\n\n# Drop the null columns where all values are null\ndf = df.dropna(axis='columns', how='all')\n\n# Drop the null rows\ndf = df.dropna()\ndf.head()", "_____no_output_____" ] ], [ [ "## Basic Statistic Details", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ] ], [ [ "# Select Features (columns)\n* Feature Selection: Removing irrelevant feature results in better performing model that is easeir to understands & model runs faster\n", "_____no_output_____" ] ], [ [ "target_names = df[\"koi_disposition\"].unique()\n#target_names\nprint(df[\"koi_disposition\"].unique())", "['CONFIRMED' 'FALSE POSITIVE' 'CANDIDATE']\n" ], [ "# Assign X (Independant data) and y (Dependant target)\n\n# Set X equal to the entire data set, except for the first column\nX = df.iloc[:, 1:]\n# X.head()\n\n# Set y equal to the first column\ny = df.iloc[:,0].values.reshape(-1, 1)\n# y.head()", "_____no_output_____" ], [ "from sklearn.ensemble import ExtraTreesClassifier\n\n# Search for top 10 features according to feature importances\nmodel = ExtraTreesClassifier()\nmodel.fit(X,y)\nmodel.feature_importances_\n\n# sorted(zip(model.feature_importances_, X), reverse=True)", "_____no_output_____" ], [ "# Store the top (20) features as a series, using the column headers as the index\ntop_feat = pd.Series(model.feature_importances_, index=X.columns).nlargest(10)\ntop_feat", "_____no_output_____" ], [ "# Set features based on feature importances\nX = df[top_feat.index]\n\n# Use `koi_disposition` for the y values\ny = df['koi_disposition']\n\n# y = df['koi_disposition'].values.reshape(-1, 1)", "_____no_output_____" ] ], [ [ "# Create a Train Test Split", "_____no_output_____" ] ], [ [ 
"from sklearn.model_selection import train_test_split\n\n# Split the data into smaller buckets for training and testing\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\nX_train.head()", "_____no_output_____" ], [ "# X and y Train shape have 5243 rows (80% of data)\nX_train.shape, y_train.shape", "_____no_output_____" ], [ "# X and y Test shape have 1748 rows (20% of data)\nX_test.shape, y_test.shape", "_____no_output_____" ] ], [ [ "# Pre-processing\n\nScale the data using the MinMaxScaler\n\nMinMaxScaler: \n * A way to normalize the input features/variables\n * Features will be transformed into the range\n * Scales the range of fetures from 0 to 1\n", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import MinMaxScaler\n\n# Create a MinMaxScaler model and fit it to the training data\nX_scaler = MinMaxScaler().fit(X_train)\n\n# Transform the training and testing data using the X_scaler\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)\n\n#print(np.matrix(X_test_scaled))", "_____no_output_____" ] ], [ [ "# Train the Model \n* Used Random Forest Model\n\n", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestClassifier\n\n# Create a Randome Forest Model\nmodel = RandomForestClassifier(n_estimators=200)\n\n# Train (Fit) the model to the data\nmodel.fit(X_train_scaled, y_train)\n\n# Score/Validate the model using the test data\nprint(f\"Training Data Score: {'%.3f' % model.score (X_train_scaled, y_train)}\")\nprint(f\"Testing Data Score: {'%.3f' % model.score(X_test_scaled, y_test) }\")\n\n# Printed the r2 score for the test data, testing is lower than training which is good we are not over feeding", "Training Data Score: 1.000\nTesting Data Score: 0.891\n" ] ], [ [ "## Model Accuracy", "_____no_output_____" ] ], [ [ "# Predicting the Test set results\ny_predic = model.predict(X_test)\n\n# Making the confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_predic)\n\nfrom sklearn.metrics import accuracy_score\naccuracy = accuracy_score(y_test, y_predic)\n\nprint('Test Model Accurracy: %.3f' % (accuracy))", "_____no_output_____" ] ], [ [ "## Prediction", "_____no_output_____" ] ], [ [ "predictions = model.predict(X_test_scaled)\n\n# print(f\"first 10 Predictions{predictions[:10].tolist()}\")\n# print(f\"first 10 Actual{y_test[:10].tolist()}\")\n\n# Printing into a Dataframe (y_test can't be reshap on top)\ndf_pred = pd.DataFrame({\"Actual\":y_test, \"Predicted\":predictions}) \ndf_pred.head()", "_____no_output_____" ] ], [ [ "# Hyperparameter Tuning\n\nUse `GridSearchCV` to tune the model's parameters", "_____no_output_____" ] ], [ [ "# Check Random Forest Model parameters that can be used for Tuning\nmodel = RandomForestClassifier()\nmodel", "_____no_output_____" ], [ "from sklearn.model_selection import GridSearchCV\n\nparam_grid = {'max_depth': [1, 5, 15, 25, 35],\n 'n_estimators': [100, 300, 500, 700, 1000]}\n\ngrid = GridSearchCV(model, param_grid, verbose=3)\n", "_____no_output_____" ], [ "# Train the model with GridSearch\n\ngrid.fit(X_train_scaled, y_train)", "Fitting 3 folds for each of 25 candidates, totalling 75 fits\n[CV] max_depth=1, n_estimators=100 ...................................\n" ], [ "# List the best parameters for this dataset\nprint('Best Parameter: ',grid.best_params_)\n\n# List the best score\nprint('Best Score: %.3f' % grid.best_score_)", "Best Parameter: {'max_depth': 15, 'n_estimators': 700}\nBest Score: 0.8800305168796491\n" ], [ 
"# Score the model\nprint('Model Score: %.3f' % grid.score(X_test_scaled, y_test))", "Model Score: 0.8935926773455377\n" ], [ "# Make predictions with the hypertuned model\npredictions = grid.predict(X_test_scaled)\ndf_grid = pd.DataFrame({\"Actual\":y_test, \"Predicted\":predictions}) \ndf_grid.head()", "_____no_output_____" ], [ "# Calculate classification report\n# print(np.array(y_test))\n\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test, predictions,\n target_names=target_names))", " precision recall f1-score support\n\n CONFIRMED 0.82 0.72 0.77 404\nFALSE POSITIVE 0.76 0.83 0.80 435\n CANDIDATE 0.99 1.00 0.99 909\n\n accuracy 0.89 1748\n macro avg 0.86 0.85 0.85 1748\n weighted avg 0.89 0.89 0.89 1748\n\n" ] ], [ [ "# Save the Model\n* Using joblib", "_____no_output_____" ] ], [ [ "import joblib\nfilename = 'RandomForestClassifier.sav'\njoblib.dump(RandomForestClassifier, filename)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d0bb0b5d2deacdfaf9ce1f313ce2d74138777bae
8,229
ipynb
Jupyter Notebook
Bank_uppg/DataSource.ipynb
theokinell02/Bank_uppg
cfa85e3ce23e721b6f443d1b7a4813803534d78a
[ "MIT" ]
null
null
null
Bank_uppg/DataSource.ipynb
theokinell02/Bank_uppg
cfa85e3ce23e721b6f443d1b7a4813803534d78a
[ "MIT" ]
null
null
null
Bank_uppg/DataSource.ipynb
theokinell02/Bank_uppg
cfa85e3ce23e721b6f443d1b7a4813803534d78a
[ "MIT" ]
null
null
null
40.940299
138
0.447442
[ [ [ "path = \"D:\\\\School\\\\Bank_uppg_mockdata.txt\"\n\nclass DataSource:\n def datasource_conn():\n text_file = open(path)\n if(text_file.readable):\n text_file.close()\n return [True, \"Connection successful\"]\n text_file.close()\n return[False, \"Connection unsuccessful\"]\n \n def get_all(self):\n text_file = open(path)\n customer_list = text_file.readlines()\n text_file.close()\n return customer_list\n \n def update_by_id(self, id, target, input):\n flag = True\n customer_list = self.get_all()\n i = 0\n out_customer = \"\"\n for customer in customer_list:\n if(customer[0:customer.index(\":\")] == str(id)):\n start_index = customer.index(\":\")\n if(target == \"name\"):\n customer = customer[0:start_index + 1] + input + customer[customer.index(\":\", start_index + 1):] + \"\\n\"\n flag = False\n out_customer = customer\n elif(target == \"pnr\"):\n start_index = customer.index(\":\", start_index + 1)\n customer = customer[0:start_index + 1] + input + customer[customer.index(\":\", start_index + 1):] + \"\\n\"\n flag = False\n out_customer = customer\n elif(target == 0):\n for x in range(4):\n start_index = customer.index(\":\", start_index + 1)\n customer = customer[0:start_index + 1] + input + \"\\n\"\n flag = False\n out_customer = customer\n else:\n for x in range(0,target):\n start_index = customer.index(\"#\", start_index + 1)\n start_index = customer.index(\":\", start_index + 1)\n start_index = customer.index(\":\", start_index + 1)\n customer = customer[0:start_index + 1] + input + customer[customer.index(\":\", start_index + 1):] + \"\\n\"\n flag = False\n out_customer = customer\n customer_list[i] = customer\n i += 1\n if(flag):\n return -1\n text_file = open(path, \"w\")\n for customer in customer_list:\n text_file.write(customer)\n text_file.close()\n return out_customer\n \n def find_by_id(self, id):\n customer_list = self.get_all()\n for customer in customer_list:\n if(customer[0:customer.index(\":\")] == str(id)):\n return customer\n return -1\n \n def find_by_pnr(self, pnr):\n customer_list = self.get_all()\n start_index = 0\n for customer in customer_list:\n start_index = customer.index(\":\", start_index + 1)\n start_index = customer.index(\":\", start_index + 1)\n print(customer[start_index + 1:customer.index(\":\", start_index + 1) - 1])\n if(customer[start_index:customer.index(\":\")] == str(pnr)):\n return customer\n start_index = 0\n \n def remove_by_id(self, id):\n text_file = open(path, \"r\")\n customer_list = text_file.readlines()\n text_file.close()\n i = 0\n for customer in customer_list:\n if(customer[0:customer.index(\":\")] == str(id)):\n del customer_list[i]\n i += 1\n text_file = open(path, \"w\")\n for customer in customer_list:\n text_file.write(customer)\n text_file.close()\n \n def add_customer(self, id, name, pnr, acc_id):\n customer = str(id) + \":\" + name + \":\" + str(pnr) + \":\" + str(acc_id) + \":debit account:0.0\\n\"\n customer_list = self.get_all()\n customer_list.append(customer)\n text_file = open(path, \"w\")\n for customer in customer_list:\n text_file.write(customer)\n text_file.close()\n \n def add_account(self, id, acc_id):\n customer = self.find_by_id(id)\n temp_customer = customer\n customer = customer[:len(customer) - 1] + \"#\" + str(acc_id) + \":debit account:0.0\\n\"\n customer_list = self.get_all()\n i = 0\n for x in customer_list:\n if(x == temp_customer):\n customer_list[i] = customer\n break\n i += 1\n text_file = open(path, \"w\")\n for customer in customer_list:\n text_file.write(customer)\n text_file.close()\n \n 
def remove_account(self, id, acc_id):\n customer = self.find_by_id(id)\n start_index = 0\n for x in range(3):\n start_index = customer.index(\":\", start_index + 1)\n if(customer.find(str(acc_id), start_index, customer.index(\":\", start_index + 1)) != -1):\n if(\"#\" in customer):\n customer = customer[:start_index] + \":\" + customer[customer.index(\"#\") + 1:]\n else:\n self.remove_by_id(id)\n elif(\"#\" in customer[customer.index(\"#\" + str(acc_id), customer.index(\"#\")) + len(str(acc_id)) + 1:]):\n start_index = customer.index(\"#\" + str(acc_id), customer.index(\"#\"))\n customer = customer[:start_index] + customer[customer.index(\"#\", start_index + 1):]\n elif(customer[customer.rindex(\"#\") + 1: customer.index(\":\", customer.rindex(\"#\"))] == str(acc_id)):\n customer = customer[:customer.rindex(\"#\")] + \"\\n\"\n else:\n return -1\n i = 0\n customer_list = self.get_all()\n for temp_customer in customer_list:\n if(temp_customer[0:temp_customer.index(\":\")] == str(id)):\n break\n i += 1\n customer_list[i] = customer\n text_file = open(path, \"w\")\n for customer in customer_list:\n text_file.write(customer)\n text_file.close()\nc = DataSource()\nprint(c.find_by_pnr(20020218))", "2002100\n1991111\n1986010\n1991121\n2002021\nNone\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
d0bb1a535441314278c95ead2d5434617ce5971b
3,373
ipynb
Jupyter Notebook
examples/example.ipynb
Christovis/wys-ars
bb15f2d392842f9b32de12b5db5c86079bc97105
[ "MIT" ]
3
2021-07-27T14:45:58.000Z
2022-01-31T21:09:46.000Z
examples/example.ipynb
Christovis/wys-ars
bb15f2d392842f9b32de12b5db5c86079bc97105
[ "MIT" ]
1
2021-11-03T10:47:45.000Z
2021-11-03T10:47:45.000Z
examples/example.ipynb
Christovis/wys-ars
bb15f2d392842f9b32de12b5db5c86079bc97105
[ "MIT" ]
1
2021-11-03T10:17:34.000Z
2021-11-03T10:17:34.000Z
27.422764
99
0.475541
[ [ [ "from pathlib import Path\n\nfrom wys_ars.particles.ecosmog import Ecosmog\nfrom wys_ars.simcoll import SimulationCollection as SimColl\nfrom wys_ars.power_spectra.power_spectrum import PowerSpectrum", "_____no_output_____" ], [ "dir_src = Path(__file__).parent.absolute()\nsim_config_times = dir_src / \"configs/particle_snapshot_info.h5\"\nsim_config_places = dir_src / f\"configs/dtfe/cvg_{cosmo}_simulation_collection.yaml\"", "_____no_output_____" ], [ "domain_level = 512\namr_levels = (9, 9)", "_____no_output_____" ], [ "# Initialise collection of simulations\nsimcoll = SimColl.from_file(sim_config_places, sim_config_times)\n\nsim_nrs = {\n \"sim1\": [5],\n \"sim2\": [5],\n \"sim3\": [5],\n \"sim4\": [5],\n \"sim5\": [7],\n}\n\n# Loop over each simulation in the collection\nfor sim_name, sim in simcoll.sim.items():\n # Flatten AMR mesh into domain layer mesh using DTFE\n sim.dtfe(\n snap_nrs=sim_nrs[sim_name],\n file_root=\"snap_\",\n quantities=[\"density_a\", \"velocity_a\", \"divergence_a\"],\n file_dsc={\n \"root\": \"snap_%03d.\" % sim_nrs[sim_name][0], \n \"extension\": None,\n },\n )\n \n # Read Ramses cpu output binary files for a snapshot and convert into pandas DataFrame\n if \"grav\" in sim.file_dsc[\"root\"]:\n fields = [\n \"x\",\n \"y\",\n \"z\",\n \"phi\",\n \"f1\",\n \"f2\",\n \"f3\",\n ]\n sim.compress_snapshot(\n amr_levels,\n domain_level,\n fields,\n sim.file_dsc[\"root\"],\n None,\n )\n \n # Get power spectrum of all simulations\n pk = PowerSpectrum(\"particles\", sim)\n file_dsc = {\"root\": \"density_dtfe\", \"extension\": \"npy\"}\n #file_dsc = {\"root\": \"div_velocity_dtfe\", \"extension\": \"npy\"}\n pk.compute(\n \"dtfe_matter\",\n #\"div_velocity\",\n None,\n file_dsc,\n None,\n save=True,\n )", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
d0bb230ba3976aa3120621e90b2e342f96a112c5
11,166
ipynb
Jupyter Notebook
Docs/Notebooks/FingerprintGenerators.ipynb
hryknkgw/rdkit
f01f819a1f800aeef3fa4999e247e9d665d99761
[ "BSD-3-Clause" ]
3
2020-03-30T04:00:52.000Z
2021-05-31T01:32:13.000Z
Docs/Notebooks/FingerprintGenerators.ipynb
hryknkgw/rdkit
f01f819a1f800aeef3fa4999e247e9d665d99761
[ "BSD-3-Clause" ]
1
2020-05-23T17:31:04.000Z
2020-05-26T06:52:47.000Z
Docs/Notebooks/FingerprintGenerators.ipynb
hryknkgw/rdkit
f01f819a1f800aeef3fa4999e247e9d665d99761
[ "BSD-3-Clause" ]
4
2020-03-30T04:00:53.000Z
2021-02-25T23:11:52.000Z
32.841176
325
0.629232
[ [ [ "# Fingerprint Generators", "_____no_output_____" ], [ "## Creating and using a fingerprint generator\n\nFingerprint generators can be created by using the functions that return the type of generator desired.", "_____no_output_____" ] ], [ [ "from rdkit import Chem\nfrom rdkit.Chem import rdFingerprintGenerator\n\nmol = Chem.MolFromSmiles('CC(O)C(O)(O)C')\ngenerator = rdFingerprintGenerator.GetAtomPairGenerator()\nfingerprint = generator.GetSparseCountFingerprint(mol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(non_zero)", "{541731: 1, 574497: 1, 574498: 1, 590881: 1, 590882: 1, 590945: 1, 1590306: 3, 1590307: 3, 1590369: 1, 1590370: 2, 1590401: 2, 1590402: 1, 1592354: 1, 1592355: 2}\n" ] ], [ [ "We can set the parameters for the fingerprint while creating the generator for it.", "_____no_output_____" ] ], [ [ "generator = rdFingerprintGenerator.GetAtomPairGenerator(minDistance = 1, maxDistance = 2, includeChirality = False)\nfingerprint = generator.GetSparseCountFingerprint(mol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(non_zero)", "{574497: 1, 574498: 1, 590881: 1, 590882: 1, 590945: 1, 1590306: 3, 1590369: 1, 1590370: 2, 1590401: 2, 1590402: 1, 1592354: 1}\n" ] ], [ [ "We can provide the molecule dependent arguments while creating the fingerprint.", "_____no_output_____" ] ], [ [ "fingerprint = generator.GetSparseCountFingerprint(mol, fromAtoms = [1])\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(non_zero)\n\nfingerprint = generator.GetSparseCountFingerprint(mol, ignoreAtoms = [1, 5])\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(non_zero)", "{574497: 1, 574498: 1, 590945: 1, 1590369: 1, 1590370: 2}\n{590881: 1, 590882: 1, 1590306: 2, 1590401: 1, 1590402: 1}\n" ] ], [ [ "## Types of fingerprint generators\n\nCurrently 4 fingerprint types are supported by fingerprint generators", "_____no_output_____" ] ], [ [ "generator = rdFingerprintGenerator.GetAtomPairGenerator()\nfingerprint = generator.GetSparseCountFingerprint(mol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(\"Atom pair\", non_zero)\n\ngenerator = rdFingerprintGenerator.GetMorganGenerator(radius = 3)\nfingerprint = generator.GetSparseCountFingerprint(mol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(\"Morgan\", non_zero)\n\ngenerator = rdFingerprintGenerator.GetRDKitFPGenerator()\nfingerprint = generator.GetSparseCountFingerprint(mol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(\"RDKitFingerprint\", non_zero)\n\ngenerator = rdFingerprintGenerator.GetTopologicalTorsionGenerator()\nfingerprint = generator.GetSparseCountFingerprint(mol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(\"TopologicalTorsion\", non_zero)", "Atom pair {541731: 1, 574497: 1, 574498: 1, 590881: 1, 590882: 1, 590945: 1, 1590306: 3, 1590307: 3, 1590369: 1, 1590370: 2, 1590401: 2, 1590402: 1, 1592354: 1, 1592355: 2}\nMorgan {864662311: 3, 1542631284: 2, 1542633699: 1, 1741045729: 1, 2245273601: 1, 2245277810: 1, 2246728737: 2, 2782665878: 1, 2927183216: 1, 3537119515: 1, 3537123720: 1}\nRDKitFingerprint {398441839: 4, 561308092: 2, 623990427: 1, 1524090560: 6, 1606685044: 2, 1636471275: 3, 1753257252: 1, 1940446997: 2, 2332326087: 1, 2880661462: 1, 2911990635: 1, 3060973103: 1, 3083228099: 1, 3473416248: 3, 3743603664: 1, 3768818763: 1, 3977409745: 3, 4274652475: 3, 4275705116: 3, 4279989780: 2}\nTopologicalTorsion {4303897120: 1, 12893570080: 1, 12893831712: 2, 12893831776: 2}\n" ] ], [ [ "## Invariant generators\n\nIt is possible to use a custom invariant generators while 
creating fingerprints. Invariant generators provide values to be used as invariants for each atom or bond in the molecule and these values affect the generated fingerprint.", "_____no_output_____" ] ], [ [ "simpleMol = Chem.MolFromSmiles('CCC')\n\ngenerator = rdFingerprintGenerator.GetRDKitFPGenerator()\nfingerprint = generator.GetSparseCountFingerprint(simpleMol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(\"RDKitFingerprint\", non_zero)\n\natomInvariantsGen = rdFingerprintGenerator.GetAtomPairAtomInvGen()\n\ngenerator = rdFingerprintGenerator.GetRDKitFPGenerator(atomInvariantsGenerator = atomInvariantsGen)\nfingerprint = generator.GetSparseCountFingerprint(simpleMol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(\"RDKitFingerprint\", non_zero)", "RDKitFingerprint {1940446997: 1, 4275705116: 2}\nRDKitFingerprint {578931652: 1, 2298572045: 2}\n" ] ], [ [ "Currently avaliable invariants generators are:", "_____no_output_____" ] ], [ [ "atomInvariantsGen = rdFingerprintGenerator.GetAtomPairAtomInvGen()\n\ngenerator = rdFingerprintGenerator.GetMorganGenerator(radius = 3, atomInvariantsGenerator = atomInvariantsGen)\nfingerprint = generator.GetSparseCountFingerprint(mol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(\"Morgan with AtomPairAtomInvGen\", non_zero)\n\natomInvariantsGen = rdFingerprintGenerator.GetMorganAtomInvGen()\n\ngenerator = rdFingerprintGenerator.GetMorganGenerator(radius = 3, atomInvariantsGenerator = atomInvariantsGen)\nfingerprint = generator.GetSparseCountFingerprint(mol)\nnon_zero = fingerprint.GetNonzeroElements()\n\n# Default for Morgan FP\nprint(\"Morgan with MorganAtomInvGen\", non_zero)\n\natomInvariantsGen = rdFingerprintGenerator.GetMorganFeatureAtomInvGen()\n\ngenerator = rdFingerprintGenerator.GetMorganGenerator(radius = 3, atomInvariantsGenerator = atomInvariantsGen)\nfingerprint = generator.GetSparseCountFingerprint(mol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(\"Morgan with MorganFeatureAtomInvGen\", non_zero)\n\natomInvariantsGen = rdFingerprintGenerator.GetRDKitAtomInvGen()\n\ngenerator = rdFingerprintGenerator.GetMorganGenerator(radius = 3, atomInvariantsGenerator = atomInvariantsGen)\nfingerprint = generator.GetSparseCountFingerprint(mol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(\"Morgan with RDKitAtomInvGen\", non_zero)\n\nbondInvariantsGen = rdFingerprintGenerator.GetMorganBondInvGen()\n\ngenerator = rdFingerprintGenerator.GetMorganGenerator(radius = 3, bondInvariantsGenerator = bondInvariantsGen)\nfingerprint = generator.GetSparseCountFingerprint(mol)\nnon_zero = fingerprint.GetNonzeroElements()\n\n# Default for Morgan FP\nprint(\"Morgan with MorganBondInvGen\", non_zero)", "Morgan with AtomPairAtomInvGen {33: 2, 35: 1, 36: 1, 97: 3, 523835848: 1, 618975071: 1, 2343097318: 1, 3205489706: 2, 3205489717: 1, 3205494725: 1, 3205494778: 1}\nMorgan with MorganAtomInvGen {864662311: 3, 1542631284: 2, 1542633699: 1, 1741045729: 1, 2245273601: 1, 2245277810: 1, 2246728737: 2, 2782665878: 1, 2927183216: 1, 3537119515: 1, 3537123720: 1}\nMorgan with MorganFeatureAtomInvGen {0: 4, 3: 3, 614176407: 1, 792807483: 1, 3205495869: 2, 3205496825: 3, 3208860345: 1}\nMorgan with RDKitAtomInvGen {12: 4, 16: 3, 165450225: 1, 608338133: 1, 2705297134: 1, 3205492925: 3, 3205493174: 2}\nMorgan with MorganBondInvGen {864662311: 3, 1542631284: 2, 1542633699: 1, 1741045729: 1, 2245273601: 1, 2245277810: 1, 2246728737: 2, 2782665878: 1, 2927183216: 1, 3537119515: 1, 3537123720: 1}\n" ] ], [ [ "## Custom Invariants\n\nIt is also 
possible to provide custom invariants instead of using a invariants generator", "_____no_output_____" ] ], [ [ "\ngenerator = rdFingerprintGenerator.GetAtomPairGenerator()\nfingerprint = generator.GetSparseCountFingerprint(simpleMol)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(non_zero)\n\ncustomAtomInvariants = [1, 1, 1]\nfingerprint = generator.GetSparseCountFingerprint(simpleMol, customAtomInvariants = customAtomInvariants)\nnon_zero = fingerprint.GetNonzeroElements()\n\nprint(non_zero)\n", "{541730: 1, 558113: 2}\n{16417: 2, 16418: 1}\n" ] ], [ [ "## Convenience functions", "_____no_output_____" ], [ "## Bulk fingerprint", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0bb29fcf1ee3f728901b9331d54bf4572ef136b
7,107
ipynb
Jupyter Notebook
complete-foundations-bootcamp-output-main/content/section-3-python-crash-course/exercises.ipynb
redditech/cadCad-training
a1ab040e9baf1863a75b2c85cb3ea567049b6c2a
[ "MIT" ]
null
null
null
complete-foundations-bootcamp-output-main/content/section-3-python-crash-course/exercises.ipynb
redditech/cadCad-training
a1ab040e9baf1863a75b2c85cb3ea567049b6c2a
[ "MIT" ]
null
null
null
complete-foundations-bootcamp-output-main/content/section-3-python-crash-course/exercises.ipynb
redditech/cadCad-training
a1ab040e9baf1863a75b2c85cb3ea567049b6c2a
[ "MIT" ]
null
null
null
18.556136
187
0.476572
[ [ [ "<span style=\"display:block;text-align:center;margin-right:105px\"><img src=\"../../media/logos/logo-vertical.png\" width=\"200\"/></span>", "_____no_output_____" ], [ "# Section 3: Python Crash Course Exercises\n\n---", "_____no_output_____" ], [ "Not comfortable with the exercises? Try the Udemy [Complete Python Bootcamp](https://www.udemy.com/complete-python-bootcamp/?couponCode=PY20) first.", "_____no_output_____" ], [ "## Variables", "_____no_output_____" ], [ "**Assign the number value `5` to the variable `a`, and print it.**", "_____no_output_____" ], [ "**Assign appropriate number values to variables `a` & `b`, and use those variables to produce a sum of value `3`.**", "_____no_output_____" ], [ "**Assign the number value `1` to the variable `a`, and use additive assignment to increment its value by `5`.**", "_____no_output_____" ], [ "## Data Types", "_____no_output_____" ], [ "**What is `3` to the power of `5`?**", "_____no_output_____" ], [ "**Create an unordered, unique collection with the following values `1`, `2`, `3`, `'a'`, and `'b'`, using the most appropriate collection type.**", "_____no_output_____" ], [ "**Select the element `'a'` from the list `[1, 2, 3, [1, 2, 3, [1, 'a', 3]]]`.**", "_____no_output_____" ], [ "## Functions", "_____no_output_____" ], [ "**Define a function that returns the string `Hello world!`.**", "_____no_output_____" ], [ "**Define a function that takes two string arguments and returns them concatenated.**", "_____no_output_____" ], [ "**Define a function that returns an anonymous function that takes two arguments and adds them.**", "_____no_output_____" ], [ "## Control Structures", "_____no_output_____" ], [ "**Given any valid Python integer, write a control structure that prints a unique message when the integer is a negative number, positive number, or the value zero.**", "_____no_output_____" ], [ "**Given the nested list `[[1, 2, 3], ['one', 'two', 'three'], ['Hello', 'World', '!']]`, how would you use a loop to print the value of every element within the list and sublists?**", "_____no_output_____" ], [ "**Write a loop that will sum every 3rd element of the list `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]` and print the result.**", "_____no_output_____" ], [ "## Well done!", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0bb2bdb84c9bbd42c2cf31daec1656073d4aed4
17,163
ipynb
Jupyter Notebook
.ipynb_checkpoints/R&D_Profit-checkpoint.ipynb
nwmsno1/tensorflow_base
158592f87285a6d2516ae4d72e6015054db5991c
[ "MIT" ]
null
null
null
.ipynb_checkpoints/R&D_Profit-checkpoint.ipynb
nwmsno1/tensorflow_base
158592f87285a6d2516ae4d72e6015054db5991c
[ "MIT" ]
null
null
null
.ipynb_checkpoints/R&D_Profit-checkpoint.ipynb
nwmsno1/tensorflow_base
158592f87285a6d2516ae4d72e6015054db5991c
[ "MIT" ]
null
null
null
33.197292
260
0.455282
[ [ [ "### 1. 数据读入", "_____no_output_____" ] ], [ [ "import pandas as pd\ndf = pd.read_csv('50_Startups.csv')\ndf.head()", "_____no_output_____" ] ], [ [ "### 2. 数据归一化", "_____no_output_____" ] ], [ [ "def normalize_feature(df):\n return df.apply(lambda column: (column - column.mean())/column.std())\n\ndf = normalize_feature(df[['R&D Spend', 'Marketing Spend', 'Profit']])\ndf.head()", "_____no_output_____" ], [ "# 数据分析(3D)\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nfig = plt.figure()\nax = plt.axes(projection='3d')\nax.set_xlabel('R&D Spend')\nax.set_ylabel('Marketing Spend')\nax.set_zlabel('Profit')\nax.scatter3D(df['R&D Spend'], df['Marketing Spend'], df['Profit'], c=df['Profit'], cmap='Reds')", "_____no_output_____" ] ], [ [ "### 3. 数据处理", "_____no_output_____" ] ], [ [ "import numpy as np\n\n# 为了方便矩阵相乘,添加一列全为1的x0\nones = pd.DataFrame({'ones': np.ones(len(df))}) # ones是n行1列的数据框,表示x0恒为1\ndf = pd.concat([ones, df], axis=1) # 根据列合并数据\n\nX_data = np.array(df[df.columns[0:3]])\nY_data = np.array(df[df.columns[-1]]).reshape(len(df), 1)\n\nprint(X_data.shape, type(X_data))\nprint(Y_data.shape, type(Y_data))\ndf.head()", "(50, 3) <class 'numpy.ndarray'>\n(50, 1) <class 'numpy.ndarray'>\n" ] ], [ [ "### 4. 创建线性回归模型(数据流图)", "_____no_output_____" ] ], [ [ "import tensorflow as tf\n\ntf.reset_default_graph() # https://www.cnblogs.com/demo-deng/p/10365889.html\n\nalpha = 0.01 # 学习率\nepoch = 500 # 训练全量数据集的轮数\n\n# 创建线性回归模型(数据流图)\n# 输入X, 形状[50,3]\nX = tf.placeholder(tf.float32, X_data.shape)\n# 输入Y,形状[50,1]\nY = tf.placeholder(tf.float32, Y_data.shape)\n\n# 权重变量W,形状[3,1]\n# 存疑:tf.get_variable_scope().reuse_variables() # https://cloud.tencent.com/developer/article/1335672\nW = tf.get_variable(\"weights\", (X_data.shape[1], 1), initializer=tf.constant_initializer())\n\n# 假设函数 h(x) = w0*x0+w1*x1+w2*x2,其中x0恒为1\n# 推理值 Y_pred 形状[47,1]\nY_pred = tf.matmul(X, W)\n\n# 损失函数采用最小二乘法,Y_pred - y 是形如[47,1]的向量\n# tf.matmul(a, b, transpose_a=True) 表示: 矩阵a的转置乘矩阵b,及[1,47] x [47,1]\n\n# 损失函数操作 loss\nloss_op = 1 / (2 * len(X_data)) * tf.matmul((Y_pred - Y), (Y_pred - Y), transpose_a=True)\n# 随机梯度下降优化器 opt\nopt = tf.train.GradientDescentOptimizer(learning_rate=alpha)\n# 单步训练操作 train_op\ntrain_op = opt.minimize(loss_op)", "C:\\Users\\nwmsn\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ] ], [ [ "### 5. 
创建会话(运行环境)", "_____no_output_____" ] ], [ [ "# 创建会话(运行环境)\nwith tf.Session() as sess:\n # 初始化全局变量\n sess.run(tf.global_variables_initializer())\n # 开始训练模型\n # 因为训练集较小,所以不采用批梯度下降优化算法,每次都采用全量数据训练\n for e in range(1, epoch+1):\n sess.run(train_op, feed_dict={X: X_data, Y: Y_data})\n if e % 10 == 0:\n loss, w = sess.run([loss_op, W], feed_dict={X: X_data, Y: Y_data})\n log_str = \"Epoch %d \\t Loss=%.4g \\t Model: y = %.4gx1 + %.4gx2 + %.4g\"\n print(log_str % (e, loss, w[1], w[2], w[0]))", "Epoch 10 \t Loss=0.3661 \t Model: y = 0.08908x1 + 0.06728x2 + -8.941e-10\nEpoch 20 \t Loss=0.2775 \t Model: y = 0.1656x1 + 0.1226x2 + -4.098e-10\nEpoch 30 \t Loss=0.2139 \t Model: y = 0.2315x1 + 0.1679x2 + 1.08e-09\nEpoch 40 \t Loss=0.1682 \t Model: y = 0.2885x1 + 0.2047x2 + -6.706e-10\nEpoch 50 \t Loss=0.1352 \t Model: y = 0.3378x1 + 0.2345x2 + -2.235e-10\nEpoch 60 \t Loss=0.1113 \t Model: y = 0.3807x1 + 0.2583x2 + 3.725e-10\nEpoch 70 \t Loss=0.09382 \t Model: y = 0.4181x1 + 0.2772x2 + -2.794e-10\nEpoch 80 \t Loss=0.08101 \t Model: y = 0.4508x1 + 0.2919x2 + 1.863e-11\nEpoch 90 \t Loss=0.07152 \t Model: y = 0.4796x1 + 0.3031x2 + 9.313e-10\nEpoch 100 \t Loss=0.06441 \t Model: y = 0.505x1 + 0.3114x2 + 1.621e-09\nEpoch 110 \t Loss=0.05901 \t Model: y = 0.5275x1 + 0.3173x2 + 2.403e-09\nEpoch 120 \t Loss=0.05484 \t Model: y = 0.5476x1 + 0.3212x2 + 3.241e-09\nEpoch 130 \t Loss=0.05157 \t Model: y = 0.5656x1 + 0.3235x2 + 3.669e-09\nEpoch 140 \t Loss=0.04895 \t Model: y = 0.5818x1 + 0.3243x2 + 3.371e-09\nEpoch 150 \t Loss=0.04681 \t Model: y = 0.5964x1 + 0.3241x2 + 3.576e-09\nEpoch 160 \t Loss=0.04502 \t Model: y = 0.6098x1 + 0.3229x2 + 3.781e-09\nEpoch 170 \t Loss=0.04351 \t Model: y = 0.622x1 + 0.3209x2 + 3.967e-09\nEpoch 180 \t Loss=0.04219 \t Model: y = 0.6331x1 + 0.3183x2 + 4.414e-09\nEpoch 190 \t Loss=0.04103 \t Model: y = 0.6435x1 + 0.3153x2 + 4.917e-09\nEpoch 200 \t Loss=0.04 \t Model: y = 0.6531x1 + 0.3118x2 + 4.806e-09\nEpoch 210 \t Loss=0.03906 \t Model: y = 0.662x1 + 0.3081x2 + 5.066e-09\nEpoch 220 \t Loss=0.0382 \t Model: y = 0.6704x1 + 0.3041x2 + 5.141e-09\nEpoch 230 \t Loss=0.03741 \t Model: y = 0.6782x1 + 0.2999x2 + 5.309e-09\nEpoch 240 \t Loss=0.03668 \t Model: y = 0.6856x1 + 0.2956x2 + 5.607e-09\nEpoch 250 \t Loss=0.036 \t Model: y = 0.6926x1 + 0.2912x2 + 5.83e-09\nEpoch 260 \t Loss=0.03537 \t Model: y = 0.6992x1 + 0.2868x2 + 5.811e-09\nEpoch 270 \t Loss=0.03477 \t Model: y = 0.7056x1 + 0.2823x2 + 6.277e-09\nEpoch 280 \t Loss=0.0342 \t Model: y = 0.7116x1 + 0.2779x2 + 6.519e-09\nEpoch 290 \t Loss=0.03367 \t Model: y = 0.7174x1 + 0.2734x2 + 7.004e-09\nEpoch 300 \t Loss=0.03317 \t Model: y = 0.7229x1 + 0.269x2 + 7.395e-09\nEpoch 310 \t Loss=0.0327 \t Model: y = 0.7282x1 + 0.2646x2 + 7.618e-09\nEpoch 320 \t Loss=0.03226 \t Model: y = 0.7333x1 + 0.2603x2 + 7.618e-09\nEpoch 330 \t Loss=0.03183 \t Model: y = 0.7382x1 + 0.2561x2 + 7.749e-09\nEpoch 340 \t Loss=0.03143 \t Model: y = 0.743x1 + 0.2519x2 + 7.6e-09\nEpoch 350 \t Loss=0.03106 \t Model: y = 0.7476x1 + 0.2478x2 + 8.084e-09\nEpoch 360 \t Loss=0.0307 \t Model: y = 0.752x1 + 0.2438x2 + 8.009e-09\nEpoch 370 \t Loss=0.03036 \t Model: y = 0.7563x1 + 0.2398x2 + 8.103e-09\nEpoch 380 \t Loss=0.03004 \t Model: y = 0.7604x1 + 0.236x2 + 7.842e-09\nEpoch 390 \t Loss=0.02974 \t Model: y = 0.7644x1 + 0.2322x2 + 8.121e-09\nEpoch 400 \t Loss=0.02945 \t Model: y = 0.7683x1 + 0.2285x2 + 8.27e-09\nEpoch 410 \t Loss=0.02918 \t Model: y = 0.7721x1 + 0.2249x2 + 8.419e-09\nEpoch 420 \t Loss=0.02892 \t Model: y = 0.7758x1 + 0.2214x2 + 8.382e-09\nEpoch 430 \t 
Loss=0.02867 \t Model: y = 0.7793x1 + 0.2179x2 + 8.419e-09\nEpoch 440 \t Loss=0.02844 \t Model: y = 0.7828x1 + 0.2146x2 + 8.55e-09\nEpoch 450 \t Loss=0.02822 \t Model: y = 0.7862x1 + 0.2113x2 + 8.978e-09\nEpoch 460 \t Loss=0.02801 \t Model: y = 0.7894x1 + 0.2081x2 + 9.034e-09\nEpoch 470 \t Loss=0.02782 \t Model: y = 0.7926x1 + 0.205x2 + 9.295e-09\nEpoch 480 \t Loss=0.02763 \t Model: y = 0.7957x1 + 0.202x2 + 9.481e-09\nEpoch 490 \t Loss=0.02745 \t Model: y = 0.7987x1 + 0.199x2 + 9.667e-09\nEpoch 500 \t Loss=0.02729 \t Model: y = 0.8016x1 + 0.1962x2 + 9.444e-09\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0bb592af04259595de2cbe0c5a69e1ca0e151ef
8,310
ipynb
Jupyter Notebook
examples/Part 3 - Launch a Grid Network Locally.ipynb
joaolcaas/PyGrid
2ad5e64c428eff856ef9f1c72e6683b93274140b
[ "Apache-2.0" ]
1
2020-03-17T22:17:46.000Z
2020-03-17T22:17:46.000Z
examples/Part 3 - Launch a Grid Network Locally.ipynb
joaolcaas/PyGrid
2ad5e64c428eff856ef9f1c72e6683b93274140b
[ "Apache-2.0" ]
null
null
null
examples/Part 3 - Launch a Grid Network Locally.ipynb
joaolcaas/PyGrid
2ad5e64c428eff856ef9f1c72e6683b93274140b
[ "Apache-2.0" ]
null
null
null
26.980519
285
0.557641
[ [ [ "# Part 3: Launch a Grid Network Locally\n\nIn this tutorial, you'll learn how to deploy a grid network into a local machine and then interact with it using PySyft.\n\n_WARNING: Grid nodes publish datasets online and are for EXPERIMENTAL use only. Deploy nodes at your own risk. Do not use OpenGrid with any data/models you wish to keep private._\n\nIn order to run an grid network locally you will need to run two different apps: a grid gateway and one or more grid workers. In this tutorial we will use the websocket app available [here](https://github.com/OpenMined/PyGrid/tree/dev/app/websocket) to start the grid workers.\n\n\n## Starting the Grid Gateway\n\n\n### Step 1: Download the repository\n\n```bash\ngit clone https://github.com/OpenMined/PyGrid/\n```\n\n\n### Step 2: Download dependencies\n\nYou'll need to have the app dependencies installed. We recommend setting up an independent [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/concepts/environments.html) to avoid problems with library versions.\n\nYou can install the dependencies by running:\n\n```bash\ncd PyGrid/gateway/\npip install -r requirements.txt\n```\n\n### Step 3: Make grid importable\n\nInstall grid as a python package\n\n```bash\ncd PyGrid\npython setup.py install (or python setup.py develop)\n```\n\n### Step 4: Start gateway app\n\nThen to start the app just run the `gateway.py` script. The `--start_local_db` automatically starts a local database so you don't have to configure one yourself.\n\n```bash\npython gateway.py --start_local_db --port=<port_number>\n```\n\nThis will start the app on address: `http://0.0.0.0/<port_number>`.\n\nTo check what other arguments you can use when running this app, run:\n\n```bash\npython gateway.py --help\n```\n\nLet's start a grid gateway on port `5000`\n\n```bash\npython gateway.py --port=5000\n```\n\nGreat, so if your app started successfully the script should still be running.\n\n## Starting the Grid Worker App\n\n### Step 5: Starting the Grid Worker app\n\nThis is the same procedure already described at Part 1. 
But we add a new argument when starting the app called `--gateway_url` this should equal to the address used by the grid network here it's \"http://localhost:5000\"\n\nLet's start two workers:\n\n* bob on port `3000`\n* alice on port `3001`\n\n```bash\npython websocket_app.py --db_url=redis:///redis:6379 --id=bob --port=3000 --gateway_url=http://localhost:5000\n```\n\n```bash\npython websocket_app.py --db_url=redis:///redis:6379 --id=alice --port=3001 --gateway_url=http://localhost:5000\n```\n\nWe should always start the workers after starting the grid gateway!!\n\nGreat, so if your app started successfully the script should still be running.\n\n\n### Step 6: Start communication with the Grid Gateway and workers\n\nLet's start communication with the Gateway and the workers.", "_____no_output_____" ] ], [ [ "# General dependencies\nimport torch as th\nimport syft as sy\nimport grid as gr\n\nhook = sy.TorchHook(th)", "_____no_output_____" ], [ "gateway = gr.GridNetwork(\"http://localhost:5000\")", "_____no_output_____" ], [ "# WARNING: We should use the same id and port as the one used to start the app!!!\nbob = gr.WebsocketGridClient(hook, id=\"bob\", address=\"http://localhost:3000\")\n# If you don't connect to the worker you can't send messages to it\nbob.connect()\n\n# WARNING: We should use the same id and port as the one used to start the app!!!\nalice = gr.WebsocketGridClient(hook, id=\"alice\", address=\"http://localhost:3001\")\n# If you don't connect to the worker you can't send messages to it\nalice.connect()", "_____no_output_____" ] ], [ [ "### Step 7: Use PySyft Like Normal\n\nNow you can simply use the worker you created like you would any other normal PySyft worker. For more on how PySyft works, please see the PySyft tutorials: https://github.com/OpenMined/PySyft/tree/dev/examples/tutorials", "_____no_output_____" ] ], [ [ "x = th.tensor([1,2,3,4]).send(bob)\nx", "_____no_output_____" ], [ "y = x + x\ny", "_____no_output_____" ], [ "y.get()", "_____no_output_____" ] ], [ [ "### Step 7: Perform operations on Grid Network\n\nSo far we haven't done anything different, but here is the magic: we can interact with the network to query general information about it.", "_____no_output_____" ] ], [ [ "x = th.tensor([1, 2, 3, 4, 5]).tag(\"#tensor\").send(bob)", "_____no_output_____" ] ], [ [ "We can search for a tensor in the entire network, and get pointers to all tensors.", "_____no_output_____" ] ], [ [ "gateway.search(\"#tensor\")", "_____no_output_____" ], [ "y = th.tensor([1, 2, 3, 4, 5]).tag(\"#tensor\").send(alice)", "_____no_output_____" ], [ "gateway.search(\"#tensor\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0bb7687ce74a95a4082cdaae2e35579fd7743fa
113,939
ipynb
Jupyter Notebook
files/REF!IEX01.ipynb
alphaBenj/RoughCut
3e0a11170c704acba4fe0b03c6a200efd86cbb56
[ "Apache-2.0" ]
null
null
null
files/REF!IEX01.ipynb
alphaBenj/RoughCut
3e0a11170c704acba4fe0b03c6a200efd86cbb56
[ "Apache-2.0" ]
null
null
null
files/REF!IEX01.ipynb
alphaBenj/RoughCut
3e0a11170c704acba4fe0b03c6a200efd86cbb56
[ "Apache-2.0" ]
null
null
null
32.544702
2,250
0.376333
[ [ [ "# import sys", "_____no_output_____" ], [ "# sys.path.append('https://github.com/alphaBenj/RoughCut/blob/master/files/data_iex.py')", "_____no_output_____" ], [ "%matplotlib inline \nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport data_iex as IEX", "_____no_output_____" ], [ "dir(IEX)\n# ?filter=symbol,volume,lastSalePrice ", "_____no_output_____" ], [ "iex = IEX.API()\ndir(iex)", "_____no_output_____" ], [ "iex.lastTrade(['AAPL', 'IBM', \"FLR\"])", "_____no_output_____" ], [ "iex.lastTradeQuote(['AAPL', 'IBM'])", "_____no_output_____" ], [ "# %%timeit\niex.lastTrade(['AAPL', 'MSFT'])", "_____no_output_____" ], [ "# %%timeit -n 1\nsymList = ['AAPL', 'CSCO',\"OXY\",\"NFLX\", \"SBUX\", \"VXX\", \"MO\", \"PM\", \"TSLA\",\"GE\" ]\n# symList = ['GE' ]\nbars = iex.tradeBars(symList, bucket='1m')\n\n# print (bars.columns)\nbars", "_____no_output_____" ], [ "# bars[[ 'symbol','date', 'minute','open','high', 'low', 'close','volume','marketOpen',\n# 'marketHigh','marketLow', 'marketClose', 'marketVolume', 'average','marketAverage', \n# 'numberOfTrades', 'marketNumberOfTrades', \n# ]]", "_____no_output_____" ], [ "try: bar = bars[['symbol','date','minute','open','high', 'low', 'close', 'volume',]]\nexcept: bar = bars[['symbol','date','open','high', 'low', 'close', 'volume',]]\n", "_____no_output_____" ], [ "bar[bar['symbol']==\"AAPL\"]", "_____no_output_____" ], [ "https://api.iextrading.com/1.0/tops/last/?symbols=SNAP,fb,AIG%2b&format=csv", "_____no_output_____" ], [ "# https://api.iextrading.com/1.0/tops/?symbols=SNAP,fb,AIG%2b&format=csv", "_____no_output_____" ], [ "import pandas as pd\nfrom urllib.request import Request, urlopen\nimport json\nfrom pandas.io.json import json_normalize\nimport requests, io\n", "_____no_output_____" ], [ "_IEX_URL_PREFIX = r'https://api.iextrading.com/1.0/'", "_____no_output_____" ], [ "def get_trade_bars_data2( securities, bucket='1m'):\n\n \"\"\"\n Get bucketed trade/volume data. 
Supported buckets are: 1m, 3m, 6m, 1y, ytd, 2y, 5y\n :param securities: list of securities\n :param bucket:\n :return: dataframe\n \"\"\"\n # securities = self.return_valid_securities(securities)\n #https://api.iextrading.com/1.0/stock/market/batch?&types=time-series&range=5y&symbols=qep,oxy,flr,mo,pm,nflx\n \n syms = (',').join(securities)\n df = pd.DataFrame()\n # Get price data for each security and then append the results together\n # if securities:\n\n # for symbol in securities:\n # suffix = r'stock/{symbol}/chart/{bucket}'.format(symbol=symbol,bucket=bucket)\n # df = self._url_to_dataframe(self._IEX_URL_PREFIX + suffix)\n # df['symbol'] = symbol\n # final_df = final_df.append(df, ignore_index=True)\n # return final_df\n # else: print('These stock(s) are invalid!')\n\n # if securities: \n # for symbol in securities:\n \n suffix = r\"stock/market/batch?&types=time-series&range={bucket}&symbols={symbol}\".format(bucket=bucket,symbol=syms)\n filter = \"&filter=symbol,date,open,high,low,close,volume,changePercent,vwap\"\n urlData = requests.get(_IEX_URL_PREFIX + suffix+filter).content\n rawData = json.loads(urlData)\n \n for sym in list(rawData.keys()): \n _df = pd.DataFrame(list(rawData.get(sym).values())[0])\n _df[\"symbol\"] = sym\n print (_df.head())\n df = pd.concat([df,_df])\n \n \n return df\n ", "_____no_output_____" ], [ "# %%timeit -n 1\nsecurities = ['AAPL', 'CSCO',\"OXY\",\"NFLX\", \"SBUX\", \"VXX\", \"MO\", \"PM\" ]\nbars = get_trade_bars_data2( securities, bucket='2y')", " changePercent close date high low open volume \\\n0 1.271 93.1697 2016-05-23 93.9040 92.4354 92.6286 38018643 \n1 1.524 94.5900 2016-05-24 94.7735 93.5658 93.9329 35140174 \n2 1.757 96.2518 2016-05-25 96.3677 94.7929 95.3339 38642108 \n3 0.793 97.0151 2016-05-26 97.3243 95.3049 96.3098 56331159 \n4 -0.060 96.9571 2016-05-27 97.0731 95.8895 96.0779 36341240 \n\n vwap symbol \n0 93.3771 AAPL \n1 94.2808 AAPL \n2 95.7591 AAPL \n3 96.3515 AAPL \n4 96.5612 AAPL \n changePercent close date high low open volume \\\n0 -0.107 26.1068 2016-05-23 26.3964 25.9573 25.9573 19850040 \n1 1.897 26.6020 2016-05-24 26.7048 26.1722 26.1722 26514828 \n2 1.581 27.0225 2016-05-25 27.0972 26.6487 26.7048 25729908 \n3 -0.069 27.0038 2016-05-26 27.1346 26.9057 26.9664 19366299 \n4 0.069 27.0225 2016-05-27 27.1299 26.9010 27.0505 16673324 \n\n vwap symbol \n0 26.2496 CSCO \n1 26.5854 CSCO \n2 26.9954 CSCO \n3 27.0286 CSCO \n4 27.0029 CSCO \n changePercent close date high low open volume \\\n0 -0.120 68.3968 2016-05-23 68.8536 67.8852 68.0405 3334451 \n1 1.670 69.5388 2016-05-24 69.7854 68.4973 69.0089 3620728 \n2 0.066 69.5844 2016-05-25 70.6624 69.3195 70.2239 3399508 \n3 -0.079 69.5296 2016-05-26 70.5071 69.4748 70.0686 2286839 \n4 0.053 69.5662 2016-05-27 69.6301 69.0454 69.4017 2203127 \n\n vwap symbol \n0 68.4580 OXY \n1 69.4648 OXY \n2 69.7519 OXY \n3 69.8131 OXY \n4 69.3975 OXY \n changePercent close date high low open volume \\\n0 2.595 94.89 2016-05-23 95.2924 92.85 92.98 13992330 \n1 3.162 97.89 2016-05-24 99.1400 95.75 95.98 21246023 \n2 2.360 100.20 2016-05-25 100.3100 98.30 99.00 15211295 \n3 2.605 102.81 2016-05-26 104.0000 101.38 103.21 17824673 \n4 0.477 103.30 2016-05-27 103.5000 101.44 102.44 9092127 \n\n vwap symbol \n0 94.5605 NFLX \n1 97.8675 NFLX \n2 99.2864 NFLX \n3 102.7704 NFLX \n4 102.5826 NFLX \n changePercent close date high low open volume \\\n0 -0.037 52.6105 2016-05-23 52.8197 52.3128 52.6298 7352054 \n1 1.538 53.4199 2016-05-24 53.5934 52.6876 52.7454 7748697 \n2 -0.523 53.1405 2016-05-25 53.4392 
52.9478 53.1887 8126058 \n3 0.254 53.2754 2016-05-26 53.8342 52.9478 53.5259 9451708 \n4 -0.253 53.1405 2016-05-27 53.5259 53.0923 53.3428 6631120 \n\n vwap symbol \n0 52.6105 SBUX \n1 53.2790 SBUX \n2 53.2011 SBUX \n3 53.2509 SBUX \n4 53.2845 SBUX \n changePercent close date high low open volume \\\n0 -0.464 240.48 2016-05-23 243.3184 237.12 241.12 2825376 \n1 -4.325 230.08 2016-05-24 237.1200 227.44 236.80 3872151 \n2 -1.878 225.76 2016-05-25 228.6400 221.36 226.48 3422037 \n3 -1.063 223.36 2016-05-26 226.2400 222.40 225.44 2740833 \n4 -2.686 217.36 2016-05-27 222.5600 217.12 221.60 2757274 \n\n vwap symbol \n0 239.5273 VXX \n1 232.1445 VXX \n2 224.9995 VXX \n3 224.1171 VXX \n4 219.6263 VXX \n changePercent close date high low open volume \\\n0 0.445 58.7608 2016-05-23 58.8630 58.3613 58.5285 3709788 \n1 0.870 59.2719 2016-05-24 59.5803 59.0117 59.0860 5209705 \n2 0.204 59.3927 2016-05-25 59.6064 59.1325 59.2719 4265913 \n3 0.125 59.4670 2016-05-26 59.5506 59.1432 59.3276 3732653 \n4 -0.063 59.4298 2016-05-27 59.7179 59.1604 59.5227 3255749 \n\n vwap symbol \n0 58.6949 MO \n1 59.3222 MO \n2 59.4220 MO \n3 59.3803 MO \n4 59.3966 MO \n changePercent close date high low open volume \\\n0 0.245 90.7501 2016-05-23 90.8794 90.2423 90.5470 2365439 \n1 0.570 91.2672 2016-05-24 91.7566 91.0364 91.1472 3074422 \n2 0.212 91.4611 2016-05-25 91.8767 91.0918 91.1103 2543136 \n3 -0.040 91.4242 2016-05-26 91.5673 90.9164 91.4611 2350547 \n4 0.172 91.5812 2016-05-27 91.7382 91.1472 91.5073 2059805 \n\n vwap symbol \n0 90.6034 PM \n1 91.3768 PM \n2 91.5345 PM \n3 91.3463 PM \n4 91.5329 PM \n" ], [ "# list(rd.keys())[0]\n# _df = pd.DataFrame()", "_____no_output_____" ], [ "# _df = pd.DataFrame()\n# print (_df.head())\n\n\n# # for sym in list(rd.keys()): \n# df = pd.DataFrame(list(rd.get(sym).values())[0])\n# df[\"symbol\"] = sym\n# _df = pd.concat([_df,df])\n# print (_df.head())", "_____no_output_____" ], [ "bar[bar['symbol']==\"OXY\"]", "_____no_output_____" ], [ "final_df", "_____no_output_____" ], [ "securities = ['oxy', \"QEP\",\"VXX\",\"HI\"]", "_____no_output_____" ], [ "str(securities).replace('\"', \"\").replace(\"'\", \"\").replace('[', \"\").replace(']', \"\").replace(' ', \"\")", "_____no_output_____" ], [ "(',').join(securities)", "_____no_output_____" ], [ "dllist = list(range(821))\nprint (round((len(dllist)),-2)/100+1)", "9.0\n" ], [ "dll = [ dllist[ x*100: (x+1)*100] for x in range(int(round((len(dllist)),-2)/100+1)) ]", "_____no_output_____" ], [ "range(int(round((len(dllist)),-2)/100+1))", "_____no_output_____" ], [ "dll", "_____no_output_____" ], [ "from IPython.core.display import HTML\nHTML(\"<style>.container { width:98% !important; white-space: none; no-wrap: true; }</style>\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0bb7ec3e978cd6c9bea94cb718d1789321a3b82
38,138
ipynb
Jupyter Notebook
ipynb/Chapter 2 - Moving Average.ipynb
jongha/stock-ai-book
421553267f8f123464532f20e758c507fbc2421d
[ "MIT" ]
1
2020-06-20T14:25:02.000Z
2020-06-20T14:25:02.000Z
ipynb/Chapter 2 - Moving Average.ipynb
jongha/stock-ai-book
421553267f8f123464532f20e758c507fbc2421d
[ "MIT" ]
null
null
null
ipynb/Chapter 2 - Moving Average.ipynb
jongha/stock-ai-book
421553267f8f123464532f20e758c507fbc2421d
[ "MIT" ]
2
2020-09-16T08:40:18.000Z
2021-10-09T23:17:22.000Z
36.777242
113
0.394567
[ [ [ "#-*- coding: utf-8 -*-\nimport pandas as pd\nimport pandas_datareader.data as web\nimport datetime", "_____no_output_____" ], [ "def get_file_path(code):\n return \"../data/\" + code", "_____no_output_____" ], [ "def download(code, year1, month1, day1, year2, month2, day2):\n start = datetime.datetime(year1, month1, day1)\n end = datetime.datetime(year2, month2, day2)\n df = web.DataReader(\"%s.KS\" % code, \"yahoo\", start, end)\n df.to_pickle(get_file_path(code))\n \n return df", "_____no_output_____" ], [ "def load(code):\n df = pd.read_pickle(get_file_path(code))\n return df", "_____no_output_____" ], [ "df = download(\"005930\", 2016, 1, 1, 2016, 11, 1)", "_____no_output_____" ], [ "#Moving Average\ndef EMA(m_Df, m_N, m_ColumnName='Close'):\n if m_ColumnName in m_Df.columns:\n m_Df[\"MA\" + str(m_N)] = pd.Series.rolling(m_Df[m_ColumnName], window=m_N, center=False).mean()\n else:\n raise(\"You didn't input a Column Name\")\n \n return m_Df", "_____no_output_____" ], [ "EMA(df, 5)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
d0bb88fc955d4bc02b8f289f054f3f305339a27f
73,303
ipynb
Jupyter Notebook
Mandelbrot/Render a Mandelbrot Set.ipynb
OMerkel/Fractals
cce15672ebc19fa99dc1551fc257c3aed54e12c7
[ "MIT" ]
1
2019-04-30T20:38:24.000Z
2019-04-30T20:38:24.000Z
Mandelbrot/Render a Mandelbrot Set.ipynb
OMerkel/Fractals
cce15672ebc19fa99dc1551fc257c3aed54e12c7
[ "MIT" ]
null
null
null
Mandelbrot/Render a Mandelbrot Set.ipynb
OMerkel/Fractals
cce15672ebc19fa99dc1551fc257c3aed54e12c7
[ "MIT" ]
null
null
null
446.969512
34,748
0.942267
[ [ [ "import datetime\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "def mandelbrot( w, h, maxiter=200 ):\n y, x = np.ogrid[ -1.2:1.2:h*1j, -2:0.8:w*1j ]\n C = x + y * 1j\n Z = np.zeros(C.shape, dtype=int)\n N = maxiter + Z\n bailout = 2.0\n \n for i in range(maxiter):\n Z = Z ** 2 + C\n diverged = np.abs(Z) > bailout\n N[diverged & (N==maxiter)] = i\n Z[diverged] = 2\n return N\n\n# plt.imshow(mandelbrot(1920,1200), cmap='tab20c')\nt1 = datetime.datetime.now()\nplt.imshow(mandelbrot(1920,1200), cmap='prism')\nt2 = datetime.datetime.now()\nruntime1 = (t2-t1).total_seconds() \nprint(str(runtime1) + \" seconds\")\n# plt.savefig('mandelbrot1.png', dpi=300)\nplt.show()", "12.814103 seconds\n" ], [ "def mandelbrot( w, h, maxiter=200 ):\n y, x = np.ogrid[ -1.2:1.2:h*1j, -2:0.8:w*1j ]\n C = x + y * 1j\n Z = np.zeros(C.shape, dtype=complex)\n M = np.full(C.shape, True, dtype=bool)\n N = np.zeros(C.shape)\n bailout = 2.0\n \n for i in range(maxiter):\n Z[M] = Z[M] ** 2 + C[M]\n diverged = np.abs(Z) > bailout\n M[diverged] = False\n N[M] = i+1\n return N\n\n# plt.imshow(mandelbrot(1920,1200), cmap='tab20c')\nt1 = datetime.datetime.now()\nplt.imshow(mandelbrot(1920,1200), cmap='prism')\nt2 = datetime.datetime.now()\nruntime2 = (t2-t1).total_seconds() \nprint(str(runtime2) + \" seconds\")\n# plt.savefig('mandelbrot2.png', dpi=300)\nplt.show()", "9.18136 seconds\n" ], [ "print(\"Delta absolute:\" + str(runtime1-runtime2) + \" seconds\")\nprint(\"Delta relative:\" + str(runtime2/runtime1*100) + \" %\")", "Delta absolute:3.6327429999999996 seconds\nDelta relative:71.65043077927498 %\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
d0bbbbe0a14526650970d38961c0d9a593f3edc8
33,451
ipynb
Jupyter Notebook
biotacs-noise/Extract Tactile Info.ipynb
3dperceptionlab/tactile-gcn
e05cd574f097a372a612e8fcbeb7645c316dd97a
[ "MIT" ]
10
2019-05-02T08:42:09.000Z
2021-03-15T05:44:29.000Z
biotacs-noise/Extract Tactile Info.ipynb
3dperceptionlab/tactile-gcn
e05cd574f097a372a612e8fcbeb7645c316dd97a
[ "MIT" ]
null
null
null
biotacs-noise/Extract Tactile Info.ipynb
3dperceptionlab/tactile-gcn
e05cd574f097a372a612e8fcbeb7645c316dd97a
[ "MIT" ]
5
2019-03-22T06:21:33.000Z
2020-07-10T09:13:35.000Z
126.230189
9,998
0.873786
[ [ [ "import os\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\n\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "csv_file = 'untouched.csv'", "_____no_output_____" ], [ "IN_FILE = csv_file \nOUT_FILE = 'untouched.npy'\n\n# The bag2csv script generates empty header names so the real\n# tactile values are stored in other columns\nFF_TAC_ATT = 'tdc'\nMF_TAC_ATT = 'tac.1'\nTH_TAC_ATT = 'pac0.4'\n\nFINGERS = 3\nELECTRODES = 24\n\n# Read CSV and fill numpy object\nraw_df = pd.read_csv(IN_FILE)\ntactiles_df = raw_df[[FF_TAC_ATT, MF_TAC_ATT, TH_TAC_ATT]]\ntactiles_np = np.zeros([tactiles_df.shape[0], FINGERS, ELECTRODES], dtype=int)\n\nfor index, row in tactiles_df.iterrows():\n ff_values = row[FF_TAC_ATT]\n ff_values = ff_values.replace('[', '').replace(']', '')\n ff_values = [int(x) for x in ff_values.split(', ')]\n\n mf_values = row[MF_TAC_ATT]\n mf_values = mf_values.replace('[', '').replace(']', '')\n mf_values = [int(x) for x in mf_values.split(', ')]\n\n th_values = row[TH_TAC_ATT]\n th_values = th_values.replace('[', '').replace(']', '')\n th_values = [int(x) for x in th_values.split(', ')]\n\n tactiles_np[index, 0, :] = ff_values\n tactiles_np[index, 1, :] = mf_values\n tactiles_np[index, 2, :] = th_values", "==============================\n('IN_FILE', 'untouched.csv')\n('OUT_FILE', 'untouched.npy')\n" ], [ "ff_np = np.reshape(tactiles_np[:, 0, :], (-1))\nmf_np = np.reshape(tactiles_np[:, 1, :], (-1))\nth_np = np.reshape(tactiles_np[:, 2, :], (-1))", "_____no_output_____" ], [ "_, ff_min_max, ff_mean, ff_variance, _, _ = stats.describe(ff_np)\nff_std = np.std(ff_np)", "_____no_output_____" ], [ "_, mf_min_max, mf_mean, mf_variance, _, _ = stats.describe(mf_np)\nmf_std = np.std(mf_np)", "_____no_output_____" ], [ "_, th_min_max, th_mean, th_variance, _, _ = stats.describe(th_np)\nth_std = np.std(th_np)", "_____no_output_____" ], [ "mins = [ff_min_max[0], mf_min_max[0], th_min_max[0]]\nmaxs = [ff_min_max[1], mf_min_max[1], th_min_max[1]]\nmeans = [ff_mean, mf_mean, th_mean]\nvariances = [ff_variance, mf_variance, th_variance]\nstds = [ff_std, mf_std, th_std]\n\ninds = range(3)", "_____no_output_____" ], [ "plt.bar(inds, mins, color=['red', 'green', 'cyan'])\nplt.ylim(0, 3750)\n\nplt.title('Min sensor value per finger')\nplt.xticks(inds, ('FF', 'MF', 'TH'))\n\nprint 'Mins:', mins", "Mins: [1607, 2616, 2383]\n" ], [ "plt.bar(inds, maxs, color=['red', 'green', 'cyan'])\nplt.ylim(0, 3750)\n\nplt.title('Max sensor value per finger')\nplt.xticks(inds, ('FF', 'MF', 'TH'))\n\nprint 'Maxs:', maxs", "Maxs: [3181, 3488, 3276]\n" ], [ "plt.bar(inds, means, yerr=stds, color=['red', 'green', 'cyan'])\nplt.ylim(0, 3750)\n\nplt.title('Mean (+/ std) sensor value per finger')\nplt.xticks(inds, ('FF', 'MF', 'TH'))\n\nprint 'Means:', means\nprint 'Stds:', stds", "Means: [2815.290216649608, 3256.6313971340837, 2984.2897475264413]\nStds: [287.00657538171123, 197.845728169163, 236.90013125285444]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0bbbe9f44cfb5450d425cb1a065e9d7f7c0636e
10,894
ipynb
Jupyter Notebook
Example_workflows/End-to-end_Workflows/end-to-end-2-sn2-pes/end-to-end-example-2.ipynb
jvalegre/RotaConfort
1d40d7f26c3b5f22e7054b13a69b8d5c0a377241
[ "MIT" ]
2
2020-06-24T23:34:35.000Z
2020-06-25T09:38:13.000Z
Example_workflows/End-to-end_Workflows/end-to-end-2-sn2-pes/end-to-end-example-2.ipynb
jvalegre/RotaConfort
1d40d7f26c3b5f22e7054b13a69b8d5c0a377241
[ "MIT" ]
1
2020-05-15T20:56:52.000Z
2020-05-15T20:56:52.000Z
Example_workflows/End-to-end_Workflows/end-to-end-2-sn2-pes/end-to-end-example-2.ipynb
jvalegre/RotaConfort
1d40d7f26c3b5f22e7054b13a69b8d5c0a377241
[ "MIT" ]
2
2020-05-08T16:39:53.000Z
2020-06-24T23:34:36.000Z
32.326409
158
0.596567
[ [ [ "#### Reactions processing with AQME - substrates + TS\n", "_____no_output_____" ] ], [ [ "# cell with import, system name and PATHs\nimport os, glob, subprocess\nimport shutil\nfrom pathlib import Path\nfrom aqme.csearch import csearch\nfrom aqme.qprep import qprep\nfrom aqme.qcorr import qcorr\nfrom rdkit import Chem\nimport pandas as pd", "_____no_output_____" ] ], [ [ "###### Step 1: Determining the constraints for SN2 TS", "_____no_output_____" ] ], [ [ "# Provide the TS smiles to detemine the numbering for constraints\nsmi = 'C(C)(F)C.[OH-]'\nmol = Chem.MolFromSmiles(smi)\nmol = Chem.AddHs(mol)\nfor i,atom in enumerate(mol.GetAtoms()):\n atom.SetAtomMapNum(i)\nsmi_new = Chem.MolToSmiles(mol)\nprint(smi_new)", "_____no_output_____" ], [ "mol\n# distance and angle to fix are \n# constraits_dist = [[0,2,1.8],[0,4,1.8]]\n# constraits_angle = [[2,0,4,180]]", "_____no_output_____" ] ], [ [ "###### Step 2: Create a CSV as follows", "_____no_output_____" ] ], [ [ "data = pd.read_csv('example2.csv')\ndata", "_____no_output_____" ] ], [ [ "###### Step 3: Running CSEARCH on the CSV", "_____no_output_____" ] ], [ [ "# run CSEARCH conformational sampling, specifying:\n\n# choose program for conformer sampling\n# 1) RDKit ('rdkit'): Fast sampling, only works for systems with one molecule\n# 2) CREST ('crest'): Slower sampling, works for noncovalent complexes and \n# transition structures (see example of TS in the CSEARCH_CREST_TS.ipynb notebook\n# from the CSEARCH_CMIN_conformer_generation folder)\n\n# 3) Program for conformer sampling (program=program)\n# 4) SMILES string (smi=smi)\n# 5) Name for the output SDF files (name=name)\n# 6) Include CREGEN post-analysis for CREST sampling (cregen=True)\ncsearch(input='example2.csv', program='crest', cregen=True, cregen_keywords='--ethr 0.1 --rthr 0.2 --bthr 0.3 --ewin 1')", "_____no_output_____" ] ], [ [ "###### Step 4: Create input files using QPREP \n\n###### a. for TS with TS keywords\n###### b. 
for substrates with substrate keywords", "_____no_output_____" ] ], [ [ "# set SDF filenames and directory where the new com files will be created\nsdf_rdkit_files = ['CSEARCH/crest/TS_SN2_crest.sdf']\n\n# choose program for input file generation, with the corresponding keywords line, memory and processors:\n# 1) Gaussian ('gaussian')\nprogram = 'gaussian'\nqm_input = 'B3LYP/6-31G(d) opt=(ts,calcfc,noeigen) freq'\nmem='40GB'\nnprocs=36\n\n# run QPREP input files generator, with:\n# 1) Working directory (w_dir_main=sdf_path)\n# 2) PATH to create the new SDF files (destination=com_path)\n# 3) Files to convert (files=sdf_rdkit_files)\n# 4) QM program for the input (program=program)\n# 5) Keyword line for the Gaussian inputs (qm_input=qm_input)\n# 6) Memory to use in the calculations (mem='24GB')\n# 7) Processors to use in the calcs (nprocs=8)\nqprep(files=sdf_rdkit_files,program=program,\n qm_input=qm_input,mem=mem,nprocs=nprocs)\n ", "_____no_output_____" ], [ "# set SDF filenames and directory where the new com files will be created\nsdf_rdkit_files = ['CSEARCH/crest/F_crest.sdf', 'CSEARCH/crest/O_anion_crest.sdf']\n\n# choose program for input file generation, with the corresponding keywords line, memory and processors:\n# 1) Gaussian ('gaussian')\nprogram = 'gaussian'\nqm_input = 'B3LYP/6-31G(d) opt freq'\nmem='40GB'\nnprocs=36\n\n# run QPREP input files generator, with:\n# 1) Working directory (w_dir_main=sdf_path)\n# 2) PATH to create the new SDF files (destination=com_path)\n# 3) Files to convert (files=sdf_rdkit_files)\n# 4) QM program for the input (program=program)\n# 5) Keyword line for the Gaussian inputs (qm_input=qm_input)\n# 6) Memory to use in the calculations (mem='24GB')\n# 7) Processors to use in the calcs (nprocs=8)\nqprep(files=sdf_rdkit_files,program=program,\n qm_input=qm_input,mem=mem,nprocs=nprocs)", "_____no_output_____" ] ], [ [ "###### Step 5: Checking with QPREP for corrections", "_____no_output_____" ] ], [ [ "w_dir_main=os.getcwd()+'/QCALC'\n\n# run the QCORR analyzer, with:\n# 1) Working directory (w_dir_main=com_path)\n# 2) Names of the QM output files (files='*.log')\n# 3) Detect and fix calcs that converged during geometry optimization but didn't converge during frequency calcs (freq_conv='opt=(calcfc,maxstep=5)')\n# 4) Type of initial input files where the LOG files come from (isom_type='com')\n# 5) Folder with the initial input files (isom_inputs=com_path)\n\nqcorr(w_dir_main=w_dir_main,files='*.log',freq_conv='opt=(calcfc,maxstep=5)')", "_____no_output_____" ] ], [ [ "###### Step 6: creation of DLPNO input files for ORCA single-point energy calculations", "_____no_output_____" ] ], [ [ "# choose output files to get atoms and coordinates to generate inputs for single-point energy calculations\nsuccess_dir = os.getcwd()+'/QCALC/successful_QM_outputs'\nqm_files = '*.log'\n\n# choose program for input file generation with QPREP, with the corresponding keywords line, memory and processors:\n\n# 1) ORCA ('orca')\nprogram = 'orca'\n# a DLPNO example keywords line for ORCA calculations\n# qm_input = 'Extrapolate(2/3,cc) def2/J cc-pVTZ/C DLPNO-CCSD(T) NormalPNO TightSCF RIJCOSX\\n'\nqm_input ='DLPNO-CCSD(T) def2-tzvpp def2-tzvpp/C\\n'\nqm_input += '%scf maxiter 500\\n'\nqm_input += 'end\\n'\nqm_input += '% mdci\\n'\nqm_input += 'Density None\\n'\nqm_input += 'end\\n'\nqm_input += '% elprop\\n'\nqm_input += 'Dipole False\\n'\nqm_input += 'end'\nmem='4GB'\nnprocs=8\n\n# run QPREP input files generator, with:\n# 1) Working directory (w_dir_main=sdf_path)\n# 2) PATH 
to create the new SDF files (destination=com_path)\n# 3) Files to convert (files=sdf_rdkit_files)\n# 4) QM program for the input (program=program)\n# 5) Keyword line for the Gaussian inputs (qm_input=qm_input)\n# 6) Memory to use in the calculations (mem='24GB')\n# 7) Processors to use in the calcs (nprocs=8)\n\nqprep(w_dir_main=success_dir,destination=success_dir,files=qm_files,program=program,\n qm_input=qm_input,mem=mem,nprocs=nprocs, suffix='DLPNO')", "_____no_output_____" ] ], [ [ "###### Step 7: Analysis with goodvibes", "_____no_output_____" ] ], [ [ "# track all the output files from Gaussian and ORCA\nopt_files = glob.glob(f'{success_dir}/*.log')\nspc_files = glob.glob(f'{success_dir}/*.out')\nall_files = opt_files + spc_files\n\n# move all the output files together to a folder called \"GoodVibes_analysis\" for simplicity\nw_dir_main = Path(os.getcwd())\nGV_folder = w_dir_main.joinpath('GoodVibes_analysis')\nGV_folder.mkdir(exist_ok=True, parents=True)\n\nfor file in all_files:\n\tshutil.copy(file, GV_folder)\n\n# this commands runs GoodVibes, including the population % of each conformer \n# (final results in the GoodVibes.out file)\nos.chdir(GV_folder)\nsubprocess.run(['python', '-m', 'goodvibes', '--xyz','--pes', '../pes.yaml','--graph','../pes.yaml', '--spc', 'DLPNO', '*.log',])\nos.chdir(w_dir_main)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0bbc0f21e1398a648c2eb8bde55db9267e3d399
5,521
ipynb
Jupyter Notebook
Motion Detection with Webcam.ipynb
annmohankunnath/Motion-Detection-with-Webcam
7ed8e84e182aa610be5bf527d0ea8697e01ab7e7
[ "MIT" ]
null
null
null
Motion Detection with Webcam.ipynb
annmohankunnath/Motion-Detection-with-Webcam
7ed8e84e182aa610be5bf527d0ea8697e01ab7e7
[ "MIT" ]
null
null
null
Motion Detection with Webcam.ipynb
annmohankunnath/Motion-Detection-with-Webcam
7ed8e84e182aa610be5bf527d0ea8697e01ab7e7
[ "MIT" ]
null
null
null
31.729885
152
0.5691
[ [ [ "# Import all the necessary libraries", "_____no_output_____" ] ], [ [ "import cv2, time, pandas\nfrom datetime import datetime", "_____no_output_____" ] ], [ [ "# Initialize the variables", "_____no_output_____" ] ], [ [ "first = None # This variable holds the value of the first frame\nstatus_list = [None,None] # This variable holds the list of statuses - if Python has come across a frame greater than 1000 pixels\ntimes = [] # This variable holds the timestamps during which a motion that was detected started and ended\ndf = pandas.DataFrame(columns = [\"Start\",\"End\"]) # The dataframe that will hold the timestamp for each detected motion", "_____no_output_____" ] ], [ [ "# Capture the video", "_____no_output_____" ] ], [ [ "vid = cv2.VideoCapture(0) # To capture the video from the first camera of the device", "_____no_output_____" ] ], [ [ "# Process the video", "_____no_output_____" ] ], [ [ "while True:\n \n check, frame = vid.read() # To read frame by frame\n status = 0\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)# To convert the image to grayscale\n gray_frame = cv2.GaussianBlur(gray_frame,(21,21),0) # To make the image blurry so that noise is reduced and accuracy is improved\n \n if first is None:\n first = gray_frame\n continue\n \n \n diff = cv2.absdiff(first, gray_frame) # To compute and store the absolute difference between the frames\n thresh_diff = cv2.threshold(diff,30,255,cv2.THRESH_BINARY)[1] # To convert differences less than 30 as WHITE and more than 30 as BLACK\n thresh_diff = cv2.dilate(thresh_diff,None,iterations = 2) # To increase the object area\n (_,cnts,_) = cv2.findContours(thresh_diff.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) # To find the contours of the frame\n \n \n for contour in cnts: # To iterate through the contours\n if cv2.contourArea(contour) < 1000: # To check if the contour area is less than 1000 pixels\n continue\n status = 1 # To remember that Python has found a frame that is bigger than 1000px\n (x,y,w,h)= cv2.boundingRect(contour) # To find the rectangle bounding the contours\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),3) # To draw a rectangle around the object\n \n status_list.append(status)\n if status_list[-1] == 1 and status_list[-2] == 0: # To figure out the start time\n times.append(datetime.now()) # To store the start time\n if status_list[-1] == 0 and status_list[-2] == 1: # To figure out the end tome\n times.append(datetime.now()) # To store the end time\n \n \n cv2.imshow(\"Gray\",gray_frame) # To display the frames that are captured in gray scale\n cv2.imshow(\"Difference\",diff) # To display the difference between the frames\n cv2.imshow(\"Threshold\",thresh_diff) # To display the difference after the threshold has been applied\n cv2.imshow(\"Color\",frame) # To display the original color frames\n \n key = cv2.waitKey(1) # To wait till a key is pressed\n \n if key == ord('q'): # To quit if the key pressed is q\n if status == 1:\n times.append(datetime.now())\n break\n \n ", "_____no_output_____" ] ], [ [ "# Write the timestamps to a csv file", "_____no_output_____" ] ], [ [ "for i in range(0,len(times),2):\n df=df.append({\"Start\":times[i],\"End\":times[i+1]},ignore_index=True) # To store the start and end times of when each motion was detected\n\ndf.to_csv(\"Times.csv\") # To write to the Times.csv file", "_____no_output_____" ] ], [ [ "# End the video processing", "_____no_output_____" ] ], [ [ "vid.release() # To stop the capturing device from capturing video\ncv2.destroyAllWindows # To close the 
windows and deallocate associated memory usage", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0bbe7b669e2b015cdd2ad89d2ffffb11c5a1f27
4,321
ipynb
Jupyter Notebook
mnist.ipynb
rpalsaxena/1000daysofcode
36922898c1e80fee3b118480f8300be70ffb3737
[ "Apache-2.0" ]
1
2021-07-21T09:14:57.000Z
2021-07-21T09:14:57.000Z
mnist.ipynb
rpalsaxena/1000daysofcode
36922898c1e80fee3b118480f8300be70ffb3737
[ "Apache-2.0" ]
null
null
null
mnist.ipynb
rpalsaxena/1000daysofcode
36922898c1e80fee3b118480f8300be70ffb3737
[ "Apache-2.0" ]
null
null
null
23.483696
164
0.52835
[ [ [ "### Install the required packages to download and unzip data", "_____no_output_____" ] ], [ [ "!pip install xtarfile", "Collecting xtarfile\n Downloading https://files.pythonhosted.org/packages/e1/48/cdd07360e52e530f2b32992ba0415205dec007df7778216873ccf3c95b7d/xtarfile-0.1.0.tar.gz\nBuilding wheels for collected packages: xtarfile\n Building wheel for xtarfile (setup.py): started\n Building wheel for xtarfile (setup.py): finished with status 'done'\n Created wheel for xtarfile: filename=xtarfile-0.1.0-cp37-none-any.whl size=3867 sha256=2a61422c083220d62d69f66f1dd67a2e39e4c6f7541cec7860abdfdae75e5730\n Stored in directory: C:\\Users\\rpals\\AppData\\Local\\pip\\Cache\\wheels\\f5\\b4\\44\\c5fe30217f193222fa60a89b0688058322d663dfda176e65c0\nSuccessfully built xtarfile\nInstalling collected packages: xtarfile\nSuccessfully installed xtarfile-0.1.0\n" ], [ "!pip install wget", "Requirement already satisfied: wget in c:\\users\\rpals\\anaconda3\\lib\\site-packages (3.2)\n" ] ], [ [ "### Download data and unzip it", "_____no_output_____" ] ], [ [ "import wget\nwget.download('https://activeeon-public.s3.eu-west-2.amazonaws.com/datasets/MNIST.old.tar.gz')", "100% [..............................................................] 23212732 / 23212732" ], [ "import tarfile\nfilename = tarfile.open('MNIST.old.tar.gz')\nfilename.extractall('./data') # specify which folder to extract to\nfilename.close()\n", "_____no_output_____" ] ], [ [ "### Load MNIST data", "_____no_output_____" ] ], [ [ "import torch\nimport numpy as np\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\n\ntransform = transforms.ToTensor()\ntrainset = datasets.MNIST('./data/', download=False, train=True, transform=transform)\n\ntest_data = datasets.MNIST(root='./data', train=False,\n download=False, transform=transform)", "_____no_output_____" ], [ "trainset.classes", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0bbeadf04c612953d9bf108be65d5de92e4d527
5,985
ipynb
Jupyter Notebook
docs/machine_learning/preprocessing_text/bag_of_words.ipynb
revgizmo-forks/ds_notes
ffc73d06b07fb2b137e7e679d3c99dab53580afa
[ "CC0-1.0" ]
1
2020-03-18T21:13:25.000Z
2020-03-18T21:13:25.000Z
docs/machine_learning/preprocessing_text/bag_of_words.ipynb
revgizmo-forks/ds_notes
ffc73d06b07fb2b137e7e679d3c99dab53580afa
[ "CC0-1.0" ]
null
null
null
docs/machine_learning/preprocessing_text/bag_of_words.ipynb
revgizmo-forks/ds_notes
ffc73d06b07fb2b137e7e679d3c99dab53580afa
[ "CC0-1.0" ]
1
2020-08-28T11:03:18.000Z
2020-08-28T11:03:18.000Z
22.756654
118
0.41203
[ [ [ "---\ntitle: \"Bag Of Words\"\nauthor: \"Chris Albon\"\ndate: 2017-12-20T11:53:49-07:00\ndescription: \"How to encode unstructured text data as bags of words for machine learning in Python.\"\ntype: technical_note\ndraft: false\n---", "_____no_output_____" ] ], [ [ "<a alt=\"Bag Of Words\" href=\"https://machinelearningflashcards.com\">\n <img src=\"/images/machine_learning_flashcards/Bag_Of_Words_print.png\" class=\"flashcard center-block\">\n</a>", "_____no_output_____" ], [ "## Preliminaries", "_____no_output_____" ] ], [ [ "# Load library\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Create Text Data", "_____no_output_____" ] ], [ [ "# Create text\ntext_data = np.array(['I love Brazil. Brazil!',\n 'Sweden is best',\n 'Germany beats both'])", "_____no_output_____" ] ], [ [ "## Create Bag Of Words", "_____no_output_____" ] ], [ [ "# Create the bag of words feature matrix\ncount = CountVectorizer()\nbag_of_words = count.fit_transform(text_data)\n\n# Show feature matrix\nbag_of_words.toarray()", "_____no_output_____" ] ], [ [ "## View Bag Of Words Matrix Column Headers", "_____no_output_____" ] ], [ [ "# Get feature names\nfeature_names = count.get_feature_names()\n\n# View feature names\nfeature_names", "_____no_output_____" ] ], [ [ "## View As A Data Frame", "_____no_output_____" ] ], [ [ "# Create data frame\npd.DataFrame(bag_of_words.toarray(), columns=feature_names)", "_____no_output_____" ] ] ]
[ "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "raw" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0bbee1729855b644f51ce072115282eb2565caf
8,654
ipynb
Jupyter Notebook
explore/Iain/load_ig_chalkboard.ipynb
wethepeopleonline/law-net
e8b01136360078c89b666e2b127672644ed0c54b
[ "MIT" ]
17
2016-09-02T19:39:11.000Z
2021-11-15T21:22:48.000Z
explore/Iain/load_ig_chalkboard.ipynb
wethepeopleonline/law-net
e8b01136360078c89b666e2b127672644ed0c54b
[ "MIT" ]
7
2016-09-04T17:19:13.000Z
2017-01-19T19:17:10.000Z
explore/Iain/load_ig_chalkboard.ipynb
idc9/law-net
e8b01136360078c89b666e2b127672644ed0c54b
[ "MIT" ]
8
2017-01-19T04:24:09.000Z
2021-09-13T20:22:58.000Z
28.943144
1,358
0.547724
[ [ [ "import sys\n\nsys.path.append('../../code/')\nimport os\nimport json\nfrom datetime import datetime\nimport time\nfrom math import *\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\nimport igraph as ig\nimport networkx as nx\n\nfrom load_data import load_citation_network, case_info\n\n%load_ext autoreload\n%autoreload 2\n%matplotlib inline\n\ndata_dir = '../../data/'\ncourt_name = 'scotus'", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ], [ "case_metadata = pd.read_csv(data_dir + 'clean/case_metadata_master.csv')\nedgelist = pd.read_csv(data_dir + 'clean/edgelist_master.csv')", "_____no_output_____" ], [ "# net_dir = data_dir + 'clean/' + court_name + '/'\n# case_metadata = pd.read_csv(net_dir + 'case_metadata.csv')\n\n# edgelist = pd.read_csv(net_dir + 'edgelist.csv')\n# edgelist.drop('Unnamed: 0', inplace=True, axis=1)", "_____no_output_____" ] ], [ [ "# Compare iterrows vs itertuples", "_____no_output_____" ] ], [ [ "\nstart = time.time()\n# create graph and add metadata\nG = nx.DiGraph()\nG.add_nodes_from(case_metadata.index.tolist())\nnx.set_node_attributes(G, 'date', case_metadata['date'].to_dict())\nfor index, edge in edgelist.iterrows():\n ing = edge['citing']\n ed = edge['cited']\n G.add_edge(ing, ed)\nend = time.time()\n\nprint 'pandas took %d seconds to go though %d edges using iterrows' % (end - start, edgelist.shape[0])", "pandas took 29 seconds to go though 250465 edges using iterrows\n" ], [ "# go through edglist using itertuples\n\nstart = time.time()\n# create graph and add metadata\nG = nx.DiGraph()\nG.add_nodes_from(case_metadata.index.tolist())\nnx.set_node_attributes(G, 'date', case_metadata['date'].to_dict())\nfor row in edgelist.itertuples():\n ing = row[1]\n ed = row[2]\n G.add_edge(ing, ed)\nend = time.time()\n\nprint 'pandas took %d seconds to go though %d edges using itertuples' % (end - start, edgelist.shape[0])", "pandas took 1 seconds to go though 250465 edges using itertuples\n" ] ], [ [ "# load into igraph", "_____no_output_____" ] ], [ [ "# create a dictonary that maps court listener ids to igraph ids\ncl_to_ig_id = {}\ncl_ids = case_metadata['id'].tolist()\nfor i in range(case_metadata['id'].size):\n cl_to_ig_id[cl_ids[i]] = i", "_____no_output_____" ], [ "start = time.time()\nV = case_metadata.shape[0]\n\ng = ig.Graph(n=V, directed=True)\ng.vs['date'] = case_metadata['date'].tolist()\ng.vs['name'] = case_metadata['id'].tolist()\n\nig_edgelist = []\nmissing_cases = 0\nstart = time.time()\n# i = 1\nfor row in edgelist.itertuples():\n# if log(i, 2) == int(log(i, 2)):\n# print 'edge %d' % i\n# i += 1\n\n cl_ing = row[1]\n cl_ed = row[2]\n\n if (cl_ing in cl_to_ig_id.keys()) and (cl_ed in cl_to_ig_id.keys()):\n ing = cl_to_ig_id[cl_ing]\n ed = cl_to_ig_id[cl_ed]\n else:\n missing_cases += 0\n \n ig_edgelist.append((ing, ed))\nintermediate = time.time()\n\ng.add_edges(ig_edgelist)\nend = time.time()\n\nprint 'itertuples took %d seconds to go through %d edges' % (intermediate - start, edgelist.shape[0])\nprint 'igraph took %d seconds to add %d edges' % (end - start, edgelist.shape[0])", "_____no_output_____" ] ], [ [ "# igraph find vs. 
select", "_____no_output_____" ] ], [ [ "start = time.time()\nR = 1000\nfor i in range(R):\n g.vs.find(name='92891')\nend = time.time()\nprint 'g.vs.find took %E seconds per lookup' % ((end - start)/R)", "_____no_output_____" ], [ "start = time.time()\nR = 1000\nfor i in range(R):\n g.vs.select(name='92891')\nend = time.time()\nprint 'g.vs.select took %E seconds per lookup' % ((end - start)/R)", "_____no_output_____" ], [ "start = time.time()\nR = 1000\nfor i in range(R):\n cl_to_ig_id[92891]\nend = time.time()\nprint 'pandas df lookup took %E seconds per lookup' % ((end - start)/R)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0bbfb0458d7dc894176eb036aeadeb5eb90449c
7,729
ipynb
Jupyter Notebook
1. Safety Challenge - Data reading and cleaning.ipynb
jtsw1990/grab-ai-safety
2a6a34615bd155cc0ebef51dfd65f92f9db2ddae
[ "MIT" ]
1
2020-01-24T17:29:41.000Z
2020-01-24T17:29:41.000Z
1. Safety Challenge - Data reading and cleaning.ipynb
jtsw1990/grab-ai-safety
2a6a34615bd155cc0ebef51dfd65f92f9db2ddae
[ "MIT" ]
null
null
null
1. Safety Challenge - Data reading and cleaning.ipynb
jtsw1990/grab-ai-safety
2a6a34615bd155cc0ebef51dfd65f92f9db2ddae
[ "MIT" ]
null
null
null
29.276515
120
0.563333
[ [ [ "## Download and extract zip from web\n\n- Specifies the source link, destination url and file name to download and extract data files\n- Currently reading from external folder as github does not support large files\n - To rerun function for testing before submission\n - To add checks and conditions for the function\n- Link to zip download here: \"https://s3-ap-southeast-1.amazonaws.com/grab-aiforsea-dataset/safety.zip\" ", "_____no_output_____" ] ], [ [ "import zipfile\nimport urllib.request\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom tqdm import tqdm", "_____no_output_____" ], [ "SOURCE = \"https://s3-ap-southeast-1.amazonaws.com/grab-aiforsea-dataset/safety.zip\"\nOUTPUT_PATH = \"../grab-ai-safety-data\"\nFILE_NAME = \"\"", "_____no_output_____" ], [ "class DownloadProgressBar(tqdm):\n '''Class for tqdm progress bar.'''\n def update_to(self, b=1, bsize=1, tsize=None):\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n)\n\n\ndef maybe_download(url, output_path, dest_file_name):\n '''Function that checks the validity of a desired URL,\n downloads and extracts a ZIP file for the purposes of\n the Grab AI challenge.\n \n Args:\n url (str): Download path of the dataset in question\n output_path(str): path of the desired download destination\n dest_file_name(str): Desired file name. \n To include .zip extension\n \n Returns:\n None.\n Extracts all relevant data files into a desired folder for\n download.\n '''\n full_path = output_path+'/'+dest_file_name\n with DownloadProgressBar(\n unit='B', \n unit_scale=True,\n miniters=1, \n desc=url.split(\"/\")[-1]\n ) as t:\n urllib.request.urlretrieve(\n url, \n filename=full_path, \n reporthook=t.update_to\n )\n with zipfile.ZipFile(full_path, \"r\") as zip_ref:\n zip_ref.extractall(output_path)", "_____no_output_____" ], [ "# download_url(SOURCE, OUTPUT_PATH, FILE_NAME)", "_____no_output_____" ], [ "df0 = pd.read_csv(\"../grab-ai-safety-data/features/part-00000-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv\")\ndf1 = pd.read_csv(\"../grab-ai-safety-data/features/part-00001-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv\")\ndf2 = pd.read_csv(\"../grab-ai-safety-data/features/part-00002-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv\")\ndf3 = pd.read_csv(\"../grab-ai-safety-data/features/part-00003-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv\")\ndf4 = pd.read_csv(\"../grab-ai-safety-data/features/part-00004-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv\")\ndf5 = pd.read_csv(\"../grab-ai-safety-data/features/part-00005-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv\")\ndf6 = pd.read_csv(\"../grab-ai-safety-data/features/part-00006-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv\")\ndf7 = pd.read_csv(\"../grab-ai-safety-data/features/part-00007-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv\")\ndf8 = pd.read_csv(\"../grab-ai-safety-data/features/part-00008-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv\")\ndf9 = pd.read_csv(\"../grab-ai-safety-data/features/part-00009-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv\")\nresponse = pd.read_csv(\"../grab-ai-safety-data/labels/part-00000-e9445087-aa0a-433b-a7f6-7f4c19d78ad6-c000.csv\")", "_____no_output_____" ] ], [ [ "## Merge and drop duplicates\n- Join the feautres together with the labels\n- Get rid of any obvious duplicates in the features and response\n- No data cleaning or formatting to minimize data leakage", "_____no_output_____" ] ], [ [ "df_features = pd.concat(\n [df1, df2, df3, df4, df5, df6, df7, df8, df9], \n axis=0\n).drop_duplicates(\n 
keep=False\n)\n\nresponse = response.drop_duplicates(\n subset=\"bookingID\", \n keep=False\n)", "_____no_output_____" ], [ "df = pd.merge(\n df_features,\n response,\n how=\"inner\",\n on=\"bookingID\"\n).sort_values(\n [\"bookingID\", \"second\"],\n ascending=True\n)", "_____no_output_____" ], [ "with open('../grab-ai-safety-data/df_full.pickle', 'wb') as f:\n pickle.dump(df, f)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0bc1285e248ac492a0ed719814f8eba69c49bf3
412,600
ipynb
Jupyter Notebook
KNNvsEM.ipynb
beicoles/blank_sweeper
75eb66a558baef4b303baedd2d6df04a384a7474
[ "MIT" ]
null
null
null
KNNvsEM.ipynb
beicoles/blank_sweeper
75eb66a558baef4b303baedd2d6df04a384a7474
[ "MIT" ]
null
null
null
KNNvsEM.ipynb
beicoles/blank_sweeper
75eb66a558baef4b303baedd2d6df04a384a7474
[ "MIT" ]
null
null
null
342.406639
328,788
0.869818
[ [ [ "import impyute\nimport pandas as pd\nimport copy", "_____no_output_____" ], [ "import numpy as np\nfrom sklearn.impute import SimpleImputer\nfrom time import time\n\nfrom sklearn.decomposition import MiniBatchDictionaryLearning\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport scipy as sp", "_____no_output_____" ], [ "imp = SimpleImputer(missing_values= np.nan, strategy='mean')\n\ncsv = open(\"Fireball_And_Bolide_Reports.csv\", 'r')\nall_data = csv.readlines()[1:]\n\nfor i in range(len(all_data)):\n all_data[i] = all_data[i][:-2].split(',')[1:]\n\n\nnparray = np.array(all_data)[:,:3].tolist()\n\nfor arr in nparray:\n arr[0] = float(arr[0][:-1]) if arr[0][-1:] is 'N' else float(arr[0][:-1]) * -1.0\n \n arr[1] = float(arr[1][:-1]) if arr[1][-1:] is 'E' else float(arr[1][:-1]) * -1.0\ntrain = []\ntarget = []\nfor arr in nparray:\n if arr[2] is \"\":\n target.append(arr)\n else:\n train.append(arr)\n\nprint(np.array(train))", "[['-22.0' '29.2' '38']\n ['2.0' '28.8' '36']\n ['-44.2' '-176.2' '44']\n ['-61.7' '132.6' '22.2']\n ['33.5' '144.9' '26.3']\n ['-65.6' '138.4' '28.9']\n ['-8.0' '-11.2' '39']\n ['-4.6' '-66.3' '39']\n ['32.8' '-165.1' '23.5']\n ['12.4' '-122.4' '42']\n ['43.1' '115.8' '22.2']\n ['21.0' '-178.5' '37']\n ['38.0' '158.0' '26']\n ['45.7' '26.9' '45.5']\n ['-18.5' '141.8' '29.6']\n ['39.5' '2.0' '34.3']\n ['-68.2' '-24.0' '37']\n ['0.0' '-111.8' '30.5']\n ['54.8' '61.1' '23.3']\n ['-28.7' '121.5' '30.7']\n ['-4.2' '120.6' '19.1']\n ['18.9' '141.2' '26.3']\n ['-19.1' '-25.0' '22.2']\n ['44.7' '35.3' '59.3']\n ['-32.7' '17.1' '26']\n ['-18.8' '-73.4' '38']\n ['-31.8' '137.1' '29.1']\n ['68.0' '-149.0' '33.7']\n ['76.6' '96.3' '26.3']\n ['-34.4' '118.2' '66.6']\n ['-15.9' '88.1' '39.8']\n ['-15.1' '-155.6' '36.1']\n ['13.3' '-110.7' '25']\n ['-50.2' '90.2' '25.6']\n ['-10.3' '-164.7' '40.7']\n ['35.5' '-30.7' '21.2']\n ['0.3' '156.2' '26.5']\n ['-39.4' '-95.9' '30.8']\n ['-1.3' '147.6' '18.7']\n ['-36.9' '87.3' '35.4']\n ['-28.1' '-64.6' '40.7']\n ['86.7' '-162.1' '30.7']\n ['3.2' '137.2' '32.5']\n ['-69.5' '-179.7' '23.3']\n ['-31.1' '140.0' '38.1']\n ['8.0' '119.1' '35.2']\n ['-71.5' '93.4' '28.5']\n ['2.9' '64.4' '37']\n ['-45.8' '-172.7' '26.1']\n ['-2.0' '119.2' '27.2']\n ['21.9' '-131.1' '28.7']\n ['-5.4' '159.3' '50']\n ['-8.1' '-111.9' '35']\n ['2.5' '29.6' '33.3']\n ['-75.4' '49.6' '29.3']\n ['-36.1' '-5.5' '33.1']\n ['51.2' '-84.6' '27.8']\n ['-69.8' '-111.7' '23.8']\n ['-25.5' '51.5' '36.3']\n ['-18.3' '64.2' '38.7']\n ['11.8' '117.0' '36']\n ['1.2' '-52.2' '28.1']\n ['-48.7' '139.1' '26.7']\n ['63.1' '172.3' '27.2']\n ['36.2' '107.4' '25.2']\n ['36.4' '41.5' '26.8']\n ['37.7' '-39.6' '37.4']\n ['-61.8' '-135.5' '33.3']]\n" ], [ "arr =np.array(train).astype(np.float)\nt0 = time()\ndico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=100)\nV = dico.fit(arr).components_\ndt = time() - t0\nprint('done in %.2fs.' 
% dt)", "done in 0.30s.\n" ], [ "target_np = np.array(target)\nfor npa in nparray:\n if npa[2] == \"\":\n npa[2]=np.nan \n\nresult = impyute.fast_knn(np.array(nparray).astype(float))\n#result = np.around(result,3)\n\n\ntarget_arr = copy.deepcopy(nparray)\nfor npaa in target_arr:\n if npaa[2] is np.nan:\n npaa[2]=-1 \n\nprint(nparray)\ntype(nparray)\n\n#print(result[np.lexsort((result[:,2],result[:,1],result[:,0]))])\n\n", "[[-22.0, 29.2, '38'], [2.0, 28.8, '36'], [-44.2, -176.2, '44'], [-61.7, 132.6, '22.2'], [33.5, 144.9, '26.3'], [-49.2, -172.2, nan], [-65.6, 138.4, '28.9'], [-8.0, -11.2, '39'], [-4.6, -66.3, '39'], [32.8, -165.1, '23.5'], [22.7, -150.0, nan], [12.4, -122.4, '42'], [43.1, 115.8, '22.2'], [21.0, -178.5, '37'], [38.0, 158.0, '26'], [45.7, 26.9, '45.5'], [60.3, -64.6, nan], [3.2, -45.4, nan], [-18.5, 141.8, '29.6'], [39.5, 2.0, '34.3'], [-68.2, -24.0, '37'], [0.0, -111.8, '30.5'], [-29.0, -94.9, nan], [54.8, 61.1, '23.3'], [-28.7, 121.5, '30.7'], [-4.2, 120.6, '19.1'], [18.9, 141.2, '26.3'], [22.2, -132.9, nan], [-19.1, -25.0, '22.2'], [44.7, 35.3, '59.3'], [-33.9, -115.9, nan], [-32.7, 17.1, '26'], [-18.8, -73.4, '38'], [-32.8, -61.5, nan], [-31.8, 137.1, '29.1'], [68.0, -149.0, '33.7'], [76.6, 96.3, '26.3'], [-34.4, 118.2, '66.6'], [-23.3, -49.2, nan], [-15.9, 88.1, '39.8'], [-15.1, -155.6, '36.1'], [13.3, -110.7, '25'], [-50.2, 90.2, '25.6'], [-10.3, -164.7, '40.7'], [35.5, -30.7, '21.2'], [0.3, 156.2, '26.5'], [-6.2, -49.9, nan], [-39.4, -95.9, '30.8'], [-23.0, -38.8, nan], [-1.3, 147.6, '18.7'], [-36.9, 87.3, '35.4'], [-28.1, -64.6, '40.7'], [86.7, -162.1, '30.7'], [3.2, 137.2, '32.5'], [-69.5, -179.7, '23.3'], [-39.1, -118.6, nan], [-31.1, 140.0, '38.1'], [8.0, 119.1, '35.2'], [-71.5, 93.4, '28.5'], [-43.7, 85.7, nan], [2.9, 64.4, '37'], [-45.8, -172.7, '26.1'], [-2.0, 119.2, '27.2'], [21.9, -131.1, '28.7'], [-18.8, -158.6, nan], [-3.0, 76.4, nan], [-41.5, -21.9, nan], [17.3, -83.6, nan], [-5.4, 159.3, '50'], [-8.1, -111.9, '35'], [-6.9, 73.7, nan], [-41.8, -36.2, nan], [2.5, 29.6, '33.3'], [-8.0, -86.0, nan], [-75.4, 49.6, '29.3'], [-36.1, -5.5, '33.1'], [51.2, -84.6, '27.8'], [-69.8, -111.7, '23.8'], [-25.5, 51.5, '36.3'], [-18.3, 64.2, '38.7'], [11.8, 117.0, '36'], [1.2, -52.2, '28.1'], [8.4, -157.9, nan], [-18.9, 105.2, nan], [-15.8, -174.8, nan], [-48.7, 139.1, '26.7'], [63.1, 172.3, '27.2'], [76.7, -10.6, nan], [36.2, 107.4, '25.2'], [36.4, 41.5, '26.8'], [37.7, -39.6, '37.4'], [-61.8, -135.5, '33.3']]\n" ], [ "type(result)", "_____no_output_____" ], [ "target_np = np.array(target_arr).astype(np.float)\nshow_arr= (result - target_np)\nshow_arr[show_arr > 1] = 1\n#show_arr", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(20,100))\nim = ax.imshow((show_arr*100)+10)\n\nfor i in range(len(result)):\n for j in range(len(result[i])):\n ax.text(j,i, result[i, j], ha='center', va='center',color='w',fontsize='x-large',fontweight='bold')\n", "_____no_output_____" ], [ "em_array = nparray.copy()\npd.DataFrame(em_array).head(30).style.hide_index()", "_____no_output_____" ], [ "em_array= impyute.em(np.array(em_array).astype('float64'))", "_____no_output_____" ], [ "resultdf = pd.DataFrame(result,columns=['Lat','Long','Distance(KNN)'])\n\nemdf = pd.DataFrame(em_array,columns=['Lat','Long','Distance(EM)'])\n\nemdf.iloc[:,2].to_frame()", "_____no_output_____" ], [ "concatdf = pd.concat([resultdf,emdf.iloc[:,2]], axis=1)\npd.set_option('display.max_rows', 500)\nconcatdf.sort_values(by=['Lat', 'Long']).style.hide_index()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0bc27960d2fd18ca1b8ab68cd9f351b29374b76
11,909
ipynb
Jupyter Notebook
Hugging face/huggingface_ar.ipynb
akelimad/nlp
7911d29d6c89264d8944d53b2d51d7b3a2fd6161
[ "MIT" ]
1
2021-07-09T10:49:39.000Z
2021-07-09T10:49:39.000Z
Hugging face/huggingface_ar.ipynb
akelimad/nlp
7911d29d6c89264d8944d53b2d51d7b3a2fd6161
[ "MIT" ]
null
null
null
Hugging face/huggingface_ar.ipynb
akelimad/nlp
7911d29d6c89264d8944d53b2d51d7b3a2fd6161
[ "MIT" ]
null
null
null
20.497418
627
0.200269
[ [ [ "from transformers import AutoTokenizer\nfrom transformers import pipeline", "None of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.\n" ], [ "token = AutoTokenizer.from_pretrained(\"bert-base-uncased\")", "_____no_output_____" ], [ "ar_text = u\"ولعل أحد أهم التطورات التي حققتها الرياضيات العربية بدأ في هذا الوقت مع عمل الخوارزمي ، أي بدايات الجبر. من المهم أن نفهم مدى أهمية هذه الفكرة الجديدة. لقد كان ابتعادًا ثوريًا عن المفهوم اليوناني للرياضيات الذي كان أساسًا الهندسة. كان الجبر نظرية موحدة سمحت بأن يتم التعامل مع الأعداد العقلانية والأعداد غير المنطقية والمقادير الهندسية وما إلى ذلك على أنها 'كائنات جبرية'. لقد أعطى الرياضيات مسارًا جديدًا تمامًا للتطور أوسع بكثير من حيث المفهوم إلى ما كان موجودًا من قبل ، ووفر وسيلة للتطور المستقبلي للموضوع. جانب آخر مهم لإدخال الأفكار الجبرية هو أنه سمح بتطبيق الرياضيات على نفسها بطريقة لم تحدث من قبل\"", "_____no_output_____" ], [ "words_tokens = token.tokenize(ar_text)\nwords_tokens", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
d0bc5510fdd1a6e011dcebedddadee365c30277a
1,147
ipynb
Jupyter Notebook
Python/14. python functionals/71. map and lambda fn.ipynb
faisalsanto007/Hakcerrank-problem-solving
eaf6404e8896fe3448df8a3cb4c86585fd7bebcc
[ "MIT" ]
null
null
null
Python/14. python functionals/71. map and lambda fn.ipynb
faisalsanto007/Hakcerrank-problem-solving
eaf6404e8896fe3448df8a3cb4c86585fd7bebcc
[ "MIT" ]
null
null
null
Python/14. python functionals/71. map and lambda fn.ipynb
faisalsanto007/Hakcerrank-problem-solving
eaf6404e8896fe3448df8a3cb4c86585fd7bebcc
[ "MIT" ]
null
null
null
18.5
55
0.457716
[ [ [ "cube of fibonacci numbers", "_____no_output_____" ] ], [ [ "cube = lambda x: x ** 3\n\ndef fibonacci(n):\n nums = [0, 1]\n for i in range(2, n):\n nums.append(nums[i - 1] + nums[i - 2])\n return nums[:n]\n\n\nif __name__ == '__main__':\n n = int(input())\n print(list(map(cube, fibonacci(n))))\n", "5\n[0, 1, 1, 8, 27]\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
d0bc5640f0e1cdad2e0013c8f2477ea697329aa1
5,960
ipynb
Jupyter Notebook
0.17/_downloads/d1b18c3376911723f0257fe5003a8477/plot_linear_model_patterns.ipynb
drammock/mne-tools.github.io
5d3a104d174255644d8d5335f58036e32695e85d
[ "BSD-3-Clause" ]
null
null
null
0.17/_downloads/d1b18c3376911723f0257fe5003a8477/plot_linear_model_patterns.ipynb
drammock/mne-tools.github.io
5d3a104d174255644d8d5335f58036e32695e85d
[ "BSD-3-Clause" ]
null
null
null
0.17/_downloads/d1b18c3376911723f0257fe5003a8477/plot_linear_model_patterns.ipynb
drammock/mne-tools.github.io
5d3a104d174255644d8d5335f58036e32695e85d
[ "BSD-3-Clause" ]
null
null
null
55.185185
971
0.631711
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Linear classifier on sensor data with plot patterns and filters\n\n\nHere decoding, a.k.a MVPA or supervised machine learning, is applied to M/EEG\ndata in sensor space. Fit a linear classifier with the LinearModel object\nproviding topographical patterns which are more neurophysiologically\ninterpretable [1]_ than the classifier filters (weight vectors).\nThe patterns explain how the MEG and EEG data were generated from the\ndiscriminant neural sources which are extracted by the filters.\nNote patterns/filters in MEG data are more similar than EEG data\nbecause the noise is less spatially correlated in MEG than EEG.\n\nReferences\n----------\n\n.. [1] Haufe, S., Meinecke, F., Görgen, K., Dähne, S., Haynes, J.-D.,\n Blankertz, B., & Bießmann, F. (2014). On the interpretation of\n weight vectors of linear models in multivariate neuroimaging.\n NeuroImage, 87, 96–110. doi:10.1016/j.neuroimage.2013.10.067\n\n", "_____no_output_____" ] ], [ [ "# Authors: Alexandre Gramfort <[email protected]>\n# Romain Trachel <[email protected]>\n# Jean-Remi King <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport mne\nfrom mne import io, EvokedArray\nfrom mne.datasets import sample\nfrom mne.decoding import Vectorizer, get_coef\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\n\n# import a linear classifier from mne.decoding\nfrom mne.decoding import LinearModel\n\nprint(__doc__)\n\ndata_path = sample.data_path()", "_____no_output_____" ] ], [ [ "Set parameters\n\n", "_____no_output_____" ] ], [ [ "raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\ntmin, tmax = -0.1, 0.4\nevent_id = dict(aud_l=1, vis_l=3)\n\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname, preload=True)\nraw.filter(.5, 25, fir_design='firwin')\nevents = mne.read_events(event_fname)\n\n# Read epochs\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,\n decim=2, baseline=None, preload=True)\n\nlabels = epochs.events[:, -1]\n\n# get MEG and EEG data\nmeg_epochs = epochs.copy().pick_types(meg=True, eeg=False)\nmeg_data = meg_epochs.get_data().reshape(len(labels), -1)", "_____no_output_____" ] ], [ [ "Decoding in sensor space using a LogisticRegression classifier\n\n", "_____no_output_____" ] ], [ [ "clf = LogisticRegression(solver='lbfgs')\nscaler = StandardScaler()\n\n# create a linear model with LogisticRegression\nmodel = LinearModel(clf)\n\n# fit the classifier on MEG data\nX = scaler.fit_transform(meg_data)\nmodel.fit(X, labels)\n\n# Extract and plot spatial filters and spatial patterns\nfor name, coef in (('patterns', model.patterns_), ('filters', model.filters_)):\n # We fitted the linear model onto Z-scored data. To make the filters\n # interpretable, we must reverse this normalization step\n coef = scaler.inverse_transform([coef])[0]\n\n # The data was vectorized to fit a single model across all time points and\n # all channels. 
We thus reshape it:\n coef = coef.reshape(len(meg_epochs.ch_names), -1)\n\n # Plot\n evoked = EvokedArray(coef, meg_epochs.info, tmin=epochs.tmin)\n evoked.plot_topomap(title='MEG %s' % name, time_unit='s')", "_____no_output_____" ] ], [ [ "Let's do the same on EEG data using a scikit-learn pipeline\n\n", "_____no_output_____" ] ], [ [ "X = epochs.pick_types(meg=False, eeg=True)\ny = epochs.events[:, 2]\n\n# Define a unique pipeline to sequentially:\nclf = make_pipeline(\n Vectorizer(), # 1) vectorize across time and channels\n StandardScaler(), # 2) normalize features across trials\n LinearModel(\n LogisticRegression(solver='lbfgs'))) # 3) fits a logistic regression\nclf.fit(X, y)\n\n# Extract and plot patterns and filters\nfor name in ('patterns_', 'filters_'):\n # The `inverse_transform` parameter will call this method on any estimator\n # contained in the pipeline, in reverse order.\n coef = get_coef(clf, name, inverse_transform=True)\n evoked = EvokedArray(coef, epochs.info, tmin=epochs.tmin)\n evoked.plot_topomap(title='EEG %s' % name[:-1], time_unit='s')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0bc56aaf5b7f5c97bef3bb97f80e05f9b81d7ba
165,990
ipynb
Jupyter Notebook
notebooks/1. Entropy Prox Term.ipynb
robin-vjc/smpspy
eea63a6836a7cb9257ba53775c2a239cc79a4c4f
[ "MIT" ]
2
2021-08-13T09:25:17.000Z
2021-08-13T09:25:18.000Z
notebooks/1. Entropy Prox Term.ipynb
robin-vjc/smpspy
eea63a6836a7cb9257ba53775c2a239cc79a4c4f
[ "MIT" ]
null
null
null
notebooks/1. Entropy Prox Term.ipynb
robin-vjc/smpspy
eea63a6836a7cb9257ba53775c2a239cc79a4c4f
[ "MIT" ]
4
2019-06-30T19:02:53.000Z
2022-01-11T12:26:58.000Z
152.986175
79,395
0.720935
[ [ [ "import numpy as np\n\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.io import output_notebook\n\nfrom nsopy import SGMDoubleSimpleAveraging as DSA\nfrom nsopy.loggers import EnhancedDualMethodLogger\n\noutput_notebook()\n%cd ..\n\nfrom smpspy.oracles import TwoStage_SMPS_InnerProblem", "_____no_output_____" ] ], [ [ "# Solving dual model using DSA with Entropy Prox Term\n\nInstantiating inner problem", "_____no_output_____" ], [ "### Solve battery of problems", "_____no_output_____" ] ], [ [ "# Setup\nBENCHMARKS_PATH = './smpspy/benchmark_problems/2_caroe_schultz/'\n\nn_S_exp = [10, 50, 100, 500]\nN_STEPS = 200\nGAMMA = 1.0", "_____no_output_____" ], [ "# First generate traditional DSA\n\ninner_problems = {}\nmethods = {}\nmethod_loggers = {}\n\nfor n_S in n_S_exp:\n ip = TwoStage_SMPS_InnerProblem(BENCHMARKS_PATH+'caroe_schultz_{}'.format(n_S))\n dsa = DSA(ip.oracle, ip.projection_function, dimension=ip.dimension, gamma=GAMMA)\n logger_dsa = EnhancedDualMethodLogger(dsa)\n \n inner_problems[n_S] = ip\n methods[n_S] = dsa\n method_loggers[n_S] = logger_dsa", "Parsing nominal model information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_10.cor and .tim ...\nParsing stochastic information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_10.sto ...\nStochastic model is of type SCENARIOS DISCRETE\nParsing nominal model information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_50.cor and .tim ...\nParsing stochastic information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_50.sto ...\nStochastic model is of type SCENARIOS DISCRETE\nParsing nominal model information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_100.cor and .tim ...\nParsing stochastic information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_100.sto ...\nStochastic model is of type SCENARIOS DISCRETE\nParsing nominal model information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_500.cor and .tim ...\nParsing stochastic information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_500.sto ...\nStochastic model is of type SCENARIOS DISCRETE\n" ], [ "for n_S, method in methods.items():\n for step in range(N_STEPS):\n if not step % 100:\n print('[n_S={}] step: {} of method {}'.format(n_S, str(step), str(method.desc)))\n method.dual_step()", "[n_S=500] step: 0 of method DSA, $\\gamma = 1.0$\n[n_S=500] step: 100 of method DSA, $\\gamma = 1.0$\n[n_S=10] step: 0 of method DSA, $\\gamma = 1.0$\n[n_S=10] step: 100 of method DSA, $\\gamma = 1.0$\n[n_S=100] step: 0 of method DSA, $\\gamma = 1.0$\n[n_S=100] step: 100 of method DSA, $\\gamma = 1.0$\n[n_S=50] step: 0 of method DSA, $\\gamma = 1.0$\n[n_S=50] step: 100 of method DSA, $\\gamma = 1.0$\n" ], [ "inner_problems_entropy = {}\nmethods_entropy = {}\nmethod_loggers_entropy = {}\n\nfor n_S in n_S_exp:\n R_a_posteriori = np.linalg.norm(methods[n_S].lambda_k, ord=np.inf)\n R_safe = R_a_posteriori*1.1\n \n ip = TwoStage_SMPS_InnerProblem(BENCHMARKS_PATH+'caroe_schultz_{}'.format(n_S), R=R_safe)\n dsa_entropy = DSA(ip.oracle, ip.softmax_projection, dimension=ip.dimension, gamma=GAMMA)\n logger_dsa_entropy = EnhancedDualMethodLogger(dsa_entropy)\n \n inner_problems_entropy[n_S] = ip\n methods_entropy[n_S] = dsa_entropy\n method_loggers_entropy[n_S] = logger_dsa_entropy", "Parsing nominal model information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_10.cor and .tim ...\nParsing stochastic information from 
./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_10.sto ...\nStochastic model is of type SCENARIOS DISCRETE\nParsing nominal model information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_50.cor and .tim ...\nParsing stochastic information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_50.sto ...\nStochastic model is of type SCENARIOS DISCRETE\nParsing nominal model information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_100.cor and .tim ...\nParsing stochastic information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_100.sto ...\nStochastic model is of type SCENARIOS DISCRETE\nParsing nominal model information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_500.cor and .tim ...\nParsing stochastic information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_500.sto ...\nStochastic model is of type SCENARIOS DISCRETE\n" ], [ "for n_S, method in methods_entropy.items():\n for step in range(N_STEPS):\n if not step % 100:\n print('[n_S={}] step: {} of method {}'.format(n_S, str(step), str(method.desc)))\n method.dual_step()", "[n_S=500] step: 0 of method DSA, $\\gamma = 1.0$\n[n_S=500] step: 100 of method DSA, $\\gamma = 1.0$\n[n_S=10] step: 0 of method DSA, $\\gamma = 1.0$\n[n_S=10] step: 100 of method DSA, $\\gamma = 1.0$\n[n_S=100] step: 0 of method DSA, $\\gamma = 1.0$\n[n_S=100] step: 100 of method DSA, $\\gamma = 1.0$\n[n_S=50] step: 0 of method DSA, $\\gamma = 1.0$\n[n_S=50] step: 100 of method DSA, $\\gamma = 1.0$\n" ], [ "# find \"d*\"\nd_stars = {}\nEPS = 0.01\n\nfor n_S in n_S_exp:\n d_star_dsa = max(method_loggers[n_S].d_k_iterates)\n d_star_dsa_entropy = max(method_loggers_entropy[n_S].d_k_iterates)\n d_stars[n_S] = max(d_star_dsa, d_star_dsa_entropy) + EPS", "_____no_output_____" ], [ "p = figure(title=\"comparison\", x_axis_label='iteration', y_axis_label='d* - d_k', y_axis_type='log', toolbar_location='above')\n\nplot_colors = {\n 10: 'blue',\n 50: 'green',\n 100: 'red',\n 500: 'orange',\n 1000: 'purple',\n}", "_____no_output_____" ], [ "for n_S in n_S_exp:\n logger = method_loggers[n_S]\n p.line(range(len(logger.d_k_iterates)), d_stars[n_S] - np.array(logger.d_k_iterates), legend=\"DSA, n_scen={}, gamma={}\".format(n_S, GAMMA, inner_problems[n_S].R), \n color=plot_colors[n_S], line_dash='dashed')\n \nfor n_S in n_S_exp:\n logger = method_loggers_entropy[n_S]\n p.line(range(len(logger.d_k_iterates)), d_stars[n_S] - np.array(logger.d_k_iterates), legend=\"DSA Entropy, n_scen={}, gamma={}, R={}\".format(n_S, GAMMA, inner_problems_entropy[n_S].R), \n color=plot_colors[n_S])", "_____no_output_____" ], [ "p.legend.location = \"top_right\"\np.legend.visible = True\np.legend.background_fill_alpha = 0.5\nshow(p)", "_____no_output_____" ] ], [ [ "### Single run", "_____no_output_____" ] ], [ [ "ip = TwoStage_SMPS_InnerProblem('./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_10')", "Parsing nominal model information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_10.cor and .tim ...\nParsing stochastic information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_10.sto ...\nStochastic model is of type SCENARIOS DISCRETE\n" ] ], [ [ "First solving it with DSA", "_____no_output_____" ] ], [ [ "GAMMA = 1.0\n\ndsa = DSA(ip.oracle, ip.projection_function, dimension=ip.dimension, gamma=GAMMA)\nlogger_dsa = EnhancedDualMethodLogger(dsa)", "_____no_output_____" ], [ "for iteration in range(1000):\n if not iteration%50:\n print('Iteration: {}, 
d_k={}'.format(iteration, dsa.d_k))\n dsa.dual_step()", "Iteration: 0, d_k=-inf\nIteration: 50, d_k=-55.0159186353\nIteration: 100, d_k=-54.9786759617\nIteration: 150, d_k=-54.8958271191\nIteration: 200, d_k=-54.946123664\nIteration: 250, d_k=-54.9126206845\nIteration: 300, d_k=-54.8681531927\nIteration: 350, d_k=-54.8759044684\nIteration: 400, d_k=-54.83683095\nIteration: 450, d_k=-54.8435540524\nIteration: 500, d_k=-54.833795125\nIteration: 550, d_k=-54.8560302563\nIteration: 600, d_k=-54.8279813457\nIteration: 650, d_k=-54.8655052529\nIteration: 700, d_k=-54.8348583158\nIteration: 750, d_k=-54.8305845772\nIteration: 800, d_k=-54.8340387889\nIteration: 850, d_k=-54.8374382962\nIteration: 900, d_k=-54.8204466776\nIteration: 950, d_k=-54.8407756352\nIteration: 1000, d_k=-54.8468588731\nIteration: 1050, d_k=-54.8219773445\nIteration: 1100, d_k=-54.8287186107\nIteration: 1150, d_k=-54.8173967699\nIteration: 1200, d_k=-54.8214714153\nIteration: 1250, d_k=-54.8338867381\nIteration: 1300, d_k=-54.8255492724\nIteration: 1350, d_k=-54.8146888169\nIteration: 1400, d_k=-54.8334153832\nIteration: 1450, d_k=-54.8073424585\n" ] ], [ [ "Then get the required parameters (R is derived a posteriori)", "_____no_output_____" ] ], [ [ "R_a_posteriori = np.linalg.norm(dsa.lambda_k, ord=np.inf)\nR_safe = R_a_posteriori*1.1\nip = TwoStage_SMPS_InnerProblem('./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_10', R=R_safe)\n\nprint('A-posteriori R={}'.format(R_a_posteriori))", "Parsing nominal model information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_10.cor and .tim ...\nParsing stochastic information from ./smpspy/benchmark_problems/2_caroe_schultz/caroe_schultz_10.sto ...\nStochastic model is of type SCENARIOS DISCRETE\nA-posteriori R=0.994113159479\n" ] ], [ [ "Solve it using DSA with Entropy prox function. 
**Note that the only difference is that we pass in softmax projection function!**", "_____no_output_____" ] ], [ [ "dsa_entropy = DSA(ip.oracle, ip.softmax_projection, dimension=ip.dimension, gamma=GAMMA)\nlogger_dsa_entropy = EnhancedDualMethodLogger(dsa_entropy)", "_____no_output_____" ], [ "for iteration in range(1000):\n if not iteration%50:\n print('Iteration: {}, d_k={}'.format(iteration, dsa_entropy.d_k))\n dsa_entropy.dual_step()", "Iteration: 0, d_k=-inf\nIteration: 50, d_k=-55.7332142332\nIteration: 100, d_k=-55.567744575\nIteration: 150, d_k=-55.523640381\nIteration: 200, d_k=-55.3948346217\nIteration: 250, d_k=-55.2914856945\nIteration: 300, d_k=-54.9578756746\nIteration: 350, d_k=-54.9153649334\nIteration: 400, d_k=-55.0115510287\nIteration: 450, d_k=-54.9759676722\nIteration: 500, d_k=-54.9421228308\nIteration: 550, d_k=-54.9955055834\nIteration: 600, d_k=-54.9981796194\nIteration: 650, d_k=-54.911199455\nIteration: 700, d_k=-54.975455558\nIteration: 750, d_k=-54.9287581365\nIteration: 800, d_k=-54.9192514909\nIteration: 850, d_k=-54.8836071148\nIteration: 900, d_k=-54.8820872471\nIteration: 950, d_k=-54.8604667678\nIteration: 1000, d_k=-54.8862314468\nIteration: 1050, d_k=-54.8452524481\nIteration: 1100, d_k=-54.8691533025\nIteration: 1150, d_k=-54.8887367985\nIteration: 1200, d_k=-54.8745224513\nIteration: 1250, d_k=-54.8614982156\nIteration: 1300, d_k=-54.8558172355\nIteration: 1350, d_k=-54.8451513578\nIteration: 1400, d_k=-54.8543131313\nIteration: 1450, d_k=-54.8538891652\n" ], [ "logger_dsa.lambda_k_iterates[-1]", "_____no_output_____" ], [ "logger_dsa_entropy.lambda_k_iterates[-1]", "_____no_output_____" ], [ "p = figure(title=\"comparison\", x_axis_label='iteration', y_axis_label='d_k')", "_____no_output_____" ], [ "p.line(range(len(logger_dsa.d_k_iterates)), logger_dsa.d_k_iterates, legend=\"DSA, gamma={}\".format(GAMMA, R_safe))\np.line(range(len(logger_dsa_entropy.d_k_iterates)), logger_dsa_entropy.d_k_iterates, legend=\"DSA Entropy, gamma={}, R={}\".format(GAMMA, R_safe), color='red')", "_____no_output_____" ], [ "p.legend.location = \"bottom_right\"\nshow(p)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d0bc607869bb2a60956caf04846921f2faea5ccb
82,128
ipynb
Jupyter Notebook
energyBand.ipynb
troyzx/notebook
8b2d9b74981f55381db2704690215b2f3f7c6290
[ "MIT" ]
null
null
null
energyBand.ipynb
troyzx/notebook
8b2d9b74981f55381db2704690215b2f3f7c6290
[ "MIT" ]
null
null
null
energyBand.ipynb
troyzx/notebook
8b2d9b74981f55381db2704690215b2f3f7c6290
[ "MIT" ]
null
null
null
488.857143
60,848
0.946449
[ [ [ "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.optimize import fsolve", "_____no_output_____" ], [ "@np.vectorize\ndef K(k, x0=0.5*np.pi, a=1, U0b=4, **kwarg):\n return fsolve(lambda x: U0b/(2*x) * np.sin(x*a) + np.cos(x*a) - np.cos(k*a), x0)", "_____no_output_____" ], [ "K(np.pi, x0=2*np.pi)", "_____no_output_____" ], [ "k = np.linspace(-np.pi, np.pi, 100)\nKs = np.array([K(i) for i in k])\nKs2 = np.array([K(i, 1.5*np.pi) for i in k])\nKs3 = np.array([K(i, 2.5*np.pi) for i in k])", "_____no_output_____" ], [ "plt.plot(k, Ks**2)\nplt.plot(k, Ks2**2)\nplt.plot(k, Ks3**2)", "_____no_output_____" ], [ "def getBands(n, **kwarg):\n if \"a\" in kwarg.keys():\n a = kwarg[\"a\"]\n else:\n a = 1\n k = np.linspace(-np.pi/a, np.pi/a, 100)\n Ks = []\n for i in range(n):\n Ks.append(K(k, (i+0.5)*np.pi/a, **kwarg))\n return Ks", "_____no_output_____" ], [ "bands = getBands(3)\nfig, axs = plt.subplots(1,2,figsize = (10, 8), dpi = 100)\nfor band in bands:\n axs[0].plot(k, band)\n\nbands = getBands(3, U0b = 10)\nfor band in bands:\n axs[1].plot(k, band)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
d0bc60f426de77f675cfd95d33a309f7a482cd28
7,614
ipynb
Jupyter Notebook
examples/widgets/Part 2 - Events.ipynb
ptone/ipython
b91d6a658d4526746dcbfb62e653d71c5d84eee9
[ "BSD-3-Clause-Clear" ]
2
2015-02-10T18:00:31.000Z
2015-05-01T02:53:46.000Z
examples/widgets/Part 2 - Events.ipynb
khinsen/ipython
dfd5cb1d3e34048593ba537dacdbef08fe766624
[ "BSD-3-Clause-Clear" ]
null
null
null
examples/widgets/Part 2 - Events.ipynb
khinsen/ipython
dfd5cb1d3e34048593ba537dacdbef08fe766624
[ "BSD-3-Clause-Clear" ]
1
2021-05-22T13:52:12.000Z
2021-05-22T13:52:12.000Z
29.172414
363
0.51576
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0bc779a3e5ae97e1a5d9ef411f4372ba3ee6daa
46,327
ipynb
Jupyter Notebook
logistic regression.ipynb
amirshnll/Abalone
17bd03dd583e90b1fd3b3881acca1928f27e433b
[ "MIT" ]
3
2021-01-29T21:24:40.000Z
2022-01-08T10:30:54.000Z
logistic regression.ipynb
amirshnll/Abalone
17bd03dd583e90b1fd3b3881acca1928f27e433b
[ "MIT" ]
null
null
null
logistic regression.ipynb
amirshnll/Abalone
17bd03dd583e90b1fd3b3881acca1928f27e433b
[ "MIT" ]
null
null
null
143.872671
33,460
0.817493
[ [ [ "##### Author : Amir Shokri\n##### github link : https://github.com/amirshnll/Abalone\n##### dataset link : http://archive.ics.uci.edu/ml/datasets/Abalone\n##### email : [email protected]", "_____no_output_____" ], [ "import sklearn\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler \nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.metrics import classification_report, confusion_matrix", "_____no_output_____" ], [ "#read file\ndf = pd.read_csv(\"D:\\\\abalone.txt\", header=None)\nfor char in df:\n df = df.replace('M','1')\n df = df.replace('F','-1')\n df = df.replace('I','0')\ndf\n\n#separate the feature columns from the target column.\nfeatures = [0,1,2,3,4,5,6,7]\nX = df[features]\ny = df[8]\nprint(X)\nprint(y)", " 0 1 2 3 4 5 6 7\n0 1 0.455 0.365 0.095 0.5140 0.2245 0.1010 0.1500\n1 1 0.350 0.265 0.090 0.2255 0.0995 0.0485 0.0700\n2 -1 0.530 0.420 0.135 0.6770 0.2565 0.1415 0.2100\n3 1 0.440 0.365 0.125 0.5160 0.2155 0.1140 0.1550\n4 0 0.330 0.255 0.080 0.2050 0.0895 0.0395 0.0550\n... .. ... ... ... ... ... ... ...\n4172 -1 0.565 0.450 0.165 0.8870 0.3700 0.2390 0.2490\n4173 1 0.590 0.440 0.135 0.9660 0.4390 0.2145 0.2605\n4174 1 0.600 0.475 0.205 1.1760 0.5255 0.2875 0.3080\n4175 -1 0.625 0.485 0.150 1.0945 0.5310 0.2610 0.2960\n4176 1 0.710 0.555 0.195 1.9485 0.9455 0.3765 0.4950\n\n[4177 rows x 8 columns]\n0 15\n1 7\n2 9\n3 10\n4 7\n ..\n4172 11\n4173 10\n4174 9\n4175 10\n4176 12\nName: 8, Length: 4177, dtype: int64\n" ], [ "#separate the Training data and Test data\nX_train, X_test, y_train, y_test = train_test_split(X,y,random_state=1, test_size=0.2)\n# Feature scaling\nscaler = StandardScaler() \nscaler.fit(X_train)\nX_train = scaler.transform(X_train) \nX_test = scaler.transform(X_test)", "_____no_output_____" ], [ "# Finally for the Logistic Regression\nlgisticRegr = LogisticRegression(solver='newton-cg', random_state=0 ,max_iter=2000)\nlgisticRegr.fit(X_train, y_train.values.ravel())", "_____no_output_____" ], [ "#In the prediction step, the model is used to predict the response for given data.\npredictions = lgisticRegr.predict(X_test)\nprint(predictions)", "[ 8 8 7 8 9 6 11 9 11 8 10 5 9 11 7 9 10 11 10 9 8 10 7 10\n 9 11 11 9 11 11 10 9 10 9 6 8 10 13 13 10 10 11 11 10 8 8 11 9\n 10 10 9 8 10 5 11 9 9 9 8 7 8 8 11 9 8 8 10 10 9 8 9 11\n 6 9 8 7 11 10 9 10 7 8 10 8 9 8 10 7 11 9 13 8 8 11 6 8\n 9 12 6 10 8 10 7 8 13 8 10 9 11 9 7 9 9 7 6 10 8 7 10 11\n 9 7 7 13 8 11 10 10 8 10 8 7 8 8 9 7 9 13 8 9 7 9 8 10\n 8 9 10 11 9 8 10 6 9 10 8 9 10 9 9 11 9 10 13 6 8 8 9 10\n 8 13 6 10 11 8 8 10 10 10 7 8 10 9 8 7 7 9 11 7 10 11 6 9\n 6 11 8 11 7 4 7 9 4 9 11 8 8 8 9 8 8 7 10 8 10 8 8 8\n 10 9 11 9 8 8 10 9 8 9 10 9 10 9 9 8 10 11 8 5 11 10 10 9\n 13 10 9 9 7 11 10 7 8 10 10 11 11 10 5 13 10 11 7 10 10 10 7 9\n 8 9 9 9 8 9 8 13 11 8 11 13 7 8 10 11 9 8 10 6 6 7 5 6\n 6 9 7 8 10 9 8 9 7 10 10 9 11 8 8 11 11 9 13 8 8 8 8 11\n 10 8 7 16 11 11 8 5 10 9 10 10 8 11 8 9 9 9 10 8 10 7 8 8\n 10 4 11 16 6 7 11 9 9 10 18 10 10 9 9 4 9 8 5 9 8 9 13 9\n 9 5 9 9 8 10 6 8 11 6 9 10 11 10 9 9 8 9 8 4 9 7 6 13\n 11 7 8 10 8 9 7 10 10 8 9 7 8 8 9 16 9 7 9 9 10 7 9 13\n 8 10 9 10 9 8 16 7 9 9 8 8 9 9 11 8 13 4 6 13 9 10 10 8\n 10 8 8 10 9 8 8 10 11 8 8 9 10 7 9 11 8 8 11 9 4 7 8 11\n 9 8 11 13 10 7 9 11 8 11 10 11 10 9 16 9 6 8 10 10 9 10 11 4\n 7 7 10 10 7 8 7 9 9 11 8 9 8 7 5 7 7 9 9 8 11 6 9 
8\n 11 11 9 9 9 10 16 9 10 7 6 7 7 10 9 8 8 16 9 13 8 10 9 5\n 11 10 9 11 9 11 10 10 9 11 7 7 9 11 9 9 6 9 8 8 9 9 9 11\n 9 11 10 8 8 10 8 8 9 7 8 9 8 10 9 10 10 8 10 7 10 8 10 8\n 8 11 8 9 11 9 10 4 8 12 8 9 4 13 9 18 8 10 8 6 9 10 4 10\n 9 8 10 8 7 10 11 11 10 11 8 11 9 7 11 7 7 9 9 8 9 11 7 9\n 9 9 8 9 8 7 6 8 9 10 9 6 11 6 8 9 9 5 9 8 8 11 7 11\n 8 7 13 9 10 11 8 13 8 9 11 7 10 9 8 9 9 9 9 7 5 6 9 9\n 6 7 9 9 9 10 7 10 6 9 9 7 8 7 10 7 8 10 10 9 11 9 8 9\n 7 8 9 5 11 7 7 11 10 11 9 9 7 7 7 9 7 18 9 10 8 16 8 9\n 8 10 10 9 9 11 11 9 10 8 9 13 8 8 9 10 10 9 10 8 9 8 9 9\n 10 10 8 10 7 8 7 10 10 7 9 8 7 7 10 5 7 7 8 8 10 7 9 8\n 8 10 9 7 10 11 13 7 7 8 8 8 9 11 8 9 11 11 10 10 10 13 9 13\n 9 10 8 11 10 5 7 10 10 10 11 10 7 7 9 11 10 9 7 8 9 6 9 6\n 8 9 7 9 13 13 7 10 11 6 9 10 8 6 11 9 11 9 11 11]\n" ], [ "# Last thing: evaluation of algorithm performance in classifying \ncnf_matrix =confusion_matrix(y_test,predictions)\nprint(confusion_matrix(y_test,predictions)) \nprint(classification_report(y_test,predictions))", "[[ 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 6 4 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 1 8 6 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 1 2 16 23 9 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 1 9 27 36 6 1 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 1 18 46 32 7 2 0 0 0 0 1 0 0 0 0 0 0 0]\n [ 0 0 0 0 1 9 38 51 26 7 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 2 9 20 49 36 24 1 1 0 0 1 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 11 26 28 24 0 2 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 3 10 14 16 14 0 5 0 0 1 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 2 1 9 9 14 1 3 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 1 4 8 5 0 5 0 0 1 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 4 10 4 0 3 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 3 2 1 0 2 0 0 1 0 1 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 3 4 2 0 2 0 0 0 0 1 0 0 0 0 0]\n [ 0 0 0 0 0 0 2 0 5 3 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 1 0 3 0 0 1 0 0 2 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 1 0 3 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 1 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0]]\n precision recall f1-score support\n\n 2 0.00 0.00 0.00 1\n 3 0.00 0.00 0.00 2\n 4 0.55 0.55 0.55 11\n 5 0.53 0.40 0.46 20\n 6 0.46 0.30 0.36 53\n 7 0.28 0.34 0.31 80\n 8 0.26 0.43 0.33 107\n 9 0.25 0.39 0.30 132\n 10 0.23 0.25 0.24 143\n 11 0.24 0.26 0.25 91\n 12 0.00 0.00 0.00 63\n 13 0.10 0.08 0.09 39\n 14 0.00 0.00 0.00 24\n 15 0.00 0.00 0.00 21\n 16 0.12 0.10 0.11 10\n 17 0.00 0.00 0.00 12\n 18 0.00 0.00 0.00 10\n 19 0.00 0.00 0.00 7\n 20 0.00 0.00 0.00 4\n 21 0.00 0.00 0.00 3\n 22 0.00 0.00 0.00 1\n 23 0.00 0.00 0.00 2\n\n accuracy 0.26 836\n macro avg 0.14 0.14 0.14 836\nweighted avg 0.22 0.26 0.23 836\n\n" ], [ "# create heatmap\nclass_names=[0,1] \nfig, ax = plt.subplots()\ntick_marks = np.arange(len(class_names))\nplt.xticks(tick_marks, class_names)\nplt.yticks(tick_marks, class_names)\nsns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap=\"YlGnBu\" ,fmt='g')\nax.xaxis.set_label_position(\"top\")\nplt.tight_layout()\nplt.title('Confusion matrix', y=1.1)\nplt.ylabel('Actual label')\nplt.xlabel('Predicted label')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0bc849ebdb987728d3b5fa50b9aa0cdd563eda7
338,613
ipynb
Jupyter Notebook
Learning_Transformers_with_TensorFlow.ipynb
ashikshafi08/Recipe1M
0f3c442066681071c2cd57ecd841e3fba7d7aef6
[ "MIT" ]
2
2021-09-28T13:34:52.000Z
2022-02-10T03:48:42.000Z
Learning_Transformers_with_TensorFlow.ipynb
ashikshafi08/Recipe1M
0f3c442066681071c2cd57ecd841e3fba7d7aef6
[ "MIT" ]
null
null
null
Learning_Transformers_with_TensorFlow.ipynb
ashikshafi08/Recipe1M
0f3c442066681071c2cd57ecd841e3fba7d7aef6
[ "MIT" ]
null
null
null
62.601775
64,696
0.683527
[ [ [ "<a href=\"https://colab.research.google.com/github/ashikshafi08/Recipe1M/blob/main/Learning_Transformers_with_TensorFlow.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Learning Transformers with TensorFlow \n\nHere you can find all the theory and code for transformers. \n\n\n**What is transformer**\n\nA transformer model handles **variable-sized input using stacks of self-attention layers**.\n- Layer outputs can be calculated in parallel, instead of a series like an RNN. \n- Can learn long-range dependencies. \n- Attention mechanism has an infinite reference window \n\n\n## Input Embedding \n- Input into the word embedding layer that is the input should be in embedding format. \n- Next we will inject the Positional embeddings, because transformer doesnt have reccurence. The author in the paper used Sine and Cosine function to calculate this. \n - For every odd time step create a vector using the cosine function. \n - For every even time step create a vector using the sine funciton. \n - Then add those vectors and we will get Positional Input Embeddings\n\n## Encoder Layer \nThe encoder layer maps all the input sequence into a abstract continous representation that holds the learned info for entire sequence. \n\nIt consists of two sub modules, \n- Multi - headed attention \n- Fully connected layer \n- Also it has residual connections and a Layer Normalization. \n\n### 1. Multi-Headed Attention \nThey use a specific attention mechanism that is called self attention. \n\n**Self attention** -> helps in associating each individual word in the inputs to other words in a input. Helps to connect. \n\nTo achieve self attention, \n- we connect the inputs into a three distinguished fully connected layers to create the query , key and value vectors. \n\n[Query, key and values](https://stats.stackexchange.com/questions/421935/what-exactly-are-keys-queries-and-values-in-attention-mechanisms)\n- Query and key form a dot product matrix multiplication to produce a score matrix. The score matrix produces how much focus should be put on each word. \n- Higher the score, the more the focus\n- The scores get scaled by dividing with the square root of the queries and the keys. This is for more stable gradients.\n- Then the scaled vector is passed into a softmax function to get the probs distribution, this will be our attention weights. \n- So then we multiply the attention weights with our values. The more prob distribution the model will consider it as important words. \n- Then we pass the output vector into a linear layer. \n\n> Before applying self attention we gotta make sure to split the query , key and value into N vectors, since its a multi-headed (more than 1). And each self attention process is called as a head and its concatenatd into a single vector and then passed into a Linear layer.\n\nSo the multi-headed is a module in the transformer network that attends the input and produces an output vector with information on how each word should attend to all other words in a sequence. \n\n- Then a residual connection that gets connects with the multi-headed output and passed into a LayerNorm. \n- This is again passed into a Point wise Feed Forward network, followed by a LayerNorm layer. \n- Then again a residual connection is been made, where the input of the LayerNorm is passed to the input of the LayerNorm + Feed Forwards output. 
\n\n\nSo this whole purpose of this Encoder layer is to make our inputs in a continous representation with a attention detail in it. \n\nThis will make the decoder to focus on the appropriate words in the decoding process. \n\n> **Note**: We can stack the Encoder layer for N number of times, so at each time it will learn new attention detials. So it helps in predictions. \n\n## Decoder Layer\nThe decoder job is to generate the text sequences. Like the encoder the decoder even has the sub layer, \n- 2 multi-headed attention layer \n- point wise fed forward \n- residual connections and LayerNorm after each sub layer. \n\nThe decoder is autoregressive, where it takes the previous outputs as inputs and also the encoder outputs that contains the attention details as inputs. \n\nDecoder stops when it generates a end token as the output. \n\n- The input goes into a Embedding layer and Positional Embedding layer and gives out the Positional Embeddings. \n- Then the Positional Embedding is passed into the 1st Multi-head attention layer which computes the attention score for the decoder inputs. \n\n\n#### 1st Multi headed attention layer \n\n**Masking in the decoders multi-head attention**\n\n> **Note**: Since the decoder is auto-regressive and generates the sequence word by word, we need to condition the words from seeing the future tokens. It can have access to the previous tokens but not the upcoming one. \n\n> To prevent this we use something called a **Mask**, that provides us a with a look ahead mask. So we add the scaled scores and the look ahead mask, this will give us the Masked Scores. \n\nMask is the same matrix same as the attention score. And it has negaitve inifinities, so when we take a softmax these -ve infinities will get zero out. So this means zero attention score for the future tokens. \n\nThe output of the first multi-headed attention layer is a mask output vector, which has the information on how the model should attend on the decoders input. \n\n#### 2nd Multi headed attention layer \nHere we pass two inputs, \n- The output of the Encoder layer, only the queries and the keys.\n- Mask output vector of the 1st multi-headed layer, the values. \n\nThis process matches the encoder inputs and the decoder inputs, so it will figure out which input to focus on. \n\nLikewise the output of the 2nd multi-headed layer passes into point-wise feed forward layer. \n\nAtlast the output from the point-wise feed forward is passed into a linear classifier layer. Where we specify the various number of classes we have, followed by a softmax layer nnd this will give us the probability distribution. Further computing the argmax we will get the predicted word. 
\n\nThis goes on and on....\n\n\n\n\n", "_____no_output_____" ] ], [ [ "# Some downloads\n!pip install tensorflow_datasets\n!pip install -U tensorflow-text", "Requirement already satisfied: tensorflow_datasets in /usr/local/lib/python3.7/dist-packages (4.0.1)\nRequirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (1.2.0)\nRequirement already satisfied: importlib-resources in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (5.2.2)\nRequirement already satisfied: dm-tree in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (0.1.6)\nRequirement already satisfied: dill in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (0.3.4)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (1.15.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (1.19.5)\nRequirement already satisfied: promise in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (2.3)\nRequirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (3.17.3)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (4.62.3)\nRequirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (2.23.0)\nRequirement already satisfied: absl-py in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (0.12.0)\nRequirement already satisfied: termcolor in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (1.1.0)\nRequirement already satisfied: attrs>=18.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (21.2.0)\nRequirement already satisfied: future in /usr/local/lib/python3.7/dist-packages (from tensorflow_datasets) (0.16.0)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->tensorflow_datasets) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->tensorflow_datasets) (2021.5.30)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->tensorflow_datasets) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->tensorflow_datasets) (3.0.4)\nRequirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.7/dist-packages (from importlib-resources->tensorflow_datasets) (3.5.0)\nRequirement already satisfied: googleapis-common-protos<2,>=1.52.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow-metadata->tensorflow_datasets) (1.53.0)\nCollecting tensorflow-text\n Downloading tensorflow_text-2.6.0-cp37-cp37m-manylinux1_x86_64.whl (4.4 MB)\n\u001b[K |████████████████████████████████| 4.4 MB 7.7 MB/s \n\u001b[?25hRequirement already satisfied: tensorflow-hub>=0.8.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow-text) (0.12.0)\nRequirement already satisfied: tensorflow<2.7,>=2.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow-text) (2.6.0)\nRequirement already satisfied: termcolor~=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (1.1.0)\nRequirement already satisfied: opt-einsum~=3.3.0 in /usr/local/lib/python3.7/dist-packages (from 
tensorflow<2.7,>=2.6.0->tensorflow-text) (3.3.0)\nRequirement already satisfied: keras~=2.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (2.6.0)\nRequirement already satisfied: h5py~=3.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (3.1.0)\nRequirement already satisfied: six~=1.15.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (1.15.0)\nRequirement already satisfied: absl-py~=0.10 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (0.12.0)\nRequirement already satisfied: astunparse~=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (1.6.3)\nRequirement already satisfied: wrapt~=1.12.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (1.12.1)\nRequirement already satisfied: google-pasta~=0.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (0.2.0)\nRequirement already satisfied: tensorflow-estimator~=2.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (2.6.0)\nRequirement already satisfied: grpcio<2.0,>=1.37.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (1.40.0)\nRequirement already satisfied: keras-preprocessing~=1.1.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (1.1.2)\nRequirement already satisfied: tensorboard~=2.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (2.6.0)\nRequirement already satisfied: numpy~=1.19.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (1.19.5)\nRequirement already satisfied: clang~=5.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (5.0)\nRequirement already satisfied: gast==0.4.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (0.4.0)\nRequirement already satisfied: wheel~=0.35 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (0.37.0)\nRequirement already satisfied: flatbuffers~=1.12.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (1.12)\nRequirement already satisfied: typing-extensions~=3.7.4 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (3.7.4.3)\nRequirement already satisfied: protobuf>=3.9.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.7,>=2.6.0->tensorflow-text) (3.17.3)\nRequirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py~=3.1.0->tensorflow<2.7,>=2.6.0->tensorflow-text) (1.5.2)\nRequirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (1.35.0)\nRequirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (2.23.0)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (1.0.1)\nRequirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (1.8.0)\nRequirement already satisfied: 
setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (57.4.0)\nRequirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (0.6.1)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (3.3.4)\nRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (0.4.6)\nRequirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (4.7.2)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (0.2.8)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (4.2.2)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (1.3.0)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (4.8.1)\nRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (0.4.8)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (2021.5.30)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (2.10)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (3.1.1)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->markdown>=2.6.8->tensorboard~=2.6->tensorflow<2.7,>=2.6.0->tensorflow-text) (3.5.0)\nInstalling collected packages: tensorflow-text\nSuccessfully installed tensorflow-text-2.6.0\n" ], [ "# Imports \n\nimport collections\nimport logging\nimport os\nimport pathlib\nimport re\nimport string\nimport sys\nimport time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow_datasets as tfds\nimport tensorflow_text as text\nimport tensorflow as tf\n\n\nlogging.getLogger('tensorflow').setLevel(logging.ERROR) # suppress warnings", "_____no_output_____" ] ], [ [ "### Downloading the datasets \n\nTesting 
the transformer on Portuguese-English translation dataset. This dataset contains approx 50000 training examples, 1100 validation examples and 2000 test examples. \n\n", "_____no_output_____" ] ], [ [ "examples, metadata = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True,\n as_supervised=True)\ntrain_examples, val_examples = examples['train'], examples['validation']", "\u001b[1mDownloading and preparing dataset ted_hrlr_translate/pt_to_en/1.0.0 (download: 124.94 MiB, generated: Unknown size, total: 124.94 MiB) to /root/tensorflow_datasets/ted_hrlr_translate/pt_to_en/1.0.0...\u001b[0m\n" ], [ "# Looking into the text examples \nfor pt_examples, en_examples in train_examples.batch(3).take(1):\n for pt in pt_examples.numpy():\n print(pt.decode('utf-8'))\n\n print()\n\n for en in en_examples.numpy():\n print(en.decode('utf-8'))", "e quando melhoramos a procura , tiramos a única vantagem da impressão , que é a serendipidade .\nmas e se estes fatores fossem ativos ?\nmas eles não tinham a curiosidade de me testar .\n\nand when you improve searchability , you actually take away the one advantage of print , which is serendipity .\nbut what if it were active ?\nbut they did n't test for curiosity .\n" ] ], [ [ "### Text tokenization & detokenizatiion \n\nHere we will convert the text to some numeric representation, so now we will convert the text to sequences of token IDs, which are used as indices into an embedding. \n\nDownloading a saved model \n", "_____no_output_____" ] ], [ [ "model_name = \"ted_hrlr_translate_pt_en_converter\"\ntf.keras.utils.get_file(\n f\"{model_name}.zip\",\n f\"https://storage.googleapis.com/download.tensorflow.org/models/{model_name}.zip\",\n cache_dir='.', cache_subdir='', extract=True\n)", "Downloading data from https://storage.googleapis.com/download.tensorflow.org/models/ted_hrlr_translate_pt_en_converter.zip\n188416/184801 [==============================] - 0s 1us/step\n196608/184801 [===============================] - 0s 1us/step\n" ], [ "# Loading the saved model \ntokenizers = tf.saved_model.load(model_name)\n", "_____no_output_____" ] ], [ [ "The tokenizes contains two text tokenizers, one for english and one for portuguese. ", "_____no_output_____" ] ], [ [ "print([item for item in dir(tokenizers) if not item.startswith('_')])\nprint([item for item in dir(tokenizers.en) if not item.startswith('_')])\nprint([item for item in dir(tokenizers.pt) if not item.startswith('_')])", "['en', 'graph_debug_info', 'pt', 'signatures', 'tensorflow_git_version', 'tensorflow_version']\n['detokenize', 'get_reserved_tokens', 'get_vocab_path', 'get_vocab_size', 'lookup', 'tokenize', 'tokenizer', 'vocab']\n['detokenize', 'get_reserved_tokens', 'get_vocab_path', 'get_vocab_size', 'lookup', 'tokenize', 'tokenizer', 'vocab']\n" ] ], [ [ "- The `tokenize` method converts a batch of strings to a padded-batch of token IDs. \n- This method also splits punctuations, lowercases and unicode-normalizes the input before tokenizing. 
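\n\nFor example, tokenizing a new (made-up) sentence and padding the resulting ragged batch could look like this; this is a quick illustration added here, not a cell from the original notebook:\n\n```python\nsample = tf.constant(['this is a brand new sentence .'])\nids = tokenizers.en.tokenize(sample)  # ragged batch of token IDs\npadded = ids.to_tensor()              # pad with zeros to get a dense, rectangular batch\n```\n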
", "_____no_output_____" ] ], [ [ "# Printing out the english exampels \nfor en in en_examples.numpy():\n print(en.decode('utf-8'))", "and when you improve searchability , you actually take away the one advantage of print , which is serendipity .\nbut what if it were active ?\nbut they did n't test for curiosity .\n" ] ], [ [ "The `detokenize` method attempts to convert these token IDs back to human readable text", "_____no_output_____" ] ], [ [ "# Tokenizing a text \nencoded = tokenizers.en.tokenize(en_examples)\n\nfor row in encoded.to_list():\n print(row)\n", "[2, 72, 117, 79, 1259, 1491, 2362, 13, 79, 150, 184, 311, 71, 103, 2308, 74, 2679, 13, 148, 80, 55, 4840, 1434, 2423, 540, 15, 3]\n[2, 87, 90, 107, 76, 129, 1852, 30, 3]\n[2, 87, 83, 149, 50, 9, 56, 664, 85, 2512, 15, 3]\n" ], [ "# Detokenizing a text \nround_trip = tokenizers.en.detokenize(encoded)\n\nfor line in round_trip.numpy():\n print(line.decode('utf-8'))", "and when you improve searchability , you actually take away the one advantage of print , which is serendipity .\nbut what if it were active ?\nbut they did n ' t test for curiosity .\n" ] ], [ [ "Lookup method converts from token-IDs to token text", "_____no_output_____" ] ], [ [ "tokens = tokenizers.en.lookup(encoded)\ntokens", "_____no_output_____" ] ], [ [ "## Setup input pipeline \n\nGotta apply some transformations in order to make the data suitable for Input pipeline. \n\n", "_____no_output_____" ] ], [ [ "# Preprocessing function and we can use while making the dataset\ndef tokenize_pairs(pt , en):\n '''\n pt -> portugese text\n en -> english text\n '''\n\n pt = tokenizers.pt.tokenize(pt)\n\n # Converting the ragged tensors to dense by padding with zero \n pt = pt.to_tensor()\n return pt , en", "_____no_output_____" ], [ "# Hyper parameters for batching and shuffling \nBUFFER_SIZE = 20000\nBATCH_SIZE = 64 \n\n# Function to apply the dataset transforms \ndef make_batches(ds):\n return(\n ds\n .cache()\n .shuffle(BUFFER_SIZE)\n .batch(BATCH_SIZE)\n .map(tokenize_pairs , num_parallel_calls = tf.data.AUTOTUNE)\n .prefetch(tf.data.AUTOTUNE)\n )\n\n\n", "_____no_output_____" ], [ "# Applying the transforms on our datasets \ntrain_batches = make_batches(train_examples)\nval_batches = make_batches(val_examples)\n\ntrain_batches , val_batches\n", "_____no_output_____" ] ], [ [ "### **Positional Encoding** \n\n- Attention layers see the input as a set of vectors with no sequential order. \n- Like we know the transformers doesnt contain any recurrent or convolutional layers, so this means no reccurence available in the transformers. \n\nThats why we need the help of a **positional encoding** and its added to give the mdoel some information about the relatives position of the tokens in the sentence. It kinda filss up the void of reccurence. \n\n- The positional encoding vector is added to a embedding vector. \n- Since the embedding is a vector and after adding the **positional encoding the tokens will be closer to each other bases on the similarity of their meaning and their position in the sentence, in the d-dimensional space**. \n\n\nSo we use two formula's for calculating the positional encoding, \n- Cosine function : For every odd time step create a vector using the cosine function.\n- Sine function : For every even time step create a vector using the sine funciton. 
\n- Then add those vectors and we will get the positional input embeddings.\n", "_____no_output_____" ] ], [ [ "# Getting the angles \n\ndef get_angles(pos , i , d_model):\n    angle_rates = 1 / np.power(10000 , (2 * (i // 2)) / np.float32(d_model)) # 1 / 10000^(2i / d_model)\n    return pos * angle_rates # here we multiply by pos to make it complete \n\n# Defining the whole positional encoding \ndef positional_encoding(position , d_model):\n    angle_rads = get_angles(pos = np.arange(position)[: , np.newaxis] , \n                            i = np.arange(d_model)[np.newaxis, :], \n                            d_model = d_model)\n\n    # Apply sine function to even indices in the array, 2i \n    angle_rads[: , 0::2] = np.sin(angle_rads[: , 0::2])\n\n    # Apply cos to odd indices in the array, 2i+1 \n    angle_rads[: , 1::2] = np.cos(angle_rads[: , 1::2])\n\n    # Into one dedicated variable \n    pos_encoding = angle_rads[np.newaxis , ...]\n\n    return tf.cast(pos_encoding , dtype = tf.float32)\n\n\n", "_____no_output_____" ], [ "# Using the above function \nn , d = 2048 , 512 # position and dimension \n\n# Passing the arguments to the function \npos_encoding = positional_encoding(n , d)\npos_encoding = pos_encoding[0]\nprint(pos_encoding)\n", "tf.Tensor(\n[[ 0.0000000e+00  1.0000000e+00  0.0000000e+00 ...            nan\n             nan            nan]\n [ 1.9531237e-03  9.9999809e-01  1.9531250e-09 ...            nan\n             nan            nan]\n [ 3.9062400e-03  9.9999237e-01  3.9062500e-09 ...            nan\n             nan            nan]\n ...\n [-7.5295961e-01 -6.5806675e-01  3.9941406e-06 ...            nan\n             nan            nan]\n [-7.5424343e-01 -6.5659487e-01  3.9960937e-06 ...            nan\n             nan            nan]\n [-7.5552440e-01 -6.5512049e-01  3.9980469e-06 ...            nan\n             nan            nan]], shape=(2048, 512), dtype=float32)\n" ], [ "pos_encoding[0]", "_____no_output_____" ] ], [ [ "#### Masking\nThe mask indicates where the pad value 0 is present: it outputs a 1 at those locations, and a 0 otherwise.", "_____no_output_____" ] ], [ [ "# Function to create the padding mask \ndef create_padding(seq):\n    seq = tf.cast(tf.math.equal(seq , 0) , tf.float32)\n\n    # Add extra dimensions so the mask can be added to the attention scores \n    # (mask + attention scores) \n    return seq[: , tf.newaxis , tf.newaxis , :] # (batch_size , 1 , 1 , seq_len)", "_____no_output_____" ], [ "# Testing out the above function \nx = tf.constant([ [7 , 6 , 0 , 0 , 1] , [1 , 2 , 3 , 0 , 0] , [0 , 0 , 0 , 4 , 5] ])\ncreate_padding(x)", "_____no_output_____" ] ], [ [ "The look-ahead mask is used to mask the future tokens in a sequence. In other words, the mask indicates which entries should not be used.\n\nThe masked positions end up with zero attention scores after the softmax.\n\n", "_____no_output_____" ] ], [ [ "# Creating the look ahead matrix \ndef create_look_ahead_mask(size):\n    mask = 1 - tf.linalg.band_part(tf.ones(shape = (size , size)) , -1, 0)\n    return mask \n\n# Applying dummy tensors \nx = tf.random.uniform((1 , 3))\ntemp = create_look_ahead_mask(x.shape[1])\ntemp", "_____no_output_____" ] ], [ [ "### Scaled dot product attention \nAs we saw above, the attention function used by the transformer takes three inputs, \n- Query (Q)\n- Key (K)\n- Value (V)\n\nThe equation used to calculate the attention weights is \n\n`Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V`\n\nwhere `d_k` is the depth of the keys. Don't worry if it doesn't fully make sense yet, we will code every line out and it will eventually click. 
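\n\nAs a rough preview, that formula translates to TensorFlow code along the following lines (a minimal sketch written for this explanation, not necessarily the exact implementation built later in the notebook):\n\n```python\ndef scaled_dot_product_attention(q, k, v, mask=None):\n    # q: (..., seq_len_q, depth), k: (..., seq_len_k, depth), v: (..., seq_len_k, depth_v)\n    matmul_qk = tf.matmul(q, k, transpose_b=True)   # (..., seq_len_q, seq_len_k)\n    dk = tf.cast(tf.shape(k)[-1], tf.float32)       # depth of the keys\n    scaled_logits = matmul_qk / tf.math.sqrt(dk)\n\n    # The padding / look-ahead mask pushes masked positions towards -infinity\n    if mask is not None:\n        scaled_logits += (mask * -1e9)\n\n    attention_weights = tf.nn.softmax(scaled_logits, axis=-1)  # rows sum to 1\n    return tf.matmul(attention_weights, v), attention_weights\n```\n\nBecause the masked positions get a huge negative score before the softmax, they end up with (almost) zero attention weight, which is exactly the masking behaviour described above.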
\n", "_____no_output_____" ], [ "![Screenshot 2021-10-03 at 2.29.45 PM.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAiwAAAI5CAYAAACCbGHBAAABSWlDQ1BJQ0MgUHJvZmlsZQAAKJFjYGASSSwoyGFhYGDIzSspCnJ3UoiIjFJgf8rAxcDMwMogxiCYmFxc4BgQ4ANUwgCjUcG3awyMIPqyLsisH3byWkIbryT9fxv3tG/m3P+Y6lEAV0pqcTKQ/gPE6ckFRSUMDIwpQLZyeUkBiN0BZIsUAR0FZM8BsdMh7A0gdhKEfQSsJiTIGci+AWQLJGckAs1gfAFk6yQhiacjsaH2ggCvU2peoIJ7uJGJuakHAfeSDEpSK0pAtHN+QWVRZnpGiYIjMJRSFTzzkvV0FIwMjAwZGEBhDlH9OQgcloxi+xBi+UsYGCy+MTAwT0SIJU1hYNjexsAgcQshpjKPgYG/hYFh26GCxKJEuAMYv7EUpxkbQdg89gwMrHf///+swcDAPpGB4e/E//9/L/7//+9ioPm3GRgOVAIAm+1ijyzrPT4AAABWZVhJZk1NACoAAAAIAAGHaQAEAAAAAQAAABoAAAAAAAOShgAHAAAAEgAAAESgAgAEAAAAAQAAAiygAwAEAAAAAQAAAjkAAAAAQVNDSUkAAABTY3JlZW5zaG90FFyolQAAAdZpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDYuMC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iPgogICAgICAgICA8ZXhpZjpQaXhlbFlEaW1lbnNpb24+NTY5PC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjU1NjwvZXhpZjpQaXhlbFhEaW1lbnNpb24+CiAgICAgICAgIDxleGlmOlVzZXJDb21tZW50PlNjcmVlbnNob3Q8L2V4aWY6VXNlckNvbW1lbnQ+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgqSEAirAABAAElEQVR4AeydB2BU15X+j4R6RwIh0XvvYDAuGINNXOPYcWI7juOUzSZbsi3by39btmSzJb1n7Y1TbGzHDVcwNpjeey8CSYgmQEJCFc3/fGe446dBvaDRm+/aw7x55b57f/dp7jfnnntuTECTMJEACZAACZAACZBABBOIjeCysWgkQAIkQAIkQAIkYAQoWPggkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgAQoWPgMkAAJkAAJkAAJRDwBCpaIbyIWkARIgARIgARIgIKFzwAJkAAJkAAJkEDEE6BgifgmYgFJgARIgARIgATiiCCyCdTW1sqGDRtkx44dkpycLDfccINMmjRJ+vTpI6dOnZKXX35ZvvzlL/dIJerq6mTVqlWydOlSeeyxx2TOnDndWo6CggL5zW9+IzExMfLHf/zHLd6rvr5eLl682OI5aWlpkpSU1OI5kXKwtfqASVZWlj0XkVLm8HLgWV6xYoU9s3/zN38jQ4YMCT+lV3/+2c9+Jh//+MetHXp1RTpQeHwXfOMb35Djx4/L//t//08GDRrUgVx4CQm0QiDAFLEE9Ms9MG7cuIA2YaNXSkpKYOHChQEVCIHs7OzrXn4VDYGPfOQjgdTU1FC5sK+70p/92Z8FJk6cGLqXdgqt3mr37t0B7RAD2pGHrgvniM8qAgMjRowIfP7znw8sWbIkcP78+Vbz7okTUJ+8vLwW6xIbGxvIzc0N3HPPPYHvfOc7gYqKip4o6jX3XLNmTeD+++8PqEAMlX/Xrl3XnNebd6COeJ6+/vWv9+ZqdLjs+Ntxf1+/9Vu/1eF8eCEJtESAQ0L6VxaJaevWrXL33XfLgQMH5I477pCNGzfKyZMn5Y033pD58+fbL1Xs0w5Wqqurr2sV8vPz5dFHH71uv5BnzpwpeLUnwQp14sQJ2b59eyMryqhRo+QnP/mJ/PSnP5V//Md/lMcff9z4/e///q988pOflP79+8vv//7vy+XLl9tzu24/F/UpKSmx9vfeDM/GM888I//1X/8lv/3bvy01NTV2zle+8hWZP
HmyvPPOO97Te2S7b9++snjxYsnIyOiR+1+Pm373u9+12+Ad1rBoSyqmQ1WOi6PhPgSDG11LoCU1w2M9R+CWW26xXyyDBw8OVFZWNipIQ0ND4N/+7d9Cv2iOHDnS6Pj1+vDP//zPoTJ0p4UF9VGxFrpXWywsXgY33XRT6Nq/+qu/8h6ybe3kAz//+c8Do0ePDp2H7XXr1l1zbmd3bNu2LfCrX/2qU9ngmdBvAXv9+Mc/bpRXeXl54FOf+lToOKwumzdvbnROT31QQRUqV09bWP7yL/8ycOXKlS5BgWczPj4+VLdnn322Xfm29ky0drxdN+vkyS1xe+qppwL//d//HbFWyk5WnZdHAAFaWLpW/3VJbpcuXZL169dbXtOnTxcdAmqUL/wV9ItDPvvZz9p+WF56IsHScr0SfqV3NKWnp4cuTUxMDG27jYSEBHniiSfMV0jFje0+fPiw6LCXFBcXu9M6/a5iQh555BHRzrpTeXktFfBl8ibUFdajsWPH2m4Vt/K3f/u33lN6bLszbdiVhf7hD38o//7v/95llhAVjQIfDpe++c1vus1W31t7Jlo73uoNuvCE1rjh+wi+ZZHSzl1YdWYVIQQoWCKkIbzFgDOtMytjaKi54Yl/+Zd/Ef0FbUNF3uu53ZgABF5bkvoD2XCKcxhEZ/F7v/d7bbm01XP0x4k8+eSTcvDgwVbPbe2E1uoD52wMFbmkFha3GfXvcGD/wz/8wy7jAKHyox/9SIYOHRoaesSPDdyntdTaM9Ha8dby78rjXc2tK8vGvKKHAAVLBLb1wIEDQ6WC9eRLX/qSqPk6tM9t4LyHH364xZkh8G+Br4s6xQk6Lh1ecpc3+Q4v/2XLlsmvf/1ref/99xv9cmzygjbsPH36tPlSoAyY6dOWBF+MDz74QF566SWBteN6pczMTPn2t78dut0rr7wir776auhz+MaZM2fk7bffltdff13Onj0bftg+w8rxJ3/yJzY7BjsgQEtLS+2FY92RnOhy90Pn5xIE8erVq+0j7v/ee+9Zm7vj3nd0yJihhrbToYkmn0Pv+d7twsJCefHFF60dm3vuLly4EGIBJl5LBWZ5OU7u3Zt/+Dbu99prr8mbb75ps1XCj+/du1cefPBBwWwlJJenOieHn9rmz6gffIv+/u//3qxn7sLWrCytPROtHXf3ce/gtnPnTmsntCf+fppLOBe+Tag/Ej6vXbvWns+ioqJrLmsPN/ytY+ZgSwn3a88z1d7ytnRvHuvlBPSLjCkCCXj9LvQRC9x5553mx9HWomKGCGbXYGxdh0EC6kxqY+yYYaTOpgH4bXiTWnICt912m50zYcKEgA732LZOlQ0055+iQw92DsrX1DmHDh2y2UQ47n3Nnj07oCLEe/vQtgqsAGYZ6HRjew0fPtxm+jzwwAOhPNrrw3LXXXeFrv2Hf/iH0L2a21DrVmDAgAGha3Ta+DWnqqgL+bzgXHU0tPPVqTegDrCN/CM+/elPh/LycsC2Ovtek3drO9QBN5SfTqVt8nQ1z4fOwbOjIinwwgsvBD72sY9ZWTFrR4VsQIVN6Dz4H7ik1qXAF7/4RXt2MLsHzwHKi5lhmIWkliJ36jXvK1euDIADzoe/Da4Ho/Hjx4fu5XxY1Bm30Uyuv/7rvw7lB18cd1/kpUN3oWPeDRW2gSlTplje3raYN29eyH9HO/MAnn3kE/5S64g3u3Zt33zzzTZTD3w3bdoUyhvPgwqoZvNq7Zlo7bjLGH443/ve9xpxQv0w+02Hvdxp9veuwtv+tnJycqycKlLsWh1GDJUbPk94fl1qCzc8K//3f/8XQFvqEGVg0aJF7vJG7+15pvD91JHyNrohP/iOgPiuRj6pEL6E8QXt/XJV34XAt771rQA61JYSpubCaRRfHv/zP/8TgAhA8jo9qlk8lAWEhesYcD4S7qEzTez++PJtSmC0JFgwDRcdxK233hrQX1MB/bUceO655+yLFHXSce5rBNi5c+cCc+fOtXv+wR/8Qajcag0I9OvXL8SiuwUL6g/R5NijU/ImjTNhnSw6Zf31aYfUUhD4nd/5ndA1EFjOqVNnKwU0Xk7omM5MCuzbt89eVVVV3qzbtN2aYMH9XHuiDv/xH/8R0BlDjRxDZ8yYYUy9nZWbjnr06FGb6o1r/+7v/i70vKEdvMIXz2h4ggCDSIbghQhGQmfu5Yl8nWDBcbVohdioDwR2hZL6c4XEc1OCBX8PmLqus8hCIgrlh0jHfVA/tRpaGcBch8pC99IZZNYG4NWRhPrhHl/96ldDl7vnF/v/4i/+IrQ/fKO1Z6K14y6/z3zmM/Y98f3vfz+Avx/UHYIS98cLTrJI2K+zDu1HgDuGZwBMsR8/Itx+iBawQkLbtcQN90T4APzAcNc3JVja+0x1tLxWaP7jWwIULBHctGpubvJXIb6c8WuuueSsEeiovGn//v2hLxUdTgod+trXvhba751R4r6Q8UXk/dXlLmxOsEAg4RcvOk0dJnGn2zu+3N0XGzp4b4IlA8dgEQlPOkQVuu56CBbvLCwdJgoVB79K0UGinF5WOEHN+AEdoguVE1xdgmhz9W5qppI7ry3vLQkWiAh11A7dS4dAAjoEYtmi7K4MeIc1A8IUVho8M1u2bLHznEXqvvvuu6Y4OMdZkxDzRYcVQufocF9IKKgfR2g/NnAfb7m9gsXLJlyw4Nrbb7/dyh0uWHAPiCNYVSBsvEmn3Yfq6p1JBR6OQbiV0Xt9W7YRuwfPgneWnk4xD+WPGEnhM/y8+Xrr3dQz0drxX/7yl3YvDcLnzTagw5ShHwZg5rX0/Omf/mmofPgO8M6EU+fs0DFYYb2pNW7esjYlWDr6THW0vN6yc9s/BOjDot9ekZoeeugh8xtQ03ajIqqQEP0lJzq80Wg/PsCXAn4XapGweCLeEzQIneiXrDnqwvfFJe14bFO//MUbT0FN+O4Ui6ob+tDKBvwdMBMGM270T8V8O+DfgdeNN94Yuvqtt94KbauYstkt2PFHf/RHof1uA3E8rmcCP5fKysoELyQ4bKJO4D9r1ix3ir3DGRZRPl1Sa1W3x8jRAHEWZVg7BJsZpBYtiz2DMiD6MNoC7YqEGWcujRw5UnRauvk/4ZlAxGTEukGcH9cuTTkc45yPfvSjlg38dxDd1SVEr4XvBNoYfLwJs5k02KF3V2i7qZlboYO6ET4Tyh1DvBz4N6gIFkQt9ibMooMTtYoZ0aFO76Eu2Yb/B/y8wB0sXUIsH/f3hBhJOl3eHeryd7V+WZ4og/v7wjuSikN7d9GF7YP+4y0rnh3v36NaZtxpcuzYsdB2WzYQ86e5durMM9Vd5W1LnXhO5BGgYIm8NmlUIkxP1SiaomPEJkLcQTjlIfBZeFh+OIAioWPBbJHwhA4G06bVlB46hA4LXyo6jBMKqY1zvJ1RexwT4ayLhDzx5e19feITn7Bj+AdfinC6RNJfi6GZURqDxvZ5/wmf2u091h3bTqAgb3BU3w1zWHYzbrxiznt/tSzZ8gnYh06tNQdE77Ud2YZogFMkpl/D0RZCFA6gasYXjfciag0JZesV
BujQMMMsPLm2w/7m6viFL3whdBmcopEgVDT+iG031X440JVtCLZq7Wn2ftOmTTNnWHBxU7zt5C76B38bOpx3zSwytWiI+v6E7oK/Mwjcrk5wMNZhE8sWItX7N4Zt95ziBMcJ297nIVxgTJ06FadYwt9/exKepfD83PUdfaZwfXeV15WN772LwIffZr2r3FFVWvxy17FqwS8gxDn4xS9+Eao/plTilyt+2SFhrRYkdXa096b+Ce848EWDqLpIsHTgSxYCwvsF1lQ+ze3TYQk7pH4o8ru/+7vNnWb73S9jdLpIOoxk4sA+9OA/3tg2sBThixOWLdf5OKtFU0VEZ6lDdnbIdSpNnef2wUoAawFEaFNJfSFEfZKaOmRWEgjOrkoQrS41V0fUzyVXP0zXdjPZWnr23HWdfcdMGNcWzd0P4qE7EtrpBz/4gWWNWUKwTnkTxIRL+HvCjwhYQboyub8xCIU9e/bY+lrN5Y+Zb21JzQmOtlzb0jkdfaZayhPHuqu8rd2Xx3uOAAVLz7Fv950xTIEw7MOGDRPEYHFJnVlDgkWd9Ww3plq2J+GXKKbePv/88xYwDbEkYI5tykrTWr6YNouEDgzDUG1JsAggofOOhISp3S6pD4VtejuilgLKeTtQ/ApvLaljoyD4WHMJ8VuaEyzNXdPR/eF19E6Pdnli2BCdBdrX1c+1H865Hm3onnPcD8/69VxIEYt9Yno+lnF49913UYRrEoS4s0piaLCrBYv7G4N4wveCdwjzmsL08I6OPlM9XGzePgIJULBEYKNg3F2noApMvU0ldeYUxEvAMBESvjTReaATwa8pmHPxqwu/QGGdaS2hs1mwYIFgeAHj4v/0T/9klyCGS0cSIl2iDC7WR1vycJ0c4nVg7B/+Bz2VwA4+OC7ptGDb9Eb29XaY7jz37rVg6aKNbnez71gx2uv7En4igpJdr4Q6ul/EqGNTK3DjOcPwEoSWq59rP5SzJTZdVQ+v1QDlbaqcXXWv8HwQpwfWGzwnEC1NJfyIwHpbSIh5AguiY9XU+e3d540mi78zna7e3iyu2/kdfaauWwF5o15DILbXlDSKCoo/8JY6MKBwX4bYRsehszCwKbrysL2j04C5urmE/BEIDAmL5kGs4Fcq/GI6m5yjnM4caDR+Hp4vFuzDEBeSKze2vWPe+Hw9E36xev2CMNziOsMxY8aE/D7QAXl/OXrL6B1Ogk9LawmdP7g397qegsVrEXP+UOHlh38PnjkkV7/r3X7e+yFImxseCi8rgg7CcthVCYtp4geCzqpqVqzgXnCY9waA9PqMdUVZ3N8Y8vL6moXnDfGPNkVAt55KHX2meqq8vG/kEqBgicC2gfkfnTZ+pTWXvEM18DVxDpVu+ALXYaYEIteGJ/i96JRkQQeMzgdRLpFgGfBaZBCa3qXm/Cvcce+718dBpyCbGPIexzZ+dWJWyWd1/REk74ybpqKEOkGGc9tr+Wlr2WElwMwYZxnCF6036i1+TTvTPjpIry8RyuWS81/B7BTXaXm5egWNu6Y9714Wba1bW/OHr5RLGB5sirWrH87DTCQkWA9gKUKCdcr5UtmOq/94y+3N1+trolOEvZfYthveRF1dfTVuiGDdJCTc71//9V9t2/uPxggx4eAd1upsO3zjG9+wW3gda733dNvw/0GEapcwlIvyeFNrZWnpOHg7HyMMUWkIA2/Wto3hOg2+Z6t2w2rbmdRSWVrLt6PPVGv58ngUEtAvXqYII4BgT/ooWiyF8HgWrqjq12Dn4DwX7A3H9Ms9FAsDxxDoS6cvWlRTneIacBE0EZkSSQWLBZjDuXipM6PtVwtNo2BfLiaHTpu04/jnu9/9bqgM3hVqdfZPoyBliAuivwIDyBMB6BDvQb9sA5/73OdCeeGYOraG8kM8Du2cQsd1CnfoGAJV6S/G0LHWNm644YbQtd4gX+467UgDOpU3gABxjgPugSif4QlxK1B2nIcovN44JDjXxa5BXXSoInS5ipRQ3irobL8KpIA6b4YCs4VObmUDcUdcOb3RTFu5zALxueuaipXhrkdbu/Oair+j05rtuIoVd4m9ewMTIsaHDpmEjqvPTwD7XL5f//rXQ4H1cJJaFe0YuGF1YiTEMPHG4cC14OsSggu6/PCujucWHRV/MypgAmqFCKjobBTV2VtGF50ZEX8RFK+1hFgjOhxmQQ+9z2Zz1+HvwFu+8EByrT0TrR33BuNDsDc82zorzWIf6Qw9CwanP0JCwQ1RTm+QPld/V378bbvy6sKfbre9t8YNz7K7FlG6w1NHn6mOljf8/vzsDwIwpTJFGAFvQDZ0jviiUz8T++JFVFr95Rb6crj33nsbdeyoCgJK6S+i0Dnui8S9qxWhUY290TlxnQurrjEaAgiahuvwhYgQ5i5oG4JuIbx7c3nq1NrQMXeO9x0RR8MDd0F4ec/R6agBtcBYtFyEhPceQzlxj9aSDms0EmToGNVhOfCf//mfgT//8z8PQPh5O1J8wevMpgAi1zaX1EIVyhNfzgjIh4Rgfupwa1E/0YbepD5Gje6jFiXrUPBFjmNtSTjPG5gMPBCMDZFc25IQmt8xRNRkneHT5GWoOwL/4Vy0OwKvQTwgtLoL/AdxFy4a1RE0gDZz9wBLtUgFEI0VQdTcfveOZwuCGQni1e3HO0QdjkNwI1qyO6bWmIDG6bFr1E8qAI7uWPg7QtC7trEL9B8IRHceyqdDNwGch+UEWkqI6OqiwSKqK/4OW0v4m3X3wjvKjojHEMhIrT0TrR1Hu+Nv0nsP7za+O3RdpVAxIbIg6tw5WHrBPXs45v0BAvaos0utcdPYT6F8EV0YP5y8qSPPVGfK6703t/1DgIIlAttSh4Psjx+RXxE2G1/a7kvGveNLAZ2ui2IaXg18geDXpTsf7xqfIfD000+Hn2piSGcehc5FhFpYNPDFik7d5YHOUWdHBNQs3mQHhM7a+6sNnTYsFe56vOOLEFYBNVdfUw7seOqpp0Jh4d11OnQV0NViLR/cA7+63a/wJjPRnegs0CG5PJp6xxo3CE+OL3FYfSAGsIRAW5LGxrEIrO4eeEeHpDFIbCmCpvLQIGIhoYNf6ljXpzkO4dejPt7w5+H1QafbXFRV/PqFNSj8GnRoECZONHjviXJBKMNKgetQN1g/IO684fq912BbncEDiDKL+rn7odywmGFtK+zHr3ewgAByCdteqyGuhRUIVgr8HegQaOCRRx4JwJLnFZOwCuC41zoHMQshoj5GLvvQO0QOIkW7sumMp4AumBg63tQGQvh783fXhluYvNfib8WdF/4OEe5Sa89Ea8chEhGKH+3jvY+GOghFLsa9cJ73b9ydCyEJTviecfvcO+qM9XyQmuOGKL9YIyr8BxLaC2tBeVN7nqnOltd7X277h0AMqqIPKFMEEYDvCHw8vNFoMSsIcUAwVRJBvxDUy/mtNFd0/YViwaXgxwLHTfjG6BdLk6cjIiZWe0VcB0RExbtL2lmaLwN8U7z73fHW3g8cOGAzRzAdW603rcZP0F99NgMDUTsxVg8nZDymWHUaDrDN1aG1cnT
HcfhlwLkYZQaf1toE/ivgoULJYs50R5m6Ok84bGJGDAKweadst3QfXIPYLPBrwfOk4kjUKmGz2BDYrLkEZ2b4rLh2x3lwnMU1ahVq7jLBys7givaAI3BL5+JZR5tpJ2+xhnr6eWrtmWjtOKBgdh2CxYGzLl4q3llEzUJr54Gu5NaRZ6qdxeXpPiRAweLDRmWVSIAESIAESMBvBD78Ge23mrE+JEACJEACJEACviFAweKbpmRFSIAESIAESMC/BChY/Nu2rBkJkAAJkAAJ+IYABYtvmpIVIQESIAESIAH/EqBg8W/bsmYkQAIkQAIk4BsCFCy+aUpWhARIgARIgAT8S4CCxb9ty5qRAAmQAAmQgG8IULD4pilZERIgARIgARLwLwEKFv+2LWtGAiRAAiRAAr4hQMHim6ZkRUiABEiABEjAvwQoWPzbtqwZCZAACZAACfiGAAWLb5qSFSEBEiABEiAB/xKgYPFv27JmJEACJEACJOAbAhQsvmlKVoQESIAESIAE/EuAgsW/bcuakQAJkAAJkIBvCFCw+KYpWRESIAESIAES8C8BChb/ti1rRgIkQAIkQAK+IUDB4pumZEVIgARIgARIwL8EKFj827asGQmQAAmQAAn4hgAFi2+akhUhARIgARIgAf8SoGDxb9uyZiRAAiRAAiTgGwIULL5pSlaEBEiABEiABPxLgILFv23LmpEACZAACZCAbwhQsPimKVkREiABEiABEvAvAQoW/7Yta0YCJEACJEACviFAweKbpmRFSIAESIAESMC/BChY/Nu2rBkJkAAJkAAJ+IYABYtvmpIVIQESIAESIAH/EqBg8W/bsmYkQAIkQAIk4BsCFCy+aUpWhARIgARIgAT8S4CCxb9ty5qRAAmQAAmQgG8IULD4pilZERIgARIgARLwLwEKFv+2LWtGAiRAAiRAAr4hQMHim6ZkRUiABEiABEjAvwQoWPzbtqwZCZAACZAACfiGAAWLb5qSFSEBEiABEiAB/xKgYPFv27JmJEACJEACJOAbAhQsvmlKVoQESIAESIAE/EuAgsW/bcuakQAJkAAJkIBvCFCw+KYpWRESIAESIAES8C8BChb/ti1rRgIkQAIkQAK+IRDnm5qwIiRAAr2aQENDg5X/9ddfl8LCQrnzzjtl1KhREhMTY69eXTkWngRIoNMEKFg6jZAZkAAJdJZAIBAwUVJZWSmrV6+WDRs2yPDhw2XkyJEUK52Fy+tJwCcEOCTkk4ZkNUigNxOAYMHr4MGDsnPnTtm2bZu9X7p0yaqFY0wkQALRTYCCJbrbn7UngYgggGGfK1euyLp162T//v0CoQIry9GjR03IREQhWQgSIIEeJUDB0qP4eXMSIAHnu3Lq1CnZvHmzlJSUGBRnZamtrSUkEiABEhAKFj4EJEACPU4AomXTpk2yY8cOqaurk9jYWDl58qRs3LgxJGA4LNTjzcQCkECPEqBg6VH8vDkJRDcBiBAMB2EIaP369aEhIOyvr6+3YaG9e/eKs8JENy3WngSimwAFS3S3P2tPAj1KwFlN4LeCIaBwJ1s44W7dulXKysqsnO78Hi00b04CJNAjBChYegQ7b0oCJAACsK5gCAgOthAnLjlhgmnOsLwcPnyYzrcODt9JIEoJULBEacOz2iTQ0wScKCkqKjJn2/Pnz0tc3IehobCdkJBg05vh21JTU9PTReb9SYAEepAABUsPwuetSSDaCcBPZe3atXLs2DGZNm2ajBkzRuLj4w1Lv379ZObMmZKamipbtmyx6LcQOU7oRDs71p8Eoo3Ahz9noq3mrC8JkECPEXCiA0M+Z8+elTlz5sjtt98uS5cuNfGCqcwDBw6UT3/60+Z8e+TIEZstNHr06B4rM29MAiTQswQoWHqWP+9OAlFP4Oabb5bc3FwZMmSIWVsckD59+ggECsTMoUOHJDs72w7B74WJBEgg+ghQsERfm7PGJNDjBJzoyMjIkNmzZ5vzLawq3unLbvgnPT1dZs2aZWV21/V4BVgAEiCB606APizXHTlvSAIk4AggQJwTJrCo4LNLECfuM4QMPlOwODp8J4HoI0ALS/S1OWtMAhFFAKIEQgRrCTWVnGihWGmKDveRQPQQ+PDnTPTUmTUlARLoRQScg24vKjKLSgIk0A0EKFi6ASqzJAESIAESIAES6FoCFCxdy5O5kQAJkAAJkAAJdAMBCpZugMosSYAESIAESIAEupYABUvX8mRuJEACJEACJEAC3UCAgqUboDJLEiABEiABEiCBriVAwdK1PJkbCZAACZAACZBANxBgHJZugMosSaArCfh9Wq8LHOfew9m5/S4eS/hxv3xmnBm/tCTr0V0EKFi6iyzzJYFOEnAdtcsGn6MtuTrj3Ru2308cnFBBHbHtPvupjqwLCXQFAQqWrqDIPEigiwmg83IvdGCIAosOG/v8mlz9vJ026l1XV+fXKlu9YDnCsgRIaF8X+dd28B8SIIEQAQqWEApukEBkEECnhc4bHXVZWZmcO3fOXlVVVb4VLE6UFRYWWoeNTruiokJ2795tFgfw8KvlITExUfr27Sv9+/eXnJwcSUpKsnb2a30j46+MpeiNBGL0y9G/P9l6Y4uwzFFNAH+OeKGz3rNnj6xcuVK2b98hp0+fkurqal+zQb1LSkrk7NmzZlFKTU2VwYMHS1pamgk4v3bg8fHxJlQmTJggCxYssNWrs7KyODzk66edlesIAQqWjlDjNSTQTQTQaVdWVsqKFSvkmWeeMcFy/vx57bywQGA33TSCsnWCzStOvNsRVNQuK4o2uaaAQKBNmzZNnnzySbn//vtNxPjd0bjLIDKjqCDAIaGoaGZWsjcQQGcNn40NGzbI97//fVm1apXU19dLXFycJCYmSXx8Qm+oRifKiPo3hJxrIVT69IFQ83f0hfr6OqmtrVGhelnWrFkj5eXl2t6JJloyMjI6wZOXkoC/CFCw+Ks9WZteSgBiBenUqVPy/PPPW8cFH5a4uHjJGzBQxo2drH4O/a7WjqO4V0H06jc0eayKsYrKS3Lk6D45UXjMhMuuXbtkyZIlMmzYMLnlllt6dR1ZeBLoSgIULF1Jk3mRQAcJQLDAorB161bZsmWLwMEWaejgEfLgA5+WG+cukLTUdJEoGBbqIMLed5npzhgTKbv3bJYXXvq57Nq9Ra7of5s2bZJnn32WgqX3tSpL3I0EKFi6ES6zJoG2EoBYgWg5fPiwFBcX22VJScly260fkbsXPyQZGdk6PKTTe02wULW0lWvEn6dtHhvbRwbk5utQ0EU5WVIoJaeKbFYYLG3f/e53I74KLCAJXC8CFCzXizTvQwItEIBggf/KpUuX1LpSbeIlO7ufjB0zSf0ZknV/Ga0rLfDrrYcgPdHuqWo9Q1vn5w1W0VJkvkuYLcVEAiTwIQEKlg9ZcIsEeoSAGw5CrBF0XpgxgpSakmbOl9iG4y1EzfVIzp+mPffryDVtqUtH8u3INeFlQR7tqX/49e35DAsL7hcfnygJCYl6abD9r9f921NWnksCPUmAgqUn6fPeJOAhgA7K20mFOl4dB3Lbnt
O7bdPdC+/e8rR0Q3dNS+e0dqyp+7l8mzrWXH7umuaOt2V/V+TRlvsEz3Eh+fEpKFYUvbZ5Q9uz4JkkEAUEKFiioJFZxd5BAJ3k9e0om+ISo9acPipUYnV6cXA5gKbO8u6DqOnTJ/hVgms6UgeteZNDXsgXsUhgfULebUmYWYXU0bIE6xO0euC+TCRAApFBgIIlMtqBpSCBCCAQY518RWWZzlyplZSUVElS/5mWEkap6upqpfJyhVljMIzlxEtL13mPQeBAlOAVaNBlCZxlQfdXVJRLdU2VlSM5OaVViw9EyqWKMhtaS9GyJLQzdg3ECoblcF/UA47PTCRAApFBgIIlMtqBpSCBHiUA0YAF+CBWlq94Vfbt3ynzbrxd5s29XZJVtARFRLgPDYYy4mTfwc2y6oN3JDMjS+5Y+FEZlD9M6q/UtyouXIUhVKqqKqVcHYvhfJqamqaHYmxW1NoNK2TTljUycfxUuX3+3ZKVlWOCInyoCuWHZeVE0VF5972luoxBpdy56AEZO3qyWWfcvVp6dwzKyi/Ie6tel9z++TJn1nzLF8eYSIAEepaAv0NI9ixb3p0EehUBBDGDNWP7zk3y1rKXZOUHb+n02lMSY06h4VUJTsetqqqQdetXyOtvPS+bt67RxRqDywi4s9HRN/VyxzHkAksGhMZrbz4rBw/vDrpxqD7AsX0HdsiK95fKshWvyPHCIypjwkWTy0mtQ4ErWvYN8raWffXa5XL6THB6uPf+7mzvu/e4Mai+LNt2rJfDGsztilpsmr+nNxdukwAJdDcBCpbuJsz8SaDXEFCLif7XR31YsvvmSPHJ43L02AEbJmrKogGhUXDiiEVoTUlJkaRkDJ+ooDBNERQquA6Wj0Sd/YJXnC70FxPrnIhhoRGdHROn07Yvyv4DO+X8hbN6fnBGFI7hWkzvLj1/Vg4e2i1VKiZgkXHOqUALwYFrIK5wDiL5Z2Rkat4ffr3hnuF1wLVI2B9cq0nLdfUzyopFCSlWDBH/IYGIIPDhX3REFIeFIAES6FkC6kOiPhwjR46VZBUgBw/v0aGai0H/Es+wCDr4Kzrss2v3JhMIY8ZM1Cm5CWaRQPnR8WOICa8atdqcO39GSlWMVENwxOh+tdoEE8QCpvTGmc9Mgk7tdeJBN6RO19jB0gS5uXlytOCAnDlXYnl6imLZQMTsU8GD4ZwxoydIWnqGlQ+CA/oJlhO8rk3BGTqxYYLGWV2uPZ97SIAEeooAfVh6ijzvSwKRRkB7dlgb0FkPVD8ULMBXWHRMiooLpO9V3xEUOWjRiJez507KAbVoDBo4XEVEjJ5bgKM4xQROVdVlPb5L9uzbLidPnjChkZ3dXyZPnCnjx02V9LQMs6hs27FONm7+QM8plC3b1tmyBKNGjJfBg4aaAEpPz5QRw8foejsHzOIzdPBIvQNkSDAhjgl8YHbv2SqZ6VmSP3CIbWNICUKkurpWjh46aA60gweNUAuSfu1pHa0eaiWCmCosPKqiKF+GWN6oRbAe7h58JwES6HkCTf3k6PlSsQQkQAI9QwDjMCoGMBQDYYEFGGFlqdWZQF4LBSwaGMJBOPnx46ZIRnpfi86Ka2M1j9rqKlm15i35zSs/t+shDup0aQFc89wLP5V16kyL2UVY+G/Dpg/U/2WtnDl7Snbu2izL331NhclBFSsNJqAadAXnCSpw4JB74OBunQWEGTw6o8iJDhUgJ4p0aEr9YEaOHK9h7geadQUA4X+DYaQVK5fKhi2rpLa+JijKIEn0egxRFauYevXN50xcBQVbz6DnXUmABFomQAtLy3x4lASikgDWLRqtnf/O3Zvl8JG9cl6tEHm5g010OIvGTl2oL02tJGNGT5LS0jNmtYBtAmJn38GdKhJel2FDR8l9dz+iImKwCoQGDTt/Qn757A9lrTrqjhszWfr3y9e1ku5XP5Us+WDNCpk1Y57MnD5Pzx9ifimAX19fr8NCg2XMqAmye+9WFRgFOmtohs0WgkBCvjv36KKBOkSFVa3h+BuMnxIcDsIMpwsXSyVeh6wCEEGeFoVviw1ZqVi6fLlSj+EorSseRNwkgYghQAtLxDQFC0ICkUPgioqEzMxsFQbT5VzpaTlWcBA2iaBVAhYNnbGD4aKROnSTlzsoKB6uWmca1HJRdPK45OTkyt0f+bhMHDfNlhlIS82QGVPnyNw5880yU3KqWDDcM3vmjSpS5ki//gNk8qTpsnDB3TJi+GgdzkHwNuigBo2nkihTJs2ylY0PqYCCdQa3Q5C7UhVTBw7sknwVNcOGjLYyOsmBd/jEwIEWQso53oZEi56A4ziG+zGRAAlELgEKlshtG5aMBHqMQLDDjzFfkz6xcTpcskcuq58InGhhvdizb5tZNMarRSMlOdX2mYOrqograp2B0PnovY+pBWWgWjcuqPWiwl6l58/JhQulGpjtkjrU1kq9Djmdv1Cp51xSS0e1Os2Wy5kzZ6SyssKECoQFRAbiuowYPlYtMf3V4rNPzz9nAgMiAwIGomWMLh6Y3be/lis4lBSCp6oHweDM6uKUTOhg0GcH05cZCj8EhRskEJEEOCQUkc3CQpFAzxKASIAVY/jQ0baK8BGNSQLH1MkTZ5m/CJxjhwwZacNB8AUJpaubw4eNsWixJ04cFlhSTp46LhfLSjWPAik4ftQW+YvB9GSzksTZ8A98ZOLUCTZeo9NaxNuG+mC2ECxq8clRsTJj2lx5b+UbJlJy1Vfl8uVyWbtuhVpw0s0CA0tKU8LDWVZcOT0ltl0hi4s7ge8kQAIRR4AWlohrEhaIBHqegGoEDcSm043jEmS0+o7Az+O4xlxBcLaC44fU2lIho0aM05k+mUHrSqjH12nCKkSOq1B54aWn5dXXg8Hg4vsk6vkT5VOPfEnuvfthnc2D6zQoG24UlsyP5NrdJmJGDR9vwzcFxw+ahabkdLHOViqR4cNGS7/sAZ6hqbBMm/2IYa4Geq00y4cHSCByCNDCEjltwZKQQEQRgGaAw+rYMVMkJfV1tWrskVnn5tk0ZawZNG7slKA1REUMTCXwcYm18P7l8vbyl6RIpzkjVP+kCTNM2GA4KSsrWy6qA+yWLWsbDdHgXtAusNbY8EwYCRsWUisLrDr5eUM0Cu1+OX22WJcQ2CE1Gqtl4oTp5gCMYZ8mtE4oNzezSG9k98LnuvpajT2j1hwtQLjlJXQhN0iABHqcAC0sPd4ELAAJRAgB11tbj692Du3AMVuof78BOkNnogqEEp2O/F5w+rA620I4YGaOjeugCno9Is7C4nHo0D6bybNowX0yVEVGugZyg68L/FQwRFSl057h6GoqJXipbUPUII/gFGotiJYB5UCRIGRSktNUAE1XH5dLOhV6tezas1lnEA0S+Leo9oDiCZ5/9TrdoyIKX3M61VqFDQQYnGxxbtAf54r6zJzU6dXlV++pB+x+BgGXM5EACUQIAQqWCGkIFoMEIoIAOn1LwQ4bFoi42HjzD8EQzqtLn7UgbRPGT9PItGkWK8XUBE53fbzmgTgtWJcIwgJCAT4osIRgbaC9+7drMLcqExamcvRSO
M9CHMGxF/FagvuDofutOFezRxnGq8UnS2cwvbP8FZ2tdNQcfOFsi2MQG6iCKw4sLklJKWp9SZfi4gKN9VJiM4awijNeBTp0tXvvNi1rjQWUQ31dNYK5hIC4YvCdBEighwhwSKiHwPO2JBBxBK721Aj8pv9fHR7BDJ06gQUD05c3b1knd+pQ0Ej9jGSXaJ+Od1guIEz6D86XUSPHydbt6yzcPmYMwUqCxQvhe5Ku4iEtLU0XGFyncVpGWnRZCJCkhBRZtXqZWjxO6TTnmzQOzDizeuBaS/qOWUW5alGBxWfL1nXmXzMeQ1Pqa1NXVxM8z/MvREySrjaNIHj7derzK0t/ZdO001MzLYz/3v1b1QIjai0apFdBnOgLb3ov3PXqHt1iIgES6GkCFCw93QK8PwlEAAFoAlgXEFxt9MgJkpiUbBYIdNno9BFDZdaMm3VWzmW5YdatFqofaw45cwRC+dfW1NqwD2bs3LnwAbOUrN+4UjZsXKWzguJ1baIUneVzo0yZPMui28J59/TpkyoWhupSAENl0e33ypp176lfyi4ZrOH+IVgglDI03D6sOTqaEyyjipMpk2/QJQNOaBwYFVIafyU4M0hXbFaLSo5aWyCSsrNz7XzUa/bMmy143PoNK+VXz/1EF2JMsrWSJk6YJlOnzNEIujv1Ppl2foIuSQBBNEAFWnBoCrKFiQRIoKcJxOgfM/8ae7oVeP+oJoA/QVgRajUuyde+9jX5zne+o4HVym0Rv9/70l/KnFm3WRySkKWhm2hBtCAcfsWlcisPQuFjaCdYvlizYGB9oCQVM+jw4WSLhHLZUI5aPyBKMNSC6y5qxFms9oyQ+1gYcdDAYRb5Fr4sZRrSH2H90/QeuA/8SRArBYsXIuIs1hnCMA62sR+CBb4tjhX2Iaw/8k3WIZ8Pk0aura3SIadq3a+iS1ddxjW4Fv4zsPKcKDxmvjdYO8jEmdbl/IUzVvb0tCy7X+XlSzbFGvUJqbIPb9KlWxBbCVqGI0f3ynd+8C/qJ7RSy4xbBB2Du/RmzIwEejEBWlh6ceOx6CTQlQTQSWIl5X45A0yMwKfECQR0qhhagaUFQdyCzrbBu+McTG+GSME1EBPIK0ctHAP6D7wqbDDEgtguNfqqk8yMvjoNOdfWKMI1sIxAVOTnDdaVnON0P86r1VlFOSaIMBQUdJgNDtQg3kpu/zy7F67/MAXMyoPyoIwoCwQVhqogtDA0hBcShrBwD1wPCw/KgPMhnnL65tr9kAfqdz3S9bnL9agJ70EC3UOAgqV7uDJXEuilBALmLIvCY6VjZ12wTl99WTAFGB19uLUHAgMGF+zHCwLHOdYGrTQ6RRpOscjR1u+ptuM45vKCsy2sTO4cDMfAKmKfr+ZrH1T4QFjU11+2j8jDmyBOIIpcWXAM28g7GNI/GIIfAgWFtmMqXOw8zRsCBQ7D3uvtIP8hARLoUQIULD2KnzcngeYJQC4guaGX4Kfu/zdcALg7BoWK+9T43Xw9XIH1kLezDwqD4MwhdxVES3iyPMKW82m+LEFhFJ4HPnvv7T3u8gr6uzQWZN7zsO3ODd/ffZ8b21dUX2m6llH33Z85k0DkE6Bgifw2Ygl9TgAdLBLevR1ltVoXMBUYv/jRyQYCvbsDu15DK219XCKlPE5AYfo3LEMuuefCfeY7CUQ7AQqWaH8CWP+IIQAfjtTUVHMkRaGwUOAJXRH5Bu3IEtV/BEMq7MQiprm6pCBB61nQrITVr8/qythOuGZkZHTJPZgJCfiFAAWLX1qS9ejVBNyv/REjRkh+fr6cP39eZ8hUyNq1K2T4kFEybeocm5lDwdKrm/mawkOwwOfmwMGD8v6qt+SURgFGysrKksWLF19zPneQQDQToGCJ5tZn3SOOwIwZM2T69Oly+PBhdTit0aiwO+TpZ76jguUGycnJuxoXJOKKzQJ1mEBAyi9d1Gi7W2W/xp/BjCUMC06ZMkWefPLJDufKC0nAjwQoWPzYqqxTryMAywmsLEOHDpX77rtP1+I5JDt27LCZLXv375SjBYdsWi6mBreUzFKjp7R2Xkt58FjHCNjwztWZUu3JAX5KWA07aGWLkcGDB8nHPvYxmT17dnuy4bkk4HsCFCy+b2JWsLcQgGhBfJE777xTLl26JM8884zs3LnTtmt12q3XIbPpOiEqLRb3wwyaxlN9mz6fe7uSQNAxuuXZR03fD07VGj8mJUUGDRokjz76qAmWvn37Nn0695JAlBJgpNsobXhWO3IJoPO6ePGibNy4UV577TXZrpaW8rIyEyPNlRoiBWIFsUbgvItX8Bd7c1dwf1cSgEBEzBi0QWJiksWwaSt/tF2iLgcwYcIEueuuu2ThwoWSl5dnxcMxJhIggSABChY+CSQQYQRc3BIERzt9+rQcO6azR86eDQVVCy8uOkZEZ4Wj7v79+2XgwIEybNiwkLUl/Hx+7loCjj+sYVVVVSY80tPT2yQYcS3EZXZ2trUZ2i45OdkKSLHSte3E3Ho/AQ4J9f42ZA18RgBOlxAtECEYIsALqbkODOfi2Pr162Xr1q3qAzFYHnroIbse+5u7zmfYeqQ6zooCy1ZRUZEUFxfLjTfeKBMnTjTurbHH9TjH5YN3t69HKsSbkkAEE6BgieDGYdGilwBEi+u8QMHbqTVFBR0mhpCWLVtmv9Dnz59vlhbXETZ1Dfd1noATF4WFhSYYd+3aJXPmzJGxY8ea5aQtd3Dt7PLyBg9sy/U8hwSihQA986KlpVnPXkcAIgWdV0sdGKwr6OjQYW7YsMF+5eN9z549ZqXBMabuIeCEBobuwHzv3r02HR3CEUN47nhb7o62hkWtpbZuSz48hwT8TIAWFj+3LuvmGwLo0JpK6OAgWrZs2SK7d++2RQH37dtnQ0Nz584VFy21ueubypP72k4AXM+dO2fWlePHjxt/iBf4EsFxFsfJvu08eSYJtESAFpaW6PAYCUQwAWc9wYwi+K/AORedI6ZEo9M8evRoBJe+9xfNWVAwDLR9+3aNTHzZ+EM4btu2TSorK62Srp16f41ZAxLoWQIULD3Ln3cngU4RQGeI4R/XYbrM4HyLWSutx25xV/C9vQQgDhGNGGLxoIbWd6m8vDwkIClWHBW+k0DnCVCwdJ4hcyCBHiMAZ1t0mAcOHGjkM1FSUmJOuCdPnmy0v8cK6rMbOyECK9bmzZultLS0EWcM0cHygnWCmEiABLqGAAVL13BkLiRwXQm4DhN+E+gw4UeB5PbDr8XrfHtdCxclN4MYgYMtnG0dd7zD8oLpzZs2bdLFDE81EjJRgobVJIFuIUDB0i1YmSkJdD8BiBKIFXSY2HbOna7zxHpE8KXAEAWS29/9JfP3HRxHBOqDYEH8FSS3H9uYOYRjcL5lIgES6BoCFCxdw5G5kMB1I+A6Rjc7pUzD9ufm5uriiElWBkROHTBggIV7X7duXWi46LoV0Oc3An+8MOwDPyFEtUWkWkxLRsKaQPn5+SZkcA7aB8m1m33gPyRAAu0mQMHSbmS8gAR6ngA6P1hWzpw5I4sXL5YHH3zQOkmUDKHd7777bnn8
8cctVDzOq66u7vlC+6QEsGRhRhAcmyESP/e5z8nNN98sCQkJVsMRI0bIY489Zvtg5Tpy5AjFik/antXoWQKMw9Kz/Hl3Emg3AXSYECyIwXL//ffLzJkz7Zf+mjVrLC/snzJliq34i2EJdKroYCFkcJ0bOmr3jXlBSHhgmG3UqFEyffp0mTp1qnzrW98Kcc3KyrIVt/v162dDco63eydGEiCBjhGgYOkYN15FAj1KAJ0fOstZs2ZJamqqBY2DHwuSe8cw0X333WdTbyFW2GF2vskcQ4gSrKyM4R9MHYcQxMvxx7DcjBkzZOTIkRai313X+RIwBxKIXgIULNHb9qx5LyaADjAzM9OcO53DresU3TuqB+sKxUrXNzSEirNYNRXrBm0Ax1v4tzjflq4vBXMkgegiQB+W6Gpv1tZHBNwvenSOXpHiquj2ufPcfr53DQFwdewda2/Obh/5e6lwmwQ6ToAWlo6z45Uk0KMEWuosXcHcOe4z37uOQGtsnWBx7113Z+ZEAtFJgBaW6Gx31poESIAESIAEehUBWlh6VXOxsJ0hANO838zzzn/FOdqCj6sn9rn9fvqV35plozPPCK8lARKIXAIULJHbNixZFxFwHbj3vYuy7vFsUCfXgTtRgndMbca7Eyw9XtAuLICrm6t3F2bNrEiABCKYAAVLBDcOi9Z5Al6RgtkcFRUVUllZaTM4Op97z+fgBMuFCxdMnGBGCjp01BHr2fhlhgrqibpg1hNmRyFIG/Y58dLzLcESkAAJdDcBCpbuJsz8e5QAOjVYGU6fPm2RYffs2SOFhYUWAbZHC9ZFN3edNhZBRKj++Ph4q+8777wj2OcnwQKRgtgykydPlokTJ0peXp7FQaFo6aKHidmQQIQToGCJ8AZi8TpOAJ05VtQ9ePCgLFmyRJYtWyYnTpwwK0tdfZ06e3Q870i60g39QJipl45UVVfJ+vXrbfE9idGS+qSeEF/JKcmS2z9X5s2bJw8//LDMnTtXMjIympzWHUltxLKQAAl0ngAFS+cZMocIJOAsKwcOHJDvfe978pvf/EZKS0ttCAHFRcfuq6TV8dbpilzxXSceUxdjayKdLz1vwhMWpK985SuyaNEiBsfz1cPMypBA0wQoWJrmwr29mADEChIEynPPPScvv/yynD9/3sRKZlam5OTmSEKiLlSH02CB8EFClYP1DtYdFQs6pfqhclqHq5FjL5ZekIvnL9raSCtXrhSEyB80aJAtU4Caos5MJEAC/iRAweLPdmWtlMCWLVtk+fLl5tuBjiy7f47cftftcuOCeZKuwwiW2L9F/rOiGgw6pLamVvbt2CvLly6XIwcPS11tnbz//vsyf/58GTdunPmzRH5lWEISIIGOEqBg6Sg5XheRBJx1Beu4bNu2TY4cOWKWh4TERFl0zx3yxO9+RoaMHMJf4hHZei0XClakmTfNlrSMdHnmhz+X4hNFZkXbuXOnOVUPHz7cMqCVpWWOPEoCvZUABUtvbTmWu0UCmNZbUlJiDrY4MX9Qnsy7/SYZPGKwrV4caAjGL2kxEx6MGALmn6OCJS0jVebMnysfLP9AThYWmxgtKiqSs2fPCgQLBCsFS8Q0GwtCAl1KgIKlS3Eys0ghgNlBiLviLC59c7Ils2+mzhpSd1R9YSqsO9YVZQ7vJF0H2xV5N8oDQ1jOTaXRgU5+6K58O1ks7+UQmRgWSkrWFahTk0NOxlVVVUERSrHixcVtEvAdAQoW3zUpK+QIeEWE245VZ4juiNsB8eMVQLifu6crT1e8Qwi1JV+UpS3necsUiNFrItkLWVc+i4mNEQiXgE7hRlnBw8vdWx9ukwAJ+IsABYu/2pO1aYaAdWoQFc0c7/BuzbAhEOw8Y/toOPyrHT7uh44VH9srHJoqi+UHW+7yowAAQABJREFUEWIZ6hleiwi2PSlYVxhi2iZa7Pyr17f1Gs/trvtmIKy+170AvCEJkECPEKBg6RHsvGmPEOjqjk71CDr4uLh4tdoE1+1B8DYIlD5xwT+tKzo0BUHQWdGC6+M0TwRPwz0wrIV7N5Xi+sSZJQLn4dVScmVzEXFxvlfAtHQtj5EACZDA9SRAwXI9afNe/iKgAig2JlbKL5TJ8cPHpeDoMSm7WKZBzJJkwMB8GTZquL7nSnxCMFx+h0WL3qfhSoOUnimVCxqHJC09VfoN6G+xZCBazJKjZJF/fV29nDp9SiovVUp2v76SmZ1lQ2DNgccQy+WKy3Lu9Dk7r9+AfpKUktQ9fjLNFYL7SYAESKANBChY2gCJp5BAOAEIBfjDHDt4TN556W05vO+QXGlQq4f6gUDE1NdekZS0FJm/eL7cvOgWyeibYdaOjogWCJIdG7bL8teW21TeG2+bJ4sf+EhoBtTAoYPU+tLHrCoQKq/++hXZsGqD3HHfHXLfI/ebszGmeYffG5aUPrF9ZNu6bfLKsy9J3+xseeQLj8qYSWNM+ISfH86An0mABEjgehKgYLmetHkvXxBARx8XHydFx4rkhaeW6PTaEhMlE6aPlxS1ftTX1ktxQZGsfOt9+c0zL0qtBjhb/LHFtg6O82sBiKaGayASnFDAuX3i+8jpolPy1otvyvlz5+WGW+bI+GkTTBxhHyL2PvjphyQjO9PYIpha4dFC2btjj6SnpcmMG2dKVk6WaKT+xklHkzAMdKnskmxavVE2r94sYyaMMcuMuz8uQF2bGiJqVM6r50ComW9Nozth8cngkJg330an8AMJkAAJtIEABUsbIPEUEggnAOfXzas3yZEDR+SBTz1oloyk5MSQM+zU2VNl4vSJ8sP/+IEsf/UdGTFmhEyfOz1ohdEOHgnWDYgGWGqwCxYas9LoB3TusOIk9kmUsyfPypmS0zL/zgXy6Bcekz4JfaT0bKkO45zV6b0pEqNeqPGxcWbd0QvVf6aP5OXn2TDPwd0HzGKCfRAeXqdg+Nkc2X9ETp44KdlqXUlNTW1UTSdU4BPTR6eBW5lQzkDjcqIezQkb1AuzsvBqSqA1uiE/kAAJkEALBPQnERMJkEBbCaBjRueLeCAFRwqkv/qSQJzA6Rb+JRdLL0qZ+rRcrrwsw1SkLLrvThsiQlRW12FDNEBAwBpyuvCUioajapEplurL1SZg4FdiTiR6r4bKBik/Uy516rybGJckdVV1UnWpyqw4cMKNU7Fw5fIVqa+qx3QlExUY/hk1brT6ueTKwT0HpfR0qeWr+ieY9B33qKutle0btklCfLxMnDbJrDUNKpqCwiSYFwRVlfq4FB0r1mGvI2ZVqqqosjq58+o13o3V7WqxHUsTSBBeV4LxU5wAcsf5TgIkQALtIUALS3to8dyoJ4BOGsMeNrSjAgHB6Wqqq01fJCQkmFUEHTMC11VWVKqVZZI8/uUnpH9ef+vU7XoVDAWHCmTdijWyf9d+uVR+SYeLUmTE6BEy97a5ahEZa8NHFzR664rV72pU11VSdLxQli97R85cOC1JSUlSXlYme7bvVpGRKD8PPC2jJ4yWW9Rfpo9OrUaMEtyvX24/OaAWlqMHj0j+kPxQ28FyE68ipViHtA6poBk6apg
JqAO7DoTOgSjDTKTdO3bJhpUbVJwdUxFWqUHbktVaNFJuueNWKyesQ9s3bZfi48VqQZohQ0cONTZgAFFWosNlGHIaPX60jBg30oK+UbiEMHODBEigHQQoWNoBi6eSAAhArMQnxtssoG1qoVjxxgpJSU6RHJ1hk5iSYBaYQCBeYOnIysmUG2+/0YZMMNMHfh57dQG/3zzzgg3zjFRLyOiJY6SqskrFxX7Zv3Of3PfoR+XWO29VMVQvRSeK5WTxSamqqpazp87KcbXqJKtoKNMVi2HJiVcflqOHjkpyWrJZfRJVwFhSUTVFLT9HDx6V/Tv2ywwVE0mIDqtlR4KVZ/fW3VJRXmFDVxAWV67Uh6wrOGPb+q3yyi9fVmtMrIwcO0oSkxOk/OIl2blph5xUgfLYFx9Xf5rxUlejixC++Z6cKiqRT3z2k5Kls5MgeCrKKuSNF16XHRu3y7QbptsQmN2c/5AACZBABwhQsHQAGi+JbgKwEMBSAmtIweFjslrXtYGlYuT4Udqxj5QhI4ZI/9xcnRmUaZ08LBoQLxjCOVtyVl597lUpPVeqM3Iek9k332B+KPX1dXJo7yH5xQ9+Lm+++IbkDRogo9Qq8cjjj0regDx54dfP60rTC2XxQx+x4Z1LFy7JU9/6mU5BTpaHP/0J6Z/fXzJyMuTi+TIrGyw/sHaMGDPcRAscg8eoMKq7UmfXV6iz7c7NO80hd7Q6254qPm1+NKhbsJzn5F2dlRSnlphP/fbj5geDwHgY3ln19ip59n9/LTu37FCxNVpm3Tzb/GDefX255OYPkHs+fq/A2vTBslWydf0WuXnhLSruhlmAPVpXovtvh7Ungc4QoGDpDD1eG5UEIFYw5JOnwyyPfvFTNqSyee1mG/rYsHK9DXv0zemrwzRjbJYOhmswxRlWB4iEArV63POJe+W2uxaYLwnywhDNTJ3Rc1mnJf/smz+VLWu22NALFmvMH5pveebm5dqwEe5/SS0dEEQpGEpSX5mM7Aypra81Sw6GrDCcA0vMlFlTbcbQYRVDI3VIBn4sGKo5qtOxi44XyaybZqk4ytM4L1enPetxCJNLOuSEoae5OoV6yuwpVnaUMzk9SabPmSZvvvSGWYiwjk92v2y5++N3y2l1DF75zvsyMF+nWSfEybJX3pHxk8bLPQ/fa8NouDcTCZAACXSUAAVLR8nxuqgmANGA/nfw8MHy8c88LAtUfGD45sTREyZICg4dUwvDSvX/WC+36PDOXQ/dbT4l+3fulcysTJk0fbKJCwRtg5CB0yqsMOOnjJeBQwdqILpjNhMI05UrqytNgFy+fNkC0+F8+L3UVNfYMEtZRZnEJsfathbraorRGUcNMnbqeMnQ+2G20NzbbjSLCqZd71C/E9QBPjZJqUlB/xq9Es64iM6bk5tjFiDEj6moqJBqdbStqa6V2rpaHbbab2IFQgoWE5QjLTNd7tehLFh9Xvjl81YXCKq7VMhk9882Xx/cj4kESIAEOkqAgqWj5HhdVBOAD4h11jU1NoSSryJjkIqX2TfNthlCiJmCoHLLXnlb3n75LY1OmyYL7r5dh2wuSHpWunbwaTZEginD6tgSEi2IMts/t78U6ayhqstVlnesztRBZ48ZOxiugWCBlQTvmJ2EfTjmTdAG9eqTMmjIIBk7eZzsUX8ViCkMHSFOyz71o8HQ1fAxw1WsQHp9KCYadNgnPTNDdA6SOQUfPXLUZkDB/wX+LBfVf6a8rNyuwFUoR01tjeW18N475Gf/8xNbPfljjz9o/jl1OtxFseJtHW6TAAl0hAAFS0eo8ZroJaB9O+KXYJryUY3Bgk5/0LDB5vBqQkKFQ5KG5h+i/iPDRg83P5Jv/tP/2EyayTN1aEWvbdDpzC7Bv0WD44aSSQcTIhpxILQ/uGH+H7qJa9wxHAn5hXyoOVQg6H61sMRrgDtMu96ukXLh1DtNh3Pg2Fumvi6Y6ZOlQ1fB4aBgEdRgokNCGqxOfVqWPvea+tUctPoNGDjArDNYEuBK3RWdFVRk5bD76xYEHCwt5RfL1W8naLHBNvYhuB0TCZAACXSWAAVLZwny+qgiAHGAMPhFGskWUWwX3HW7DB42RGLUwIFhnYa6Bo1sG/QlQTTc/CEDZbCGzj+m05jhV5LVN0sddQtsdg6sGjZzSAUK8oXVxNb1OXNWYuNjbU2fwNWVoJ1AaQq23VfvDcHkTbBqYG2hETrDB/FiECTu+JHjNh06U4eaYHmBZcYJHogPJAwLbfxgg2AGFJYWuP3ehTqslGHlS9FAdRAz8MmBuMG9YXmBpWjTBxvlvTfelSkzJquFpVZWvP6uhvvP0llS80L52gb/IQESIIEOEGj8DdeBDHgJCUQXAQ3mpiICTrUI/LZvh1ordMHDxMQk7bQ1cq2G0ofDKaY9J8TrNGC1ZCCgXIpOKe6b3dcCumGxxCP7D+uYjU6P1pWeMbYCsYGYKvB9KdFQ/IOHDdKFCzODviV6HCLCDavAmuESHGQTYhMkPia+0XGcixdEUqb6oYyfOkGD2l2Q915foaLlhM36QWwWN5UZJhm7Ru/ToBYUlC81PUVuXnCzDB0+VBKSMF1bg9Spn80xLeOZk6dtKAriDRYUCKHXn19qQ0kf06UCsCZRWkaqOue+aZYolJOJBEiABDpDgN8inaHHa6OOADp1WC0QbG3mjbNtmAWLH5acOKXioCHkZwKLyPlT52XZS+9I4bFCc27NGZCts3am2OyiD95ZJbs27bThFVhiIFiw4vPSJUtNOEyfO9NC5cMCAzHhTWaNUfGAay6cuyBnT5yRyguVZq2x8+x0+NhcHT7Sz7hvvE41XrrkNamtqjGn32QVUZZ/o8z1g56fkpZq6wwVHCzQ1ajLtc4aTVfjrRzec1g2vL/eZgRhuOeKWpQunL1gw0fwbYFzMRyRh40eJh997GPmHIyhpQZlw0QCJEACnSHAIaHO0OO10UdAxQOGQZKSEuXOj96h1pNSWb50mUWUxeKBfftr0DRMC9Zpx5hKjLWGYN3ATKE4tboMGKJxSnSa7wtPL5Gnf/i0ThGebo6wGArasXGHzr45I/d+4j6ZoAsc2lCNig4M2yTq/cyxVsUE9sPigTgrK3QI5vvf/p5Fmf3IQ3eZxQbB4xISEoMWE1UfEFiYHo3pzzs2b7fpzSM0Xox3mAl5Y2o1EoamMMV67/Y98uKvX5AThSckUxdQLNNlBxArBtalqbOmydF9h2Xlm+9LpUbARcTeRbo69AxdLwl8ruhq1TPnzbRIt0uXvGqrR2N6dFpGWrBe0ffksMYkQAKdJEDB0kmAvDz6CNhQi87AGTJqqDzxu0/K+vfWyp5te2T7xm2CqcfosBEfpW9Ottyn4uPG22+S3IG5Zs3AtbPmzTLH3PffeM+iySK0P4aDBgzMsyBts2+Zbf4rEBr1OgTTX+OvwEEWTryqX2zNIDj+LrjndknQBRf379prw1QwqcDhFUIhToeaknW6MsQNypOo581bOE+tLPEyacYkHSbKtIUWUR4cR6yYRBVB/TRaL4LOTZw5SR5+8hMWwXaVTs+G1SUzI9
PC8S+8d5Gu6lwhq95ZaUsEwJKz+GMfUVF2iwqpRItRg2LCcrTg7gU27HTuzDlzwE3X6c8mxHACEwmQAAm0gwAFSztg8VQScATQScM/BH4gD+j03QX3LLTVkzHdF8MssCT018UHs2Fx0aGbWhUBSBAHsGbAsjJq3Cg5WXjSpghD4OQNzjORA0db+IrgOiwsOESHWPIH59vsHYgSiAecM0j9XB564iGprrrH8oYvCQTIHQ8s1lOCCyzifsgH+Y3TGC8IZgcLEJYIgHBwgmXKDVNk8qxJNgMKQglWmnkLbzLr0KniEnOizVIHWpQDDreoO9YGgrhBXpiOjXwRXA55IiEfCJT7H31Alx64bFYilIeJBEiABDpCgIKlI9R4TdQTQKeMDr9G/UFgSUCgNVhRYrTzRnfdoMfg0Fqns2VqNFYLVAauwQudPTpuxGOZnKtTnZGX/ocOHn4hEDxwsrWk+gTWFFguIDq8PicQB4jBgplHcATGCtIoU4qG69fL7D4ha4bugHCJTwmuceTNB/dJUMuLc6pF2UwwqQDJHZRrwgjldvfAfRD/JV1jyaC+eksTKgg4B6HkktVVy4zw/n11fSE4KVOwODp8JwESaC8BCpb2EuP5vZeAdqwfdqedrwY6ZGQIoYGpzN7O2mKl6C1wDoSCN0GMQEhUX662lzuGa3BuSKzggOYP8eAsF3ZPd4G+474QOTjPrtX7YUoxEs71nt9SPvUqogKBoHXEXQdRU6W+NVXIXBPKhzrCkoJgc5d1wcZgwn30/iperp56dX+wDHVaxtqaq3XDiZ1NUEhMJEACUUeAgiXqmjy6KuztsFFzCAW8vOKis0QgMHRCc/PZNNG/mijo00znHXa+ExB2g7BjJoa8ekiPhwQSzvWc31I+TR0LlrHpeuGYOQF7ax12P3coVB7s8JTHHW/Tu6KytkO0XVNFQasUrm1U9jZlxpNIgAR6IwEKlt7YaixzqwTcrBd0ZkiXdHXiCnUURcIeDG90WQrvhHGD8H3hN/Meb+1877kt5eOONXd+c/txXVPHvPtaK6O7d1Pv3nyaOt6WfZoHBAvED4ak8LKk5YK/DYbGXFu3JTueQwIk0PsIULD0vjZjiVsggE4LHVtqaqoMGDBA0tLSbNikuLBYZ+RsMyfS7Nxs9H5N9tEtZM1DEUCgtqpWdm7aIYUFJ4KWFhUs+fn5kpOTY6WjaImARmIRSKCbCFCwdBNYZtuzBPCLe/LkyTJ06FC5cOGCVFdXyTuvvm0xRmzVYp3Wi4X8mHoPgeqqap0+vluWPv+areWEkmOqNdo5Ly+PFpbe05QsKQl0iAAFS4ew8aLeQOCGG26Q+fPnS0FBgZSVl0lxYZG88PMlsmbFasnMytThBfhndMV4ReTQUMORjXlhBMdv6bJOjS4pOiml50ptthSGgm666SZ7wZKGRAuL31qd9SGBDwlQsHzIgls+IeCGhfCr+5Of/KScOXNG3nrrLSkvLzdflvKL+62mfhwUCjkU+1CxOEdptG9ycrJMmzZNnnzySZk6dSqFik/+dlkNEmiJAAVLS3R4rNcScL+0Z82aJV/5ylckPT1dVq9eLaUaSt9Fo/WTYLHO3IkUOKj6zHKEBxFtCqtK36y+JlYee+wxWbhwoUUVdu3dax9YFpwESKBVAhQsrSLiCb2VADox+LLMmTPHHHAXLVok27Ztk+LiYovQ2lvr1VS5UdfaulqprKi0laOTNUS/3xLqCOda+KygTUePHm2WFr/Vk/UhARJomgAFS9NcuNdnBIYNGyYDBw6UBQsWyKVLlyziql9+lWMYCOnEiROycuUqGT9+nMB/B8kvdbTK6D9JSUnSt29fe7fhLxUxfqujqyvfSYAEGhOgYGnMg598RgCdGTo2vGBtwS90NwXWT1VF/fbt2ydr166RzMxMsyhhfSIkv3Xorj1RL7/VzU/PJOtCAl1NgIKlq4kyv4gj4Do219Hh3U8J9amsrFSxslY2bdpk/jq33TbfnFFRT7916qhPo+i5fmpM1oUESKBZAhQszaLhAb8RcMLFT/XCYoKo18GDB80/BzOhtm7dKrt37zZfj3hdeNBvgsVP7ce6kAAJtJ0AI2e1nRXPJIGIIwAxgoUR161bJ3v27LHywal448aNUlJSEhoOi7iCs0AkQAIk0E4CFCztBMbTSSBSCMC6guGgkydPhgQKBAxWZd6wYYMJGJzDRAIkQAJ+IEDB4odWZB2ilgAEy+bNm20ICEIFn/Hav3+/DQ1hRhQS9jGRAAmQQG8mQMHSm1uPZY9aAhAgsKbAZ2X9+vVy7NgxY+H2IzgehoWOHDlCsRK1TwkrTgL+IkDB4q/2ZG2ihACECV579+6VHTt22CwhVB37XNq+fbvs3LnTd0HyXP34TgIkEF0EKFiiq71ZW58QcM628FVB/BVM8/VO9UXMmbNnz9o0ZzjhOoHjk+qzGiRAAlFIgNOao7DRWeXeTcBZUTAMBP8VrFScn59vq1KfO3fO1tuZMGGC9OnTxxxvYWkZPHiwYIozEwmQAAn0VgIULL215VjuqCYA0XLgwAHJyMiQr371q1JdXS0//vGPBYIlISFB7r//fpk+fbq8+eabcvr0aamoqLCQ9rgO1hkmEiABEuhtBChYeluLsbxRT8BNXR47dqwMHTpURowYIcuXL7elBwAHoqR///6yePFigaWlqqqq0XBR1AMkABIggV5JgIKlVzYbCx3tBOCvAsHiEqY04+VSTU2NbeIc7IfIoWXF0eE7CZBAbyRAp9ve2Gosc9QTcE62LjAcPnsFifsMawt8WfBiIgESIIHeTICCpTe3Hsse1QQgRpwwaQ6EV8Q0dw73kwAJkEBvIMAhod7QSixjhwigQ/d7cnV07976evd5t73n+Gmb4sxPrcm6kMC1BChYrmXCPb2YADpm90I1/N6JtSZEHAs/c3AMUEf36sWPMItOAiTQDAEKlmbAcHfvI4COyy0IiNKj83KdWe+rTdtKjPo1J0ZwzL3allvvPsvrz4OhMiYSIAF/EaBg8Vd7Rm1tnFCpq6uT8+fPW5RXLPyHz35OqDcCwmGxw8rKSqtqfX29HD58WFavXm0xWVoSNb2dDeqGqL4pKSmSnZ1t07mx7ec69/Y2Y/lJoKMEKFg6So7XRQwBJ1YuXrwou3btkvfee89WKj5z5ozvBQsaAdYE1L2kpMS2a2tr5ZVXXpE1a9bYZ3Tefk6oPwLoIebMokWLZN68eSZcMDOqOeuTn3mwbiTgVwIULH5t2SirV1lZmUV1feaZZyxcPVYxVo8GHReKHhAQJm4oBNFt8YqWhLpv3rTJRNojjzwijz76qAXVAw+Klmh5ClhPvxOgYPF7C0dB/TAUsmLFCvn+979vlhUMicTGxEpycrKkJCUFfVn8zkE77IYGWFKC1pQYrb911D4XbKhejVqUsDRBlb5gYbtw4YINhT3xxBPSr18/a3mKFr//AbB+0UCAgiUaWtnndcTQx4svvijbtm2zqK4pKlSG5Q+RqeMnSn5ungVNw6iIz/tun7dyM9VTx+qy8jLZe3i/vg5IeWWFFBUVyZIlS
2yI6M477wwtWdBMDtxNAiTQSwhQsPSShmIxmyfw1FNPCVYkNsuKDgFMHz9ZfusTn5GbZsyR9NQ0Dgk0j84XR2rqamX/0YPy4yX/J29/sEIqL1+WnTt3yvr162XOnDmSk5NDJ1xftDQrEe0EKFii/QnwQf2x8B+GBOB8m5WRKQ8sukfuX3iXpCelSIxnfR0fVJVVaIJAQK0seXNulUp9Bo4VHpft+3YLHI+PHDkicLzG7CEOCTUBjrtIoJcRoGDpZQ3G4l5LANOYzdlUx31ys3Jk9LBRkhqXILU6PGDDQNqhMfmXQABTu5OSZfLIMTJ80FDZpoIFAgWO2BUVFf6tOGtGAlFGgIIlyhrc79Xt0ydW+lwVKBAxsRQrfm9ygYUlVsVqgk5jRkwayFPMGnLT3X0PgBUkgSghwHCQUdLQrCYJOALozPX/iEsoFxMJkAAJNEeAgqU5MtwfVQTsF3mgQScFt63TDJ4fDH3f3aCCAqNt5WqtLCYKbMpza2d23fE2lRyWMLWItencrisacyIBEuhFBDgk1Isai0XtRgIuwJiKltZ6TRM1V8+PgVWgmy0DMXovS11wHwyf1DZckTgdOImLjQzfHpSiQetWp+wR7C9OyxgZJevG541ZkwAJtJsABUu7kfECvxDAr3k4Z1bVVEvB6RKp0Pdh/fOkf2ZWix0mrjlTdkFOnD0tGersOVRjvSTFJ2pu3WAf0HudulAql3UGzIC+2ZKq9+uIQIJlpY+uuVOs9TxcWCAj1Dl1WN5AgcNqdyetgsRA1zV3Iz2hQYP97Ti4R2OmxMuEEaMlSZ2mW7iiuZy4nwRIwMcEOCTk48Zl1VomgE48tk+cnDhzWr758x/JH/zzn8sbHyyTqtoaKBlz3LwmB91fV39FXn3vbfmjr/2V/FBjf5SooIhRh08bbrnmgo7vMJGh+a7YtFb+95Vn5ZiKjQ7fR8tdrVO8l2leH2zfKNUau0R0aKiry+ytLSxDldVVsmXvTrlYeUmtKLCgXJusnuose7jouLyy8h05fLJIRJ2nu7Ns15aCe0iABCKdAAVLpLcQy9eNBPQ3vw6LVNZWS/HZU7LjwD55b9M6KVTLSWgYxnN3dKCxKiBOnDmp562RnYf2yqlzZ+WyxvzAUIuzIOA8dM6YpXJFX3gPzljxZHZ10869ep6dq9c16qg137MXz8uJU8VyWS1AEFLepLeyezW6xnuCbuNYHxUEhwqPaR13y/DBw2R4/mAJIEYNyo3yXi1jo/I2kU/wPkHfHVe/ULlDBLRMmh8EUdG50/LtXz8lh0oKpVZXzkZ5w8uKzzGxfeSGyTOl4nKlbNy5RcPsV1kbOKZhReFHEiCBKCRAwRKFjc4qhxHQDhNToPMGDJCCU0WyV6OmosNtLA2C16AD3agWg9Lyi3p+nsQnxDfqgNH5avcrAbXcBBJ0WENfARULqnT0SvTWLh+IGj0XgkGP41x74TqcqcdcwjTtOL0+PPiZ3QuF1GvgsNps0nPqtT6rt2+yek0eO1ESE5P0HvDXMY+cYHnjXXn1/arFyFsOE0sq8FA0K7cO3zRbbpQrLlbqtTZlal2px32Qp1pOwkUX6tVQXydjhgyX0UNHmBA8dLLQymA3a7ZiPEACJBBNBOjDEk2tzbo2SwAd86xJ09RXpEp2qhXithlzJUej5jaoFQIdKo7DunLxUpls2rlVBvTLlYz0DB250KELWBM0Weeu59Tp59NlF+WMippa7YjTklNkUEZf6YuFGIMnaqcNjREr5Wp1wJDSxYpy/dxH8jL7Sm5KmiToPTVHnG0pKCvcp+C9IBpq1IH2vAbIS9S8sjV/LProTShTHxUWJ0vPyJ5D+2TIgIEySl9yBVIC2kHrpu/nLlfISfXLqdbhMCuvliMrwfnlQIIF73VFF1hMUF+YGq1jycULUqo8YlXEWLlT04Pl1gwhxirU8lSmryu6Xa31PF9dI5l6vxSbpWQkQkUFwxgVM9PGTZL1amHZfWi/TB4+mnF0QoS4QQIkQMHCZyDqCVinrR3m6MHDtfMOyMHjx+Rg8XGZlzVdO3YdNtGEjj9GLRm71fpSoL/+b505V4p0GOns+dKrFgM9rp3xRR3SWKuWjFWb10mB+mRgyYD09HTtiCfL3bculPFDR0r8VVFxTPN5V31Ktu7bJad0GAoCZsSQYXLnTQvkxskzJB1WkCZax4ZbVDRUqqPq+zo0tffwXrlxyiyZN2m6Oqs2nhpsIkrP3X3skJzTiMC3zZwnGbq+EpxcUd5KFVQ7DuyRd9etkr1HDupQzGXJzMjS8k6U2+feIhNUNCRrvTFEtmPvDjmvCw3mDxws+48dlnVb1kuh+psgn2GDh8rCubfKTVNmSk56pkWZfXfjGlm9dYMcO1Egz772vO7Pknl6fMHsm3SNp9SQGEQVY1T06GJQMkH5ZKhg23/kgJzX/PqrcGpQcYV7MJEACUQ3AQqW6G5/1t5DIFUtFFO1o3664IjsPrBXZo+ZqKMaarFQsQIxUadWgrXbNtvqz7MnTZXSdRdDQ0cxKkJqVdy8vmqZvPDOUrPA3KRWmkQd7ik+d0reWrNCik4Xyx889gUTAcd16OlHL/5Ctut9pk+YLJNHj1dfmGp1UN0hP3ruaanXey2ac7OkiA4neVKDWjgCWpaL6s/yli7098o7r8r4UWNkSG6+JGKIxnMuNtHRw3qx/+hh83XJzR1g1+O8OrXOwBfnmVeWmK8NFo3sn5Utp86fk/dVcO3Usn3h44+beEpS0bJl/27ZvG+npKrF6JJadQbqjKpF824z35rtKrq++6ufyvm7H5SH7rjPWBxXv5vDurZPRWWlFBSfkDMJZ2R43iC7rxasUUlhwYE1q69atYbkD5J9KgwLz52Rftm6cCFcbRqdzQ8kQALRSICCJRpbnXVukgBWe56qImWQTlPepQ61J8+flWE6fHJFxQOm2xao4+hOnXoLS8zYISNVhLx/NZ+AxMXHyaGDx+SNVctlgq5p86XHPi/5/QfYdF4MvvzqtSXy4luvyDbt9MePHicbtIOHZeW+O+6WJz76iKQkJEmDduIFJ47Jf/zkW7Js7fsySc8brQ6yEEwQGBhmwSyhMzqE9NKKN2TZ6hUyb9pseey+h2WIDlE5a9DVQplVCEsT1Ku4OXW2RDLS0iS3rwoAFT2xanXZq3X51dIXpa9aMb706Gdl4qhxEqfCpE4tGpt2bZPvPvMjWaJlzu83QEXWKLMkbd+/R8YMGyFffPgJmTt1tiSpFQjlKlZH5G8/82N5YfnrMlgtMLfosS8+8jmZN2ue/PsP/kv+8PNfVuvJaMlS60kK/H6uDrW5suIdlqO4xEQZpIJli7I5c+GcCRWKFS8lbpNA9BJoPOAdvRxYcxLQ6cr1kqm/6KdOmCKFOoV4r1panMUioEMWcLbFkMh0PZ6tnXy9dbpXwamFpVSPDRqQL4tvWSgjBw6RBPVJQRC0dO2kJ42dZDNhSs6cUkFwRS5VXdYLYyQrp5+k6PBInF6fpOdOHjFG7l+wWAZrjBRYdHBO4GqPDafVIhUrP9cpzsvXvCd36X1+6+Of
luFq6YhRa0lTCX43FXqvczp0hRguGWodgfvvFT1/hQ7ZlKv148HF98us8VMkWe8VrwxStRy3zbpR7tdVrw+qgNq0Z5v6oQRnL8Fac/ucW2S+CpEM9XGJ0/MTtD4QcY/e85BqpgbZsG2TrpZcY/FscrP6SqI6FOcorzyta6aKpnA/m8bljpF+auXBLKtLyjNGxRWHgxoT4icSiFYCtLBEa8uz3tcQMH8PFQ4z1fn27Q/eld1qgZg/daakJ6dKWcUlm27bTzvgSTpsZLOA1LJggxWwYqi4mDpyrIxQX470tAw5q51tmfqD1Kp1A9N5N+3eJqd1iOOKdu6wLkxSYYLhl+Ur3lEfkQQZrZ/76XBIjoqbu25ZJPXa4SfpcBKGSWz4REXL8ZIief39t+X5138jn/jIA/bqp/4i9RpTBZ16U5YIOOZW1lRJ5eXL0lfPTVTLCoa5zl0oUz+Rg7q68WBb5TigwuOK+rPAIhOoV0uHiq05yuGV5UvlYMFhKde6IBIthNQYDewWr0KoXv1zbMgJlh91Vp6slpeheflyDMM/ZeclS0XKFa1Hg4qjWi3jlRrdbjzCdU0bYEeyWm2qL1dJhTLUi5s8hztJgASijwAFS/S1OWvcDAHr9FVcTFTHz9FDh8uBY0fkuFpapuo04D1qZTii1oabZ8yRUXmDJUaHTUxIuLy0Y83UWUPlF+tlzY7NclzPLdX4KehuMQ/n+MliqdEO22YV6T2mqbj5zEc/IS8uf0N++ZtfSbaKFwwhDdfhkLF6v8kjxkqGDkPp+I3Eq3g4qZaZX730K0nQqcc5KppOqMPusaICyYLlxu7hCvLhe9A6pGJKy4ZYKfEqVuJVrKhDjlzQWUlll8plYu5YyVKrS4zeB346JnrwrkJpgEb87Z/dTypUjFTWaKwZPZqloio9JdWGqVB/s35A5Gj+GNaCuDtxRqMGV2kcFd3/YdJtOz946Yf7w7eCFpV65QuLV/jsqPCz+ZkESCB6CFCwRE9bs6atELCOXzvqTLVyzJ40Q3659AXZrbNVxulMmfU6lRnWihkTp9m03yodZgl1x6oM+uiU3H1Hj8hTr71gw0m4ZogKm1wdYhqqPh3F2olDdJgfipYDwuPOeQsEjq77dAbPfrViHFUH1WXrV8nLGkX3vlvvkE/eea8MyOlvIudcaak6146VLz/2OanSmUjf+cVP5bk3X5Y8FRSD++WpYAhOv/ZW0eqjO0yIQSxYiSEcVDTgRP0HRiKzLHkvDB6yY7gmVv+7eoUJCDj+4lpXf7uP5g/R0lJy57tzcF+8XPnc/jq19GDWEPyCmEiABEjAEeA3giPBdxIAAXTmam2YpULiZXUg3XvogA3fYMbMKF1/Z6IOh3g7eO23oQikRsP1v7zibTlUcFQ+//FPyfzZN2vMuET1FwlIolowEBYfwy1QAbAcwDckOI15uPmE3DTzRqnWoZvTpWfl6Zefk9dXvysjdYrzvSpcEMsFQ02feehTcovOPGpQS01BUaE5xC5d9a48fs+DkmaB4IKdP4oUSnq/ZBVHyerMWnulTl9qtVBhkZWWbhah0vILckEFUJqKNAw/oUxwfo2N0/WSEGdFfWYGqOhK0bpAcJTqvjKNvQJGEF8mZfCuVqBLaokp17wwbTotWdc8QkKdg1At/4Y+WBEbu3W/3suO6/XgEkwxtm4SAtul6xAWplO7KdhXT+AbCZBAlBLA9w0TCUQ1AXSp1nFe7Vzr6+plqM4OmqizdI4Wn5Aly5ZaZFs42w7ALBvt9HFq0DKg16o/B3w8sKjgEJ22O0tjomSqIEjAMIueCP+NQ8ePymmdLozZRjZr6O1X5Hu6DlGBWl7Qhcdrh52ZlCITdDjorkV3qXUhXgrVZwXOsQH1us3S2ChwRsVQjca8lYfUIfaGydPlzdXLZe3urVIfLFDjdtR9EBbpKSmSowsnVmk5KlXsYHior+Y3duRonW6skX3VunMFa/doPUw26HudWjg2awC9Mp2+PGrICFvkEQ7ERSUnZd/hA7qcwf9v7z2j6zyuNN2NnDMBEgABgjmBOYpiEk2KVKKyLI3l1Hbb7VlXbt9eM72m1/yZH7fnrtsz3aP2uGWP21Y7SGplUYmKJCVRYs45Z4IESAAkkfPd7z4o8BAEyEOkk97SOjjnfF99FZ4CVS927drVIBG6xIT8sDwhou8hXaK6qAHqRmrE2mwNlGcWF71nnFR4tGF3ULS6/BpwkQr1CzqqcVwqdGkKyUSMvkMkgVuKLrFhCzcTCZAACYAA/2/A3wMSgPhwFHSitO21apWYq1uGq2qr5BV1coUAmazB37DrxmNh8XpIxQacUFM10usFPTvnsFpPqtUhFcKgUq0OG/fukK+3bTDLRJXGWmnROvJVEMHKsmbnZinViLjwM2lQy8s5nfCxpdoTkyTdY5XRqmz5RO+jna1qKcH25GcefFwDsKXKWxr35eBp3dEEUdBhqfD0CX1JUCE0RHcSwXH4kkazbVGVEaXbl++ZpXFeNPbMB2s/lt269FWv4qZNnY5r9JmNuq35g7UfyTD1qZmpwghOuDgiIEaXaTZpYLw1O7fIZV0Ww1ZsiKXjGmPm9U/fV6uLyKxJ0yxWCxyM8Vy0stmvDr5HNdDe5boa5eI5OfobDTz39y/8Dwt+B1aIZYMQ/dhJFa8WnSwVaCaI3NjwnQRIIKwJcEkorIc/zDvfPhtim22ibvmN00kSSedfC10/VYO5FQ0pkN0H98tUtXyMysvHfmAVDcigRoXoWA0MFyeROtmmq7MptgK/oFaT32oAtSPY9qvLLDgo8ciZ4zI0J0/jlkyXzeqQO2v8ZAsKd0ItEu9++I6cP33SIrxi+QNiZ7OGpodvy/QJk02wQAxhazAcdlEz6m/VkPdTRoyVZ3Q56IVXX5T3NS5L/hPf051G6SZ2nLXClndUnIwaNkK2aVA6+NLM0L5gp9K00ePlmfsekT+897r8r9//b7lr8gzdipwpFyouyVcaoRa7iZ5e8bCMzi8wawnis2CHELYov/fJe7JXdz6Nziu00623aNnHtO1P3btSZmr/oDTgV5OhVpIxGsPlpbf/XdZ9/YWsmLdIHly4zIQS4qzs1wBxd02ZYduYY3W3FI4oKFNL1DA9nHGoxpaxbc1Gmz9IgATCnQAFS7j/BoRx/21S10kVk/zSuxZJvu7SwbZfCBYsZ2Tp9YcWLVOn2XxZotaIZBU1mIQhGbBkMUfD51+rqZJMDb2PZaKlGsq+Wa0UH67/XN5R/xcIjCyNPbJQhcyKeffoktFJ+XTDVxYlNleD0/30qR/IqjWrbbv0NnXqhdUjPS1N7tNtzagXUWFb1KpSrAHd0nXiR5h6bPP1iBH1G9G6lsycK1U6yV/RZRVsXUZfvJPlVXEyScv4WMs4c/6MnYScoOIMSzwPLlxqy00ffPmpfKqxXRpUCCXpLqDxo8bIw4tXyHTdwq1EtEjPic4p6o/z+LIH5YIeJfDZN1/Iph1b7V6ubnf+6ZPfk6UaowWxXlpV2CFAHbZdf0fjs8AnpUy3dcNHBoDh/zJx5DgVRI/IpNHjPH1
SYXZCo+PCEjR5+gRdVkpvX37zCETvfvEzCZBA+BGgYAm/MWeP2wlgMsfSS646lX7/4adsWo7Rn5hoLemkumjmXTJfX/YPxa7rfb0JMbJcz/zBtltM6IiGm6wT/aNL7jOBcqn8slkN4DsySJdv4iOjJU8tBrPUvwXrsDiscFTeUPm/NCJumQZuq1RnVkzmmbplOQv54R+iYgXtm6tLLG0yzXxXvCPEwnqSGBMvT6pVo1Hz6hnLJrQ8gsZ6YEIAyyyI2DtBt1KfKy2RMyocxqpfSov6tMS1Lw3NUGtO6aUydXit1Yi4qZIzKFsFWiLWn6wNEGgeXq2SqlaY+SrgFqujcEVlhQqtKMnW/JkqMKKUGdrs2qCPqTAZIyPUyoKAeerLK3Fq0QKv2brUNB3LTdrUSH0OhyTuOXbIlpMmqHULfjytms+V5ekRf5IACYQrAQqWcB35UO23LfO0Cw4f+4iFlnj1tVAHEBMZ7jH4g0CYwBIBi0vnUi2miWZ2zqV4j9LJGFuNEc7ekl7D4X0t+sLEm6JLOxBECKaG3S/xOtkj9kpRXoFXfs89zwWxpRksA3XVBjjVIkpujPrcwPrj7cPinoc1I0onf4Txf2X123qA41GLxIt+wc8kQu9n6M6eTLWGaCM9HPQ6hI53sqUwGDu0L/H6NlT9YgrVCmRJ64Y1CCLKW2Bo0Zq/RRKUY6KKI9tdpN9x2dqtzr2eZasoc9hFMLvRhcNVUBV5uHpK9+0nCrUKfcvOXCRAAsFFgIIluMaLrb0FAScoPO/u2y0eaL+FnN2dCAwRAMOK9yTsSoS1A89638OE3Nx43cLgBITl0ckczrWY800Y6BsmeRMNuKbphvyeSyZwbhVADUKmVU8I9G5H+6P2huttzY0yWZeFtmg02mMnj8jFsRN1J1SuiSYsM6FdEqHB8FzSif+G8rSjJt5sycwjdFp0+QjXkLpq9/WiVKBpP11y5Zp4aW7vmZa7ef8uPe6gWZ18p5mT851ZVzASeDGRAAmEKgEKllAd2TDqFyZAEwHaZ5x5g7/YYcUwi0T7hOoLDjfpdpX3Tu/BqoHk2uX9vN3xtgToZ++Qa3jGO78V5MOPWz0DJtFq3Vmgyzh79ADGao2XYpzQDlhVNHk/790GXIdFaIxaPhA2H9YYsEWeW/XTCu3ih3c9uK1GFjugEdu1l8ycp1F+NdZNuxjURnVRwo2X0A/ww5ITzndyyX4v2vvmrvGdBEggeAlQsATv2LHl7QRSU1Mt7H2jxhip1KBmJXoycb0uZ8TCD8TMI/5Ddfvp9ua29eSZm0vpdEUnblg5xusp0yNyC9SPRq0uEAMq6Lqq74ZreFZFAQ48xKGE2D0EwWKB39qruSF/p6p9+YotzdhmjS3QMfrZoulqvb6Ui+3crbqkd+piiVxU/xwkiKlkPWgxwQWws6v8QQIkEMwEKFiCefTYdiMwb948OXTokJw5fVou6anEq7/8TIaqD8Q03RqcoluP4TDK1E5AWURp8DosxzRBdNxJ0mcj1XkYVizETenrFAmBqf81qDXH0u3Uig4rBGm9tqXkwll557P35aD6wCDFqDVp+PDhkpOjW6M5/saEP0gg2AlQsAT7CLL98v3vf19effVVuXjxojTprpK1W77RyLRXbbcOAp/F2CGCOvuFvW7BDI9fGPzQz8bjdqrA/YLhWZf3Tp91ZdzmvaN4a5hmdhe6fg4LgYgEjOMCNu/bJRt3brUDHSFQRo0aJXPmzJFBgwbZwxQtXTPkVRIIJgIULME0WmxrlwQeeughqdfIssePH5cjR46YaNmukVr36fk/sbo7JlwmK+gJWCgwkYeLUQHLWvBbqW+ot3eMNUTKo48+KjNnzgybse/yHwYvkkCIEaBgCbEBDcfuwE/hgQcekIqKCnn55Zfl6NGjKmDqpEHjjOAVLsnjHOvZVmwB2sKl49pP9D1GxWlebp488ugj8vTTT0t+fr4JlnARrGE03OxqmBKgYAnTgQ+1bmdlZckPfvADGTZsmKxatUoOHz4s1dXVtmXYnEtDrcOd+oNJGY6xWBKLVl8QBHMLh34DA/qaqKI1TwXK8uXL5cEHH5ShQ4daIL5OmPiVBEggiAlQsATx4LHpHgLuL+iMjAybrObOnSsnT56Uc+fO2VJROEzcYHDlyhVbEsvLy5OCggITMOHwOwLLyuDBg83JFn2HxS0cxjwcxpZ9JAFvAhQs3jT4OegJxOruEExaubm5YWNdgWUFacuWLbJ3714pLCyUxx9/3CwtuO4EHT6HWoIwQf9gZcEyGL67a6HWV/aHBMKdAAVLuP8GhGD/3V/XmMRCebLG0LkJGktBO3fulLVr10pSUpIsXrzYlkWQJ9QZeHMIl/6in0wkEG4EKFjCbcTDoL9ugnaTeSh32Ymz8+fPy+bNm+Xs2bP2fuDAAbMyhZPzrRv3UB5v9o0EwpkABUs4j36I9z1cJjAsCW3fvl327dtny2AHDx6UHTt2yOzZsyU9PT0sLCwh/qvM7pEACSgBz8llREECJBB0BJx15erVq7Jx40ZzNIZIq6qqkk2bNllcmnCwMgXdwLHBJEACPSJAwdIjbHyIBAKHwP79+2XXrl1SU1PTYU2BPwsccOHbguTETeC0mi0hARIggTsjQMFyZ7yYmwQCikCDHvgIawrOUoIocTuGSkpKzOoC3xaKlYAaMjaGBEighwQoWHoIjo+RgD8JuKWe03rgI7Yzl5V5TinGdSwL4R1+LbC+OBHjz/aybhIgARLoLQEKlt4S5PMk4CcCTpTAyRbJORnjOhKOKMDSEHxckNx1+8IfJEACJBBkBChYgmzA2FwScMIDZydhK3NlZaVkZ2dLfHy8wUH8mZycHDtbB864OBDSPUN6JEACJBCsBChYgnXk2O6wJQDxgRe2MZeWlsqyZcvkkUcesQi/gJKYmCj33XefHQBYV1dn/i3wdUGicDEM/EECJBCEBBiHJQgHjU0ObwIIBge/FLxwSvWsWbNslxCsKS5NnjxZnnrqKXO8xVIRDoJ0FhiXh+8kQAIkEEwEKFiCabTYVhJoJwARMmfOHDs/B0IEW5g7W09wGCQETWNjo4kV5+NCiCRAAiQQjAQoWIJx1NjmsCcA8ZGcnHzbHUAQM7SshP2vCwGQQEgQoA9LSAwjOxGOBJwvC/renfXEO084MmKfSYAEQocABUvojCV7EmYEIFLc61Zd707M3OoZ3iMBEiCBQCPAJaFAGxG2J6AJBJrFAo63ECSd/VfwHffce6CIFl8EVkD/ArBxJEACfiNAweI39Kw42Ag4AeDEgXv3Zz/QBuwa6pycQHFt7nzfH9+dWPF+90c7WCcJkEBwEqBgCc5xY6sHmAAmfvfCIYPYJtzc3DzArbi5OggWCAAEj2tpaTHxgu/Xrl0TnCMUExNz80N+uAJRlZSUZI7C0dHRHSILbWUiARIgAV8IULD4Qol5wpoARAHECs7rQdRYnM9z6tQpqa2tDQAubYLItidOnLT2QRhASK1Zs0YuXCixe9p8vycIp7y8PBk/fry98NntXqJo8fvwsAEkEBQEKFiCYpjYSH
8RgFiBADh27Ji8+eab8u6778rJkycFkWPb2lr91awb6sWED+sK2on2NjTUa8j+TbJt29Yb8vntiwqmiMgIiY6OkUGDBsmSJUssCu+MGTMkJSWl2x1OfmsvKyYBEghIAhQsATksbFQgEMDkj9fx48fll7/8pQkWLL0gBepKhu4b0vj7YgIGIiZQEhZ+IPKqqqrkpZdekrNnz8ovfvELWbBggSQkJFC0BMpAsR0kEMAEKFgCeHDYNP8RgFBBwknHr7/+ullWysvL1fciQtLTUiQ3d5AkJSdIZAApF9dmDzVsefZ88vdPoKxvaNQlqwopL7+qkXcbZO3atZKVlWXLRBMnTrQmcmnI3yPF+kkgsAlQsAT2+LB1fiawfft2+fTTTzv8Q3JyMuX+++bLiuVzJT8/S31Ebt6h4+cmB1z1ra1t6hRcLRs37ZW331mny2tnpampWdatWyeLFi2S0aNHS1xcnFmzKFoCbvjYIBIIGAIULAEzFGxIoBGAoy3O6IHPii4OSXxsnDy8cqH8/OfPyKhReRIdpS0OECtGoLHr3J7W1giZPafYTpJ+4YXXpeTCJTtpeufOnXLvvfdKQUEBl4U6Q+N3EiCBGwhQsNyAg19IQF1AdA0Df+nj0MCLFy/aFmH4hcC6snDhNBkzerBmapLWZuTrLTFXgGcJqrelBerzbWplyUhPlCXfmilr122RCxcvG2fsvLpy5YoJFsc9UPvAdpEACfiXAAWLf/mz9gAmAAuL23mDZqamJkpKapK1uK21xURNu6tLj3txXa6EtmAxT2BpksE5qZKZmapixSMMIQqbmpp6zI8PkgAJhA8BCpbwGWv29A4JwMqCuCY3+FXoRGsiQ+/dcP0OyvaIHM8OpOsypf0T6uy92cbn1pijLqq2/vj8WM8ytjPTHvbseT5FAiQQ1gQoWMJ6+Nn52xHwFiXXxcXtnur+PgQCBEtrm07aESqG8KYKCCFdsGwSIRpRVzPgmnfd3ZfYuzttbXoOkdYaiTUvJhIgARIIYAIULAE8OGxaiBHwmFakTdVI5ZUGOXuuUiorKqSpuVGjvibpUkmGDM1Pk7SUGJUQAyMgrlU3SmNTm6SlxkhcDC0fIfYbx+6QQEgRoGAJqeFkZwKZACRIs+6WOXKsRD75aLNGoj0slzW2S3OTCpZEFSwZqTJ71ni5d/kcGV40WGJjLAxcv3UpQs07GzfskXMl5bJs6SwZNixbTT/NA2LZ6bdOsWASIIGQJUDBErJDy44FEgHbARMZJQcPnJZ/+Ze35cyZUpmr23wfmTBfkhJjNE5JrWzddkhefe0z2X/glPzsZ4/KpOJh6s+CBZvryS0peZxYPcHhuls66i4vDD1YcsKPvftP6dlIZ2TatAkqWHLMruPq834edXRXz/XW8RMJkAAJ9B8BCpb+Y8uSScAIeMRKpIamb5JV73ypBydekJ/85DFZrpaURBUriJ7brFukH3hwoYb/XyO/+/178s7bX2pgusdkUFay+ra0mlhAADZYadrU/wW+JxAd8HmJ6OTz4oQGpI4nL7SJfWvPC0kC1eKRPa3qQOMp2S7pMx4/GzxhvjaaNTLy5no4vCRAAiQwkAQoWAaSNusKSwIQGRERURp7pFwOHjotxcUj5e67J0lSgkaea21U1aAHA6p4yEhPkCeeWCKHDp+SAwdO6gnMpSpY0lRAeASFOciqSqmqaZZrVU0SFxspaWmx7b4nnQ5i1Poam1rl6rUGqa9v0fN6osxPJRZ+Kp0ObcSuJPwHuQKxYp7AujsKz125WqcnVYseRxAjifHR1hY46NLaEpa/yuw0CfiVAAWLX/Gz8rAgAMWiYqCmtl6tLM1qVUlQ/5QoFQItJgAgFSzp94y0OHng/rtl/76TkpQUp0+pWjBLSIRcqqhVIXNKdu8+ZJFik5MSZey4ETJt6hgZVpilAgYZ9XRpPfPw5Oky2bnziBw+fELPQ6qS9PQUGT9+pMycNVaG5mVIlIkUrVXbBo1iTTQ7S4Se+9Mqx0+UyI4dhzSM/mn1sWmR4cPzZerU8TJmTL6kq1OwPQQTDxMJkAAJDBABCpYBAs1qwpeAZ15vlYyMZD3wL0X27Tuq/irHZc6ccZKcHKVzv0doYGdQa0uzzL97gsydO0Fi8a+zDQHqItU6c0X+/dU18vmarRKrN9LTk6WkuULWr98ro0bmy1/86EEVLiP1uIAoOXDwlLz44ody9Ng5ychMkhgVR2fOXpLPP9+ulp3J8uMfPygjinKcSmkfGI9qadIdQ998vVf+9OeP5PLlqzIoO1Wi1Nqyd+9JPVNpqzz17aWy4t6ZkpoSa89Ts4Tv7zV7TgIDTYCCZaCJs76wI2DLJ2o9GZKTJkuWzJDf/J9V8qtfvSwHD86UsWOLVBRkS3Z2ui7/JOghgNESrf8qY1UPeIKzREh1bZO89vo6WaVT2S4AADlnSURBVL16oyxdOlMeeWSRPpMhLS2tsuGb3fLr37wtr776uQweMkhS9QTpf3/1MxUopfLTnz4sc2ZPlCg99KhBT0t+4421smrVehk3dphaZJZYPdcHo00iY6Ll4J5j8sc/fqhWnQj5z//pP8jE4uEWPO+sOgn/2799KP/+yie6mylFliyebA7B15/nJxIgARLoXwIULP3Ll6WTQAcB3SSkyz13qQCIllXvfqEC4nMVHW2Sqss1sJJMmjhcRo8ZocsuBRrCPskOV4yKjpPde4/q+Tvb1CIzUX74w4cld0ialqnrPrpW9NDKBXLu3GXZsmW/nD5Vrr4miVKulpF7750ty+6dozuQVPnACSUyXlasWKgWmT16mOM5qa1tVP8XPWYADrb6H8pqamxRK8w2qatvVqfgp2TxPdPVuqL1aJ6c7ByJjUuRv//738vGjXtlxozRdjaQO6Kgo5P8QAIkQAL9RICCpZ/AslgS6EygrblVUpLj5NHHFujSTLEcPnRG9uw9rtuKT8jxY2dl5/YDEhkVrcKkWJ588lsyeXKRJKrI2b7toPmlzF8wXYVMirQ01ZtfC4RETFSMniB9txSr2BlWmKFWk0h5+unlUlQ0xJxyq6vrVIC0SE1NrZw9W65+NLV6qGODnZHkKcT0iBYVJWVl19Tqc1pycwfrklOWLild1i406wu+KhFq/Umye6dPX5Tz5ytVsOgOJhVO9GTpPNL8TgIk0B8EKFj6gyrLJIGuCKgvCBxcY6PapGDoIBk6NEcW6OnPEBEVl6/IkSNnZd26XfLFl9v1lOhy+Zv/+xmZMnmEnDl9XlKS4lUsZHnUhcqEyI6Y/i1SqA63BYXZumW5xYwpSclpGkW3XMvaLRUamK6qqlGqa+rl6NGzuqW6RGZMG9HeOo+rrRaq31WwlF6V0tLLUlFxQhobqlQ8YdcQskKStEqU+sfsVYGVkZ6qJ1jXeB7DfSoWQGIiARLoZwIULP0MmMWTgCNw5uxlEw5D8zPN1wTX49QiEpeeqH4hyTJiZIHMXzBDhr+UL7/73Sr55ptdMnZ0vp5m3KLLQxESo3mRbtpSbNuUPeLjalW9fPLJNvnyyx12JlFqSrw6+6ZJXl6O+bMcP
nLKyvDkvlFpNDQ2q+WlRYVJpESrPwsOfvTkwyOIyiIyadJoGT0KS1ap2hDEh7Hi+IMESIAE+p0ABUu/I2YFJACRESkfvL9etu04Jn/5l4/I3Nlj1KcWyy0qCdQhFzM/AsMlqSVl0aLpKjo2SImGzG9QsZKmTq7nzper2GnwHIyoZg+IFgtIp+VeunRVY7aUSH7BENm+46j8+aXVMnXyaF0a+pbk5g1Sp1s9JyguQS5cKJcPV3/tJUK8RwZ1x9qW61mzp6jD7hP2HTuXTJVonRAvVyp1OUqXgQbnJKhFB1uuqVi8KfIzCZBA/xHw/MnWf+WzZBIIewIWOVaFRZs628JH5IguzTRBq+CMZAgBvNQxFoKlVd+xowfXsH0ZTrejRhZZDJcTx8+pL4vm0W3Q2CEEh90WiZGvNxyRf/n1e7J7z3HZvv2gJCbEysOPLJVitYZkZSVKuh5smKjOMFeuXNWYLNVeGgNiBJoDYeNaZciQDN1plKXbmSt1SahOMtPjNdicBpxLidJloFgNdBct677YqMJrnS4bVWu8O0/7w36ACYAESGBACFCwDAhmVkICEWpVKVY/lEz58ottGtTtlNQhyK1EqwCJUbmgwdhU0JSV1cjHuqRz9WqtbnkuVCfdWJmlwd6yB6WqT8pWFTwQLfqM5o+IjlVflysqInYY3nxd9kFqVDVkjrWNraIHQZtfy/lzFbLm850aEO681hdposfC+2t+i3Qb0WLHAEybMkodgM/IJ59ukXIVJS0tuntIY7PU1bfJpk0HVKx8qf4r1SqAENSOiQRIgAQGjgCXhAaONWsKUwLmc9LWrLt+CjWGykKNc/KRPP/8n2XZMj0heXiBBnZLUDJtKgQqZdPGPbJBT1CeO3eizJs3WbcVt8qY0UMs9srvf/++vPDCG7pleZ5k52SrkKhTEbNZnWlPyXf+wzKNZFsok4+NVEG0U1579WN1tq2TpORkaair1m3P+zT4XIX6smTLsaPnZfeuo3K3lq9eKCpeULsuM+mZRt/SOC8IPPfuqrW6zblRJk8ZZ9uwy0rL5B29lqTRdZctm2M+N1jKusmfJkzHmN0mARLofwIULP3PmDWQgCoCzxLPY48t1CixyfLue1/J6xrIzVxA1LoRoctDLerKEhcXK/fdN08ef3yRDC/K1uc8TrD33zdHhUOExm5Zp4HnXrfty3CQjY+L07yL5f7759p5QfcsnqpxWcrko482yYH/+aLEx6slRi03Q/MHy7e//S1ZuGiKvP3WGtm985BuhR6pu5UGS0tzs1pyEqS1uUlGDB+ikXAflj9rpNsPP1wvqz/6yiwwcPwtKMiV739/hUyfMcKWkDisJEACJDCQBChYBpI26wpvAuqfgjgsD62cp9aTYjlz5qIGcSuRyivXbMtwXm62jB5dIPlDs9UPRf9pqlgxJxM1gSTo9wcfnGfnBu3bf1wdci9JamqyTJw4QkaNypeEeA3xr4Jj0KAUdep9SB13p8nBAyektq5BxUqOTNB8eeqA26x57pozVncdRZqPykMPzNQlpKkqohJVhHhiqkwqLpT/8l+eNX+bY+pv06CWlgJ16MWhjbm5GuslEiYZOtyG9y8ze08CA0+AgmXgmbPGcCagoiVaLSVDNFrtEJ38Z8wcb5YVVQsW2VZ3EqsW0EMRsdzixQlOuTG6tXn4iMFSOGyIPROl36M0pkubnj9kF7BzSPMl66GJM2aMkSlTxpj/CkL9R+rSUquacGI0lsqwolytREWH1hEbB98ZrUmfa8PaEJJez8pMthOl58yZbA7Arow2LQP5uBTkQcWfJEACA0eAgmXgWLOmYCdg83n7pN6bvqh1olWj3mKrM7QCArRZUiHQqjt/VE3cJAgsh4oMLNtEqpTxPIPvml+vOwGBfCYqVFjgRGaIGjipoFxbftLsra1Nns+o1IQLPtyYWnWZCGVCXFlSQeMpQ3cUtV+68Qkfvxk+++HjA8xGAiRAAh4CFCz8TSCBASfgJn1sZ75x8vYIj+4VAe60YTmm/THL30lBuK835UM/tYDuS78OwlOGZ7u1u+pEkfves3c0HC3wpRU9q4FPkQAJhCYBCpbQHFf2qg8JeE+tmG49sVN0yvW+0cP6eiICfH3G13y3anpflIHyYcjRLUlq3VERpILLww7CzfO6VRt4jwRIgARAgIKFvwck0A0BTKY4PycCjiWaamrrpVq3CjfrpBuhM3CbW2bp5nlevk4AQgUcL5dXyZWr1/SGx0QUExNjjK/n5CcSIAES6JoABUvXXHg1jAlAqHgizcZKdna2JGssk5qaGo1jclk2btorM2eO0+3A2ep/AmtBGIO6g65H6KnTlVfq9IyjPRo35ly7lSXS+Kanp1tJfWXNuYNmMSsJkEAQEaBgCaLBYlMHjgAECybQ4uJiKSoq0gi0ZRquvlHeeWedxKlVYKkGT0tNS7E8A9eq4K2pqbFBtumxAS+/vNq2c0PoDRqUpYcpTjLRQrESvGPLlpPAQBGgYBko0qwnqAi4CXT69OmycOFCOXLkiJ7Fc0UPELws//r7VfLBh1+rYEmwYG7OATaoOjjAjcXBjWVllcoQy0EIkBcnixcv1ng08zTGDCL9wieoD5yCrCT+IAESCEUCFCyhOKrsU68JYPKElSUrK0uefPJJKS0tlQ8++EAn3Eqpra2TY8fPBORykLMM9RpAPxQAppHqx5KYmCDTpk2TZ599ViZMmECh0g+sWSQJhCIBCpZQHFX2qU8IONEyefJkee655yQlJUV9ML4w8VJVhYMBEYk2sBIEC1KgWSvQHlhS4BMEqxXECixXcLoNtLYG1oiyNSRAAo4ABYsjwXcS6IIAJtNoDfM6ZcoU+bu/+ztZunSpHiS4RU6dOiX19fVdPDHwl5xIadQQ+li2SkxMNHEVSNYW7LYaPHiwRuCdYctAhYWFFCsD/6vCGkkgqAlQsAT18LHxA0UASxmYcFesWCH33HOPNDQ02JLRQNV/q3qcYDl+/LgeeviRngo92awXeCaQrBewpkBM4d21+Vb94j0SIAES8CZAweJNg59J4DYEYG3BhIutzoGQvCf+r7/+WtauXWttW758uSQlJVkTA0m0oL14BVKbAmEc2QYSIIHbE6BguT0j5iABI+AmWW+R4G80ri3Xrl2TzZs3y969e81ReMmSJebY6u/2dVW/49jVPV4jARIgge4IULB0R4bXSaAbAoE44R44cEC2bdsmEC47duyQPXv2WAyZ2NhYWjO6GUdeJgESCC4CnpjjwdVmtpYESMCLAJxtN27cKIcOHTJxUlJSIps2bZLz58/b8ouzwng9wo8kQAIkEHQEKFiCbsjYYBLwEHBC5PTp07YcdPHiRbvR2tpqO5n279+vhw3qiYNMJEACJBACBChYQmAQ2YXwJQBBsn37dsGSkBMwoIHIvLiObc5I3vfsAn+QAAmQQJARoGAJsgFjc0kABJwAgSDB8g+sLLjmrtfV1ZnV5dixYx3XSI4ESIAEgpkABUswjx7bHrYEnDjBrqDdu3cLBAqcgXEd73jB8Rb34ePCRAIkQALBToCCJdhH
kO0POwJOlDQ1NZmvCpZ/EEkWwe1cwu6g8vJys76cOYNzj65bX1wevpMACZBAMBG4/n+4YGo120oCYUwA4gPp6NGjtpU5MzPTQt7jHSk+Pt6OEkAYfIgZWFqam5vtHn+QAAmQQLASYByWYB05tjtsCbiln5MnT0pOTo48+uijeoJ0rfzyl7+UsrIyi8T7yCOPmIjBCdOwtFRXV0tGRkbHklHYwmPHSYAEgpYABUvQDh0bHq4EIFiwO2jixIkyYcIEycvLk9WrV5vfCpjAAoOw/HfffbeMGzfOxAyWjPAcEwmQAAkEKwEKlmAdObY7rAnAX6WoqMgYOIuLWyrCxZaWFhMoBQUFHZ8tM3+QAAmQQJASoGAJ0oFjs8ObAEQKXhAmt0oQMRA3tK7cihLvkQAJBAMBOt0GwyixjSTQDQEnXLq5bZcpVm5Fh/dIgASChQAFS7CMFNtJAiRAAiRAAmFMgIIljAefXScBEiABEiCBYCFAH5ZgGSm2s88JeDup9nnhA1jg7fpxu/sD2NReVcWlrV7h48MkEPQEaGEJ+iFkB3wl4D1xe3/29flAzOdLP3zJE4h969wm1w+8u8+d8/A7CZBA6BKghSV0x5Y98yLgJjm8I4YJwtqHwqSHviDGCiLZuv7gHf2rr6/vuOaFIig/wrqCfkZH3/i/LFpdgnI42WgS6BGBG//196gIPkQCgUvATeI4HPDChQty/vx5i/xaVVVlk3ywT3joX0xMjIXor6ystIGAWNm5c6e8+eabds8xCNxRun3LME6JiYkWrXfIkCGC+DKpqan2YLCP4e17zxwkQAIgQMHC34OQJ1BaWiobNmywaLCbN28WfIf1AdaJUEgakUWaW5qlqbHJ4q00NDTI22+9Le+9914odM/6AFECYZaeni7FxcWyYsUKWbZsmQwbNsyuU7SEzFCzIyTQLQEKlm7R8EawE4BlAeLklVdekd/+9rd2WKBe0kk92HvWffshXpCampvs1X3O4Lxz9epVOX36tGzcuFH27dsnf/VXf2XHE/DogeAcT7aaBO6EAAXLndBi3qAg4JZAampq5O2335Zf//rXcuLECbM+REdH6dJCksTFxevUHlrKRV1R9SCh9iHSroVO/zwqE0tdtbXV5p+D5a+XXnpJEhIS5K//+q9tiQjjTktLUPwTZSNJoEcEKFh6hI0PBTIBN3EdOHBAVq1aZWIF7U1ISJKxoydI8cTpkpU5yELWw+LCFOgEPEKkuqZajhzdL3v27ZArV8oFghTLXjNmzJAnnnjCloYCvSdsHwmQQM8JULD0nB2fDFAC+Csb/ilbt26Vw4cP21/dUVHRMmP6XHn26b+SieOnS0x0DEwQTEFEAEL0Yuk5ee2tF2X1x2/J1auVcvLkSRvnRYsWSW5urvWGVpYgGlQ2lQTugAAFyx3AYtbAJ+CWg+BUe+rUKamoqLCtvbCofGvxg1I8YYaKmTapb6jn8kHgD+cNLcTY5ucVybIlK1WI7pOduzfb2EK0wFcJgsVZ1254kF9IgARCggAFS0gMIzvRmQAEC7Yuw+8BafDgXBkyON9ESltbsy0HdX6G3wOfAMYzIz1L0tIyOhoLR1wsDzGRAAmENgFGug3t8Q3r3nkvDUTqklBkZFS7IyrXgoL1FwM7vDxuR/qz3f8IVhX3CtZ+sd0kQAK3J0DBcntGzBEKBOhdGwqj6NUHik4vGPxIAmFBgIIlLIaZnQQBTnH8PSABEiCB4CVAwRK8Y8eWByABLE3o+sQdtay3yxm29NWFGutJuT155sbO3nn/b3ye30iABEigawJ0uu2aC6+SwB0RgGi4oFtuT5w8LIka72X0yPGSnJyu2qXr8P/I39DYIMdPHJTL5WUyvGi05OUWSmTEnf8NgbD8EEmI9mrh4rTspqZGOa5twTbggvwiKSwcqVu5Y83Xo6uORUZGSnnFJTl2/KD6+kRo+yeoY2tmt/m7KgN9OldyWp2dr8rQ/OGSmpIWMscfdNVfXiMBEhhYAnf+f8eBbR9rI4GAJwCrRJQ69O7dt1X+9wv/j/zLb/9fOawBzmD0wL3OyfKrE/CFC6flz6+8IP/jf/1X2bp9vbS2tNzRVmsIhJbWFtm7f5vsO7BdGtq3auN6fX2txip5Q/77//e38ta7f5bKynITQ121x7Vv246v5flf/Td58U//LGfPn2wPrHdz+11+73eUi5OUt+/4Rt5b/YqUlp03J+db1ef9PD+TAAmQwO0IULDcjhDvk4CPBBobG01wlJSckYOHd0tNXXW3k3arCo2Dh/dIyYWzWnqbnhzd1L7pxbO2gzD7nSd77++ezx5LylfrP5Yvv/5EajRsPcQKEp6HlaVNWtVqckBOnzlm1+ym1w+Ug6B6FZWXZf8BjSB7VePW6H8tarXxlHVzO7wetzZ6twtnGDWq5ai1G8uS97P8TAIkQAJ3QoCC5U5oMS8JdEPArB06yaekpkmmBqk7eeqIXLx4TsXAjf/EMLlHa5Td8ooyEzUpqakaHybPSvVIDY9VJkKXhrDEg6UalI137wP+oEui9Boi9kZaviizcLhlIRQI0ZCfV6iHIDbKkWP77BwebO32FhjIh7KPHtsvly5f1KWcAklOSlbLDZay9DQibQfK785l2VP39T626yU6OAMsEwmQQJ8SoA9Ln+JkYWFLQAUEJnmP/8o4jbB7WY6pf8rw4WM7rB6ODUTMERUI5eq7MnH8VDlz9oRaNFpUE0CyeM7NaVIrBXxbyi5dkLraWjvkLycnVwZlDZbY2DiL1ltVfU0qr+JMHY9lpaysBI+r70yaCQ2UWVg4Qs/YiVULy3HzsRk9cqI+q3W1J4iV+oY6PZ9nmySqUJmQM1WFS6m0aV/QHCwtNTU3S0J8ggkt95x7r1Z/FVhkEuKT9JI+0L6C5NtCkiuF7yRAAiRwewIULLdnxBwk4BMByA34oYwZXSwHD+02q8bsmQslM32QwDHWWUrqVATs0+WXpKQUGT1qgi0LOedcWDSqq6/Ilu1fqV/L1xpy/oKKhnoVHdEyKHuwLJi3TGbPWKinTcdZng2b1sm+/dvN+nLlSqUe7lgs9y1/QlKSU8ySkpiQLOPGFFtZcKgdUXRdQDlrz5mzx23JaNzYSSZKLpaWmO7AUhGWtuBIPGfWIhlWOEqXrjz9QF+wjPWFLke1tbXIwgUr7BRsiBcmEiABEugPAtdtuf1ROsskgTAigEkc1ojsQUNk7JhJ6lR7Vs6eOyERuusGyQmEc+dPqQg4IsNVPOBsHDjO2vKL/oT1Y8PmNbL6kzfVUpIqTz72A/npj/+zPP7o981p9v3Vr8qBQzvNmBEfFy+pugQVp++xsficrs+kSLQu4TjZACvLWBUs6emZJqDgowIH4evLQm3qtOtx2B0/doo97ywwWD7Crp/d6kyM52CNQcKz6CsOmDx4aI8cOLi7w+G3o2LLyR8kQAIk0HcEaGHpO5YsiQR0Mm9Va0e0TJo4w3b+HD66T8aNnWxbijHB4/7+gzvVIbZBrSETJUm3QLe2qL8InF91lw18W7bpTpuRI8bJ9575jyp+cs2qAVEyvGi
UvKA7kGD1mDRxpiycf5/MnX2P7kr670b+h9/9uWRm5FjZEBiw1sAKkqXLSGNGFatFZr0JqKzMbGlr9uzquXqtUg6p8++gQYOlqGiMLRs5sYNC4SdjRxqoQOkq4TIEGf5jIgESIIH+JEALS3/SZdlhR8CzVNJsyydDBheoJeWQ+YSY06xaLK5cKVeLxC7Jyc6VEerfYo4iMEu0z/c43G+UxnCZN+ceXdZJk7q6GvUxqZXqmmtmRYnWWCo47A87cbCTB/chSvCqq6s1fxQII48zie4RghCKjJYJ46eZRQS+M57tz3DijbY4MGWXL9gyFvxjbHeQa4yWguchsrDUox9vSjj52oQYTSs3seEFEiCBviVAwdK3PFlauBNQ4YFJPzkpVcarZQXbhbFjCJM6LChHdYvxJXWkHaPWlaxMCARP7BUInRZdThqckyePPPSsCYjLam05c+6knNDnYZXZvPULKSk5q8LB4ymCZ2BFQTJLh/6IxAev5BFQTVKkgeOG5OTLiROHTUBhp1JjY70528KJd5wuYcWqc26bChAmEiABEghEAlwSCsRRYZuCloD30gh8QjZsWmNbhqdNmas7fRJl996tEqc7bsaOnmS7d5yzreswBAd2+0CgnL9wRqPGXrPtyvHqZAvHXU9+iArfhAXkC0RRSkq6WlmmytovP1QBdFgtQCM1ONxZE1DDCkZaZFqzzNyod67bWnyrznWD7yRAAiTQ5wQoWPocKQsMdwLOqpE3pMCWhk6dOapWjQsSp5YM7LgpGjZaBUKRbR12rGA1MSfX86fkjXf+zbZFT50yR6ZMmi3paVm6hDREl30aVczolmP3kNd7V9fcbbunP+BL883Gz9t3Ly1Q35W9tow0bsxkswjB2dZbcLnnO947iRnEeXGC65bPdRTADyRAAiTQcwIULD1nxydJoBsCnh00cJSdMG6qWkt22Rk98DOp1/D5EA5YMsLuIIgbT8LyToRs2bZezpeckWee/Eu5e+5SeI6YhQRipuTCKfNdsSdUgFzf6QMXmAjPUlEXjiYoFxFoc1VAFQwdIefOndKjA/bqbqPd6qSbbQ6+sOx4l+c6hmcRk8WsL1iKai8f151/TbxajK6bYtyTfCcBEiCBviVAH5a+5cnSwp2ATuSW9A1LMThEMEe3OW/ftUG+2bRWBmtU25EWTE7/6ZnpA1IDvieRKmCa7QDChPhE2xoNQYOgcXDExa4i+LKU6nJRpDrRwmEWogFCATV6LDTt0XDtihsIlI7t0q2CbdDFE6ZpWY3y6Zp35ZyeFwRfGmzDRltdYSjX85ToMlaS1KpjL3YdIUVHR6kvDiLwRnmC0V08b+3QeLx23/PmnvZc4k8SIAES6AsCFCx9QZFlkAAI2ETvQYEpGz4nmbqFeML4KXL48D6zssC51e3GcXM8nsDSCkQAdg9duVKhvi5b5LKGykcU2qvXKlTwfKPbnb/W84KqpEoDy1VUXlKR0WwxVWJViCAi7oFDu1SEnNJToOu16HYB4WmOiQoTUKMmakyWLPlq/afmYAsLEGK4YHnHaS1rD2w7ak3JzxumXyNlx66NckrPI6qprbHIuoeO7JGtaI/uXsKhhzw7qB0030iABPqNAJeE+g0tCw4nAlgpgWUkI2OQRaFF3zHh42yf8SoKCrdvkBaNCAtnW+zKgZUDCffT0tIlXh1yITJmTJunjrAH5fN1H8hxDS6XnpZpIgWCZNLE6bJsyUrZpY67m3THEILBpaVm6A6fybb7588v/0aKNc9jD39P25FpgeRgWfGcBeTZvQQBNX6cCqgj+/RYgGkeXxovsRKvfUCZ8LeBz0yRRredO3uRfLn+E/k3PcU5XwPdITZLqfrk5OUOlTlzFkpSYrIZi9BfWGQQwA67kJhIgARIoC8JULD0JU2WFZYEsIQCh1VYT1JS0qRw6EhbgsF1hLLPzx0mDz3wbV32iTKB4LFm6BKQWkgy1NrxrXtW6vsgs7IUDB0uTz/5I9m4eZ0cPLhHnWwv6FbnXFl6z0Myc/p8PcCwRoVCoYqEFJU3MJBGqKBYrMs6g+XI0QMqFjI0RH6i7UCar2H8se4EfxmEz0dePDNj6jxJTU7XcgqsHM/uIN1WrctCEFRoU+6QQmt7XFyi3Hfv41b+1m3f6I6nAyqwMmTK5Flyl9ZbojuZsGSUpEcAoD+w2ORr++Abg2MKmEiABEigrwhQsPQVSZYT1gQw6SMQ3GhdcoFzLSZ/CBZYHbDkMmfWYuMDywpinXhETqsGh8vQiX+JCRw8h3KwzbiocLTFSUE5cN6NVp+VRvVlwdlAKx94xhxhG7UsiIR4FRVTJ8+VWXrGEJ5HYDgIqDl6jhES6kSANyesBms8lqH5w/VZBJxrtjyQMtYHjXY7asQE6wPKQPtTktLUsvOwLF5wv7apUfsTa6/mJgiuQZ5+ar0oa9SI8ba05fqCOplIgARIoC8IULD0BUWWERQE4ONqfq791FqPcyyWenT6v2GiblMRUWe1eq5fn8Thu+K5d/0ZlIMEnxa8IFqcsMD1mppGK9+V5crAycqubty7XueNrmoQORAUyOMpA6V6kqfu631AN+D829LgEWA4dBHCBgcyIuF57CLSD/bdPe8p93o/7SZ/kAAJkEAvCFCw9AIeHw1sArAODGS61STtItJ21Z7O95yIQPvxwnd3Dc9H6NJS54Qy2jVDx63O5bobnctz1/HuqedGoeFdt7MceV/zrrir573L74vPHa3Dh4Ed4r5oPssgARLoIQEKlh6C42OBTcDO7lGHVjexNujZO9gajBluoIVMb0kFWnv91R5XL5aq8HIJu5TgvMxEAiQQ2gQoWEJ7fMOud06gxMfH6y6adNux09jQKKWlJXqmz1HbsROnPiWY/FzesIMUpB12guV8yWm5XF7W0YuMjAx1dlYn5M4mpo4c/EACJBAKBChYQmEU2YcbCGBig2AZM2aMBmobrOfxVElF+WX5bO37tu24eMKMjq3HNzzILwFNAMtR2JW0+pO35aRu+cY4w6dm3LhxkpubS8ES0KPHxpFA7wlQsPSeIUsIUAJz5syRWbNmaSj6c9JQ3yB79m7ToGzlMlGjvWZpPBL+RR6gA9epWbqIZxuysX36oB4ncOz4YdsJBcEyfvx4ueuuuyQrK8ue4ph2gsevJBBCBChYQmgw2RUPATdpjR07Vp544gn9a/yk7NyxU31YmjSc/Al7ueUFMgseAhhXjJsb37y8PHnyySdlxowZwdMJtpQESKDHBChYeoyODwYDgaVLl0p1dbX84Q9/kN27d8vVq1dtW66b9IKhD2yjh4ATLAkJCVJYWCjf/va37ZWd7bGWcUz5m0ICoU2AgiW0xzfse5eUlCRPPfWUDB8+XN5//33ZtWuXLgtdMdHSGzjYhYRYJDU1GuVV64DPDOKThGsCj2vXqsyKlZqaYucL9bUVC4IEnEeNGiX333+/LF68WM9qyjTkFCvh+pvHfocTAQqWcBrtMOur+4sc217nzZtnSwclJSVy6dKlGwKx3SkWTMQxMTGydetW2bZtm8yfP1+Ki4utzHCcOB2Pjz
76SMAXVq2hQ4f2uSULbNPS0qSgoMB2BblxC0fmru98J4FwIkDBEk6jHYZ9xWSGFyZV7CiBpQWv3iSUBX+Y9evXy759+2TBggUybdo0C1ffm3KD8VmwAF/sxHrttdfk6NGj8thjj8ns2bM1Si+C2XWEeevT7rl6+6v8Pm0sCyMBEugTAjfG7O6TIlkICQQeAUxsbnLDZNfTF7bWIp05c8YsLFhi2rx5s1kWUCaWhXpadjA+5/oL/6CdO3fK9u3bZceOHSZgwMnd78u+odz+FEMon4kESCDwCNDCEnhjwhb1IwFv4dKbarAUtH//fjvnB5M0PsMRNNwmUvBsaGiQjRs3yuHDh02o4POyZctk5syZJhKdUOwNbz5LAiRAArSw8HeABHwkACsBJl/sNIL/yunTp+37kSNHzKqA60jIFw7J9RMctmzZImVlZcYDFpY9e/bYyc7hwIF9JAESGBgCFCwDw5m1hAABN0Hv3bvXBAp2CMGigndYFeC/EW4JSz4QKwcOHDChBkF34cIF44GAfUiOW7ixYX9JgAT6lgAFS9/yZGkhTMAtf2zatMmWP9BVNxnDhwMvLI+EQ3L9rqysNB8e+PS4a+i/95JZOPBgH0mABPqfAAVL/zNmDSFAwE3Gp06dMosCtkYjuesXL16UDRs2yNmzZ2+4bl9C+AdEGhyP6+rqrJewuCDB2gTfHsS8YSIBEiCBviBAwdIXFFlGyBOAMMFkDN8V7+UPXHdOpfDdcI64IQ9EO4jAeVgKgw+PE27oN3hAwMASFY7LZOEw9uwjCfiDAAWLP6izzqAi4ERJRUWFWVfgm4HAcU6o4B3fcWYRRAuWSZC8J/Gg6vBtGuv6dfz4cRNwcDZGcD6XoqKijAccb8Npmcz1n+8kQAL9Q4CCpX+4stQQJAAxcvDgQZkwYYJFzU1NTbVeIjQ/tvCOGTPGJujOFocQRGFRfRF/prS01KIIjxs3zkQK+pqTk2PX0tPTzb8FQo6JBEiABHpL4PqfRb0tic+TQIgSgAWlublZ4KcyceJEeeCBB+TQoUPy/PPPmzUlJSVFvvvd78rgwYNl3bp1Ul5ebvFZvK0OoYLGWZtwoCQsK4sWLZKHHnpI3nrrLYHFpbGxUYYNGybPPfec+a/AvwXbncePHx8qCNgPEiABPxGgYPETeFYbXAQwEeM8ouXLl5swgdXALY3gHVaWhx9+2CwvCNsPgYNlolBLbhkM/XrwwQclKyvLzvV57733OnjA1wfWp5UrV5qgwZEITCRAAiTQWwIULL0lyOfDgkBiYqKdEuxESnedxsF83pN6d/mC/XpGRoZgyQcJAq1zAgO8cG4TYtUwkQAJkEBvCfD/JL0lyOdJQAm4CRowbidqQgWY66d33737hutILp/3PX4mARIggTslQAvLnRJj/rAl4Cbg2wHwNd/tygn0+90JFe92+5LHOz8/kwAJkEB3BGhh6Y4Mr5MACZAACZAACQQMAQqWgBkKNoQESIAESIAESKA7AhQs3ZHhdRIgARIgARIggYAhQMESMEPBhpAACZAACZAACXRHgIKlOzK8TgIkQAIkQAIkEDAEKFgCZijYEBIgARIgARIgge4IULB0R4bXSYAESIAESIAEAoYABUvADAUbQgIkQAIkQAIk0B0BCpbuyPA6CZAACZAACZBAwBCgYAmYoWBDgplAuES3vdUYkcGt6PAeCZBAbwlQsPSWIJ8PSwLek7P357CEoZ3uzIDnB4XrbwL7TQL9R4BnCfUfW5YcogQwOSckJEh2drY0NDRITk6OxMfHW287T9whiuCmbqWkpEhubq7U1tZKZmamxMbG3pSHF0iABEigNwQoWHpDj8+GHQEnSCZPnix/8Rd/IVeuXJHU1FQZP3582LKIioqS+fPnS1JSktTX10teXp4MGzbsJqtL2AFih0mABPqUQISabtv6tEQWRgJhRMD983FCJoy6flNXw5UFRNof//hHef/9983iNnHiRPnhD38okyZNMtHG342bflV4gQR6RICCpUfY+BAJiLgJ2rEI54kpnFmg71VVVbYc1traasthsLpxWcz9y+A7CfQNAQqWvuHIUsKIQOfJ2bvr4SBavPvvS3/vNL83z0D7fKu+uHtdMbnVvUDrI9tDAoFKgD4sgToybFfAEXCTjntHAzE5eX93je5q0nL3QuEdfb6TPsLy4PK792DkgH7jFRl54wZL79+BzmzwPVT6H4xjxjaHDgEKltAZS/akHwlg0sGrqalJKisrpaysTK5evWrfo6OjzfF28ODBtkMmJibGWhLME/OtUNbU1Mi1a9ckOTlZsDuou36CFxLyX758WcAFO4iwwyoYE/qDfqM/aWlp1n/Xj+4Y4H5jY6NUVFQInJMzMjKMg3uO7yRAAr4ToGDxnRVzhikBJ1YgUnbs2CEbN26UXbt2ydmzZ21XDHwVhg4dKtg5dNddd8nMmTMF4gUT1K0msmDD6QTI3r175fPPP7e+Llq0SCDYkLz76phB3K1bt07Wr18vU6ZMkeXLl9suIu+8wcRh+/bt1pe7775bFi5cKHFxcd023/E6dOiQOeQOHz5c7rvvPhMtnXl1WwhvkAAJdBCgYOlAwQ8kcDMBN/GeOHFCXn/9dXnzzTdNpECgYBdIYmKi1NXVmQUBu0Q++ugjWblypTzzzDMyevRom8yDdXK+mYbnyoEDB+Tll182p1JM3E6wuPyOGawqq1atkt/97ncm4ObNm2cWGZcvmN6d+KiurjbxcfHiRRvfwsJCE2rdjXFzc7N89tln8qc//Ul+9KMfmYgNpn6zrSQQSAQoWAJpNNiWgCLgJqkzZ87Ib37zG1m9erWMGTNGnnrqKYs7MmTIEJu0sUyEyXnDhg3y2muvySuvvGLxWZ577jkZNWqU9am7CS2gOuxjY+C/AeuRtwXJ9c+JldLSUnnjjTeMxaBBg+Sf/umfpKCgwCwSLq+P1QVUNljPIFT37NkjR44cEQiWrpLzWQGHnTt3WnDBWbNm2dIh8gczg676y2skMBAEKFgGgjLrCDoCTqzAX+Gdd96Rjz/+WObMmSN/8zd/I8XFxR39QT5YGCBeHn30URk7dqw8//zz8umnn1rwNPxVnZ6ebv4voTJJoc+YkB0jwPD+XFJSIi+99JK89dZbZoXwFm4d4ILsA8YOfUQ0XyxtYVlw3759AgsTrGy419X4QtgcP35cZs+ebSy6yxdkONhcEvALgRtd3f3SBFZKAoFHwE3AmJQ+/PBDgZUAwcAgVnDPTTyYpNxEhWsTJkyQH//4xwJ/hQ8++MD+unZlBV4v+65Fjsnp06flX//1X826Ap+ev/3bv5W5c+daRd6s+q7mgSvJjTmsLIjmC38W9Ler8UVfcWzD1q1bzUkbVhmI2mBnMHC0WRMJ3EyAguVmJrxCAjaxwP/g66+/llOnTsnSpUvNqRZoMOl03taKa26JZNq0aQJnVDjlwjkXkVCRuprY7EaQ/0C/YHE5evSo/OpXv5L33nvPRAqsUbBGIIFPsCfXBxzDMG7cOLOwHDx40Pru3Tc3zvi9wfhj2QjRb/H7wUQCJNBzA
hQsPWfHJ0OUgJtwcE4QLCw4I2fq1Km2lRVddhNXV93Hs9i+63YKYYcI/BhcmV09E6zX0Ce8IFYwcf/zP/+zrFmzxsTdz3/+cztfKZT67cY9KyvLxhfblXfv3m1bnTGGnfsKsQLRgt8F+DK554N1vNluEvA3AQoWf48A6w84Am7igdBA/Aws72AJANfdvVs1GhNTUVGRPXPu3Dm5dOnSrbIH5T3HAQ7HcCr9x3/8R9tFNWPGDPnJT35i/hroGCxRoThRQ4SMGDHC+o4dZI6He4fv07Zt2yxOD6xMWFJECkUW1jH+IIEBIEDBMgCQWUXwEcDE44KEIdgXAqQh3W7CcfcRWAzOmOXl5RZgLvgI3LrF6CcsDJiUsQNo06ZNtmMKy1/Y+tvS0nLrAoL0rhtfWEzgr4SdQvv37+/orxMs7jqWj5CPiQRIoPcEKFh6z5AlhCgB+LBg4oXvQWefldt1GfnxggUC5YRawsQNp9N/+Id/EFgYvvvd78rDDz9swgU7hM6fP2/izk3godZ/iFEECcQ7ln6wrd31FUtkcLaFDxMsTrC2OaETahzYHxIYSALc1jyQtFlXUBGIj4/vsBrAmnAnCTtEIFbg/4JyQilBiLmAaNg19bOf/czECkRKbW2tbQHPz8+X733vewJ/D0zkoThhT58+3awn2LoMh2NEN0bCkQ1YJsPRBfB98tU6F0q/I+wLCfQHAVpY+oMqywx6AphgsRSUmpoqiGoKX5ZbJUzK7oV88FuB0252drbFYbnVs8F2D2zwwk6ZX/ziF/L4448bJyx9YOs3dsW8+uqrFvW3qqqqw/IQbP3srr1OfCHaMfxT8PuB6L8QtbgHR+3Dhw/bPcTlYSIBEugbAhQsfcORpYQQATch4S9mONvCtI9lj+6WdrBsBGuK89uAcIEPAxxuMalBtLgyQwETljyQHnjgATsbB1Yk9A+v+fPnyw9+8ANj8Yc//EG++eYbi0cCJqGU0B8sFSIgHKxIWB5DwDz8jmzZssUEK6wrCDSHFErjH0rjyL4EFwEKluAaL7Z2AAi4yQXWFZj9MTkhsumFCxfss5uwcR0vBA/DYYDur2wXph87RRB/A4IFyZU7AF3o1yrQZywLYckDh/+hX/iOhEl8xYoV8p3vfEdOnjwpL774ojmlOlb92rABLNyNJQLCYYyxLIQt7LC24IBMCF1YX3AwJhMJkEDfEKAPS99wZCkhSACTLA7sQ0h+7ILBqcMIv4+J2k3AmLhcwDQIk6efflpw9tBXX31l/gv4CxxxWUItuf579wssIObgs4HzluDT8vbbb8sf//hHWz6Cg6qb6L2fC8bP6AcYZGZmmqjFeEOwYpcULHILFiywrd2h0t9gHCO2OfQIRP03TaHXLfaIBHpHwE00mJAwCeNgQ/glYDLOycmRhISEjskXvi6wMOBUXogaTF6YzHCOECYuJ1hcmb1rmf+fxq4YWJwQ0RdiDmcpub7hHX0HJzjeYlkM1qeRI0daLBIsHyG5/P7vTc9bgH6iH3jBqoK4PfgdKSsrk8cee8wCxrn7Pa+FT5IACTgCFCyOBN9JoAsCmHDgh4JJGaIFlhY402IXEHaDYPkHsVYgamBZQVwSTFgLFy40Z1T4MEDMoJxQSYjuChYQLLAggQ2S66N7h7CDmMNSCc5VggVquAbhw64plycUmECcIdIvxCqsLAgqB8GC/qOfodTXUBgv9iF4CXBJKHjHji3vZwKYaPBXNHxZnn32WbMQvPbaaxbR9Y033rAJGBYD+KrAdwGiBbE54MeB5RDkwecxY8aYL0OoTFywLkF8OGfbrobB9RVOuBB1OARx9erVxgJWmVAQce73A0uE6Cdir+AaTnAuYuyVrn4teI0EekWAFpZe4ePDoU7ATbxwLsW2XVgUsG0XkzZ2hGBnECYsnEyMv6p/+tOfysqVK82X4ZNPPjHrA5wy4b+B5MoLZm7oA3ZQwcIC51KIsu76hXtYDkL/YakaNmyYPYvroZQQ2Rjh9xEobtmyZcYFTLrjEkp9Z19IYKAIROhfkKG133CgyLGesCLQ+Z8JrClYFoJoweQLQYN35MMkBasLfFkgbGbNmmWiJlSAebPwZUL2zg8GvjwTbKy8+xiK/Qu28WB7Q5MABUtojit71U8EMDHhhUnJe2Jy11Gt9/WuvvdT0wasWNfXzgxu1QAXoyYUloK66qdjgnt3wqWrsniNBEigawIULF1z4VUSuC0BTFLeqbNQcfc7X/d+hp9JgARIgAR8I0DB4hsn5iIBEiABEiABEvAjAUa69SN8Vk0CJEACJEACJOAbAQoW3zgxFwmQAAmQAAmQgB8JULD4ET6rJgESIAESIAES8I0ABYtvnJiLBEiABEiABEjAjwQoWPwIn1WTAAmQAAmQAAn4RoCCxTdOzEUCJEACJEACJOBHAhQsfoTPqkmABEiABEiABHwjQMHiGyfmIgESIAESIAES8CMBChY/wmfVJEACJEACJEACvhGgYPGNE3ORAAmQAAmQAAn4kQAFix/hs2oSIAESIAESIAHfCFCw+MaJuUiABEiABEiABPxIgILFj/BZNQmQAAmQAAmQgG8EKFh848RcJEACJEACJEACfiRAweJH+KyaBEiABEiABEjANwIULL5xYi4SIAESIAESIAE/EqBg8SN8Vk0CJEACJEACJOAbAQoW3zgxFwmQAAmQAAmQgB8JULD4ET6rJgESIAESIAES8I0ABYtvnJiLBEiABEiABEjAjwQoWPwIn1WTAAmQAAmQAAn4RoCCxTdOzEUCJEACJEACJOBHAhQsfoTPqkmABEiABEiABHwjQMHiGyfmIgESIAESIAES8CMBChY/wmfVJEACJEACJEACvhGgYPGNE3ORAAmQAAmQAAn4kQAFix/hs2oSIAESIAESIAHfCFCw+MaJuUiABEiABEiABPxIgILFj/BZNQmQAAmQAAmQgG8EKFh848RcJEACJEACJEACfiRAweJH+KyaBEiABEiABEjANwIULL5xYi4SIAESIAESIAE/EqBg8SN8Vk0CJEACJEACJOAbAQoW3zgxFwmQAAmQAAmQgB8JULD4ET6rJgESIAESIAES8I0ABYtvnJiLBEiABEiABEjAjwQoWPwIn1WTAAmQAAmQAAn4RoCCxTdOzEUCJEACJEACJOBHAhQsfoTPqkmABEiABEiABHwjQMHiGyfmIgESIAESIAES8CMBChY/wmfVJEACJEACJEACvhGgYPGNE3ORAAmQAAmQAAn4kQAFix/hs2oSIAESIAESIAHfCFCw+MaJuUiABEiABEiABPxIgILFj/BZNQmQAAmQAAmQgG8EKFh848RcJEACJEACJEACfiRAweJH+KyaBEiABEiABEjANwIULL5xYi4SIAESIAESIAE/EqBg8SN8Vk0CJEACJEACJOAbAQoW3zgxFwmQAAmQAAmQgB8JULD4ET6rJgESIAESIAES8I0ABYtvnJiLBEiABEiABEjAjwQoWPwIn1WTAAmQAAmQAAn4RoCCxTdOzEUCJEACJEACJOBHAhQsfoTPqkmABEiABEiABHwj
QMHiGyfmIgESIAESIAES8CMBChY/wmfVJEACJEACJEACvhGgYPGNE3ORAAmQAAmQAAn4kQAFix/hs2oSIAESIAESIAHfCFCw+MaJuUiABEiABEiABPxIgILFj/BZNQmQAAmQAAmQgG8EKFh848RcJEACJEACJEACfiRAweJH+KyaBEiABEiABEjANwIULL5xYi4SIAESIAESIAE/EqBg8SN8Vk0CJEACJEACJOAbAQoW3zgxFwmQAAmQAAmQgB8JULD4ET6rJgESIAESIAES8I0ABYtvnJiLBEiABEiABEjAjwQoWPwIn1WTAAmQAAmQAAn4RoCCxTdOzEUCJEACJEACJOBHAhQsfoTPqkmABEiABEiABHwjQMHiGyfmIgESIAESIAES8CMBChY/wmfVJEACJEACJEACvhGgYPGNE3ORAAmQAAmQAAn4kQAFix/hs2oSIAESIAESIAHfCFCw+MaJuUiABEiABEiABPxIgILFj/BZNQmQAAmQAAmQgG8EKFh848RcJEACJEACJEACfiRAweJH+KyaBEiABEiABEjANwIULL5xYi4SIAESIAESIAE/EqBg8SN8Vk0CJEACJEACJOAbAQoW3zgxFwmQAAmQAAmQgB8JULD4ET6rJgESIAESIAES8I0ABYtvnJiLBEiABEiABEjAjwQoWPwIn1WTAAmQAAmQAAn4RuD/B4yraexiarj1AAAAAElFTkSuQmCC)", "_____no_output_____" ] ], [ [ "# Function for scaling the dot product attention \n\n# That is passing in the query , key and value to our attention \ndef scaled_out_product_attention(q , k , v , mask):\n\n '''\n Calculate the attention weights\n\n - q , k , v must have a matching leading dimensions \n - k , v must have matching penultimate dimension, i.e seq_len_k = seq_len_v \n \n The mask has different shapes depending on its type(padding or look ahead) but its broadcastable for attention \n\n\n Args: \n q: query shape == (..... , seq_len_q , depth)\n key: key shape == (..... , seq_len_q , depth)\n v: value shape == (....., seq_len_v , depth_v)\n\n mask: Float tensor with shape broadcastable to (.... , seq_len_q , seq_len_k) \n\n Returns: \n output , attention_weights\n\n '''\n\n\n # The first matrix mult for between query and key \n matmul_qk = tf.matmul(q , k , transpose_b= True) # (... , seq_len_q , seq_len_k)\n\n # Now scale the above matrix mul output \n dk = tf.cast(tf.shape(k)[-1] , tf.float32) # getting the last shape index of k, and casting into float32\n scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n\n # Adding the mask vector to the scaled tensor \n if mask is not None: \n scaled_attention_logits += (mask * -1e-9)\n\n # Apply softmax \n # Softmax is normalized on the last axiss (seq_len_k) so that the scores add upto 1 \n attention_weights = tf.nn.softmax(scaled_attention_logits , axis = -1) # (..., seq_len_q, seq_len_k)\n\n # Now the matrix multiplication between our values and softmax applied attention weights\n output = tf.matmul(attention_weights , v )\n\n # It returns the scaled attention weights and the attention_weights\n return output , attention_weights\n\n", "_____no_output_____" ] ], [ [ "As the softmax normalization is done on K (keys), its values decide the amount of importance given to Q. \n\nThe output represents the multiplication of the attention weights and the V vector. 
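For instance, in the toy example below, a query that lines up only with the second key gets attention weights of [0, 1, 0, 0], so the output is simply the second row of V ([10, 0]).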
This ensures that the tokens you want to focus on are kept as-is and the irrelevant tokens are flushed out.", "_____no_output_____" ] ], [ [ "def print_out(q , k , v):\n\n # Applying the above dot product function\n temp_out , temp_attn = scaled_out_product_attention(q , k , v , None)\n\n print(f'Attention weights are: {temp_attn}\\n')\n print(f'Output is (matmul of v and attention weights): {temp_out}\\n')", "_____no_output_____" ], [ "# Using the above function visualizing our attention weights \nnp.set_printoptions(suppress= True)\n\n# Creating the v and k \ntemp_k = tf.constant([[10 , 0 , 0] , \n [0 , 10 , 0] , \n [0 ,0 , 10] , \n [0 , 0 , 10]] , dtype = tf.float32) # (4 , 3)\n\ntemp_v = tf.constant([[1 , 0], \n [10 , 0], \n [100 , 5], \n [1000 , 6]], dtype = tf.float32) # (4 ,2)\n\n\n# Creating the query, and its aligned with the second `key`\ntemp_q = tf.constant([[0 , 10 ,0]] , dtype =tf.float32) # (1, 3)\n\n# Using our print function \nprint_out(temp_q , temp_k , temp_v)", "Attention weights are: [[0. 1. 0. 0.]]\n\nOutput is (matmul of v and attention weights): [[10. 0.]]\n\n" ] ], [ [ "### **Multi-Head Attention** \n\nThe multi-head attention consists of four parts, \n- Linear layers \n- Scaled dot-product attention \n- Final linear layer \n\n\n**Inputs of multi-head attention block:**\n- Each multi head attention block gets three inputs, [Query, Key, Value]. \n- Then these are put through (Dense) layers before the multi-head attention function. \n\nThe (Q , K , V) are passed through seperate linear (Dense) layers for each attention head. And we specified number of heads as `num_heads` time as outputs. \n\n**The output shape will be** -> `(batch , num_heads , ....)`\n\n**Scaled dot product attention**\nThe `scaled_dot_product_attention` function is defined above is applied in a single cell. An appropriate mask must be used in the attention step. \n\nThe *attention output for each head* is then concatenated (using `tf.transpose` and `tf.reshape`) and put through the final `Dense` layer. \n\n\nSince its a multi-head attention so we want to get the N number of heads Q , K and V are split into multiple heads because it allows the model to jointly attend to information diferent representation subspaces. \n\n\nAfter the split each head has a reduced dimensionality, so the total computation cost is the same as a single head attention with full dimensionality.\n\n\n\n**Reading List** \n- https://data-science-blog.com/blog/2021/04/07/multi-head-attention-mechanism/ To know how the multi-head attention works (in and out)\n\n\n", "_____no_output_____" ] ], [ [ "# Coding out the Multi-Head attention!! 
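# Shape walkthrough for the class below (assuming the demo values d_model = 512 and num_heads = 8,
# so depth = d_model // num_heads = 64):
#   q, k, v after the Dense projections : (batch_size, seq_len, 512)
#   after split_heads                   : (batch_size, 8, seq_len, 64)
#   after scaled dot-product attention  : (batch_size, 8, seq_len_q, 64)
#   after transpose + reshape (concat)  : (batch_size, seq_len_q, 512)
# Splitting into 8 heads of depth 64 keeps the total compute close to a single
# full-width attention head, while each head can attend to a different subspace.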
\n\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n\n def __init__(self , d_model , num_heads ):\n super(MultiHeadAttention , self).__init__()\n self.num_heads = num_heads \n self.d_model = d_model # the dimensions \n\n assert d_model % self.num_heads == 0 \n\n self.depth = d_model // self.num_heads\n\n # Creating our key , value and pairs \n self.wq = tf.keras.layers.Dense(d_model)\n self.wk = tf.keras.layers.Dense(d_model)\n self.wv = tf.keras.layers.Dense(d_model)\n\n self.dense = tf.keras.layers.Dense(d_model)\n\n \n # A function that will split the head for N number of times \n def split_heads(self , x , batch_size):\n\n '''\n Splits the last dimension into (num_heads , depth)\n Transpose the result such that the shape is (batch_size , num_heads , seq_len, depth)\n '''\n\n x = tf.reshape(x , shape= (batch_size , -1 , self.num_heads , self.depth))\n return tf.transpose(x , perm= [0 , 2 ,1 ,3]) # perm helps us to structure our elements inside the tensor \n\n \n # Forward pass \n def call(self , v , k , q , mask):\n batch_size = tf.shape(q)[0]\n\n # Getting the k , v and q \n q = self.wq(q) # (batch_size , seq_len , d_model)\n k = self.wk(k) # (batch_size , seq_len , d_model)\n v = self.wv(v) # (batch_size , seq_len , d_model)\n\n\n # Applying spplit heads , depending upon the number of heads \n q = self.split_heads(q , batch_size) # (batch_size , num_heads , seq_len_q , depth)\n k = self.split_heads(k , batch_size) # (batch_size , num_heads , seq_len_k , depth)\n v = self.split_heads(v , batch_size) # (batch_size , num_heads , seq_len_v , depth)\n\n\n # Now we're onto the scaled attention, where we wiill scale our attention weights \n \n # Lets look at the weigths\n # shape of scaled_attenttion_weights.shape --> (batch_size , num_heads , seq_len_q , depth)\n # shape of attention__weights.shape --> (batch_size , num_heads , seq_len_q , seq_len_k)\n\n # Getting the scaled attention weights and attention weights value from the function \n scaled_attention , attention_weights = scaled_out_product_attention(q , k , v , mask) \n\n scaled_attention = tf.transpose(scaled_attention , perm = [0 , 2 ,1 ,3]) # (batch_size , seq_len_q , num_heads , depth)\n\n \n # Now we got the attention weights lets concatenate along the num_heads so we will concatenate all the heads \n concat_attention = tf.reshape(scaled_attention , shape = (batch_size , -1 , self.d_model)) # (batch_size , seq_len_q , d_model)\n\n # Atlast putting our concatenated attention into a dense layer \n output = self.dense(concat_attention) # (batch_size , seq_len_q , d_model)\n\n # Returning the output(scaled one) and the attention weights \n return output , attention_weights", "_____no_output_____" ] ], [ [ "Lets create a `MultiHeadAttention` layer to try out, wehre at each location in the sequeunce it runs all 8 attention heads across all other locations in the sequence, returning a new vector of the same length at each location. \n\n", "_____no_output_____" ] ], [ [ "# Dummy multi-head attention \ntemp_mha = MultiHeadAttention(d_model = 512 , num_heads = 8)\n\n# Dummy input\ny = tf.random.uniform((1 , 60 , 512)) # (batch_size , encoder_sequence , d_model)\n\nout , attn = temp_mha(v = y , k = y , q = y , mask = None)\nout.shape , attn.shape", "_____no_output_____" ], [ "out", "_____no_output_____" ] ], [ [ "### Point wise feed forward network \n\nIt consists of two-fully connected layers with a ReLu activation in betwee. 
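In equation form (from the original Transformer paper) the block computes FFN(x) = max(0, x·W1 + b1)·W2 + b2, where W1 projects from `d_model` up to `dff` (e.g. 512 -> 2048 in the demo below) and W2 projects back down to `d_model`.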
", "_____no_output_____" ] ], [ [ "# Creating a function for point wise forward network \ndef point_wise_feed_forward(d_model , dff):\n return tf.keras.Sequential([\n tf.keras.layers.Dense(dff , activation= 'relu') ,# (batch_size , seq_len , dff) \n tf.keras.layers.Dense(d_model) # (batch_size , seq_len , d_model)\n ])\n", "_____no_output_____" ], [ "# Applying on a sample data \n\nsample_ffn = point_wise_feed_forward(512 , 2048) #(dimensions and dff)\n\n# Passing a dummy data \nsample_ffn(tf.random.uniform((64 , 50 , 512))).shape", "_____no_output_____" ] ], [ [ "## Encoder and Decoder \n\n- The input sentence is passed through `N` encoder layers that generates an output for each token in the sentence. \n- The decoder attends to encoders output and its own input (self-attention) to predict the next word. \n\n### Encoder Layer \nEncoder layer consists of sublayers:\n- Multi-head attention (has padding mask) \n- Point wise feed forward networks. \n\nReach of these sublayers has a residula connections within and its followed by a layer normalization. Residual connections help in avoiding the vanishing gradient problem in neural networks. \n\n- The output of each sublayer is `LayerNorm(x + Sublayer(x))`. \n- The normalization is done on the `d_model` (dimension) the last axis. \n- There are N encoder layers in the transformer. \n\nLets code them!\n", "_____no_output_____" ], [ "![Screenshot 2021-10-04 at 6.00.52 AM.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAARwAAAGLCAYAAAAVly0bAAABSWlDQ1BJQ0MgUHJvZmlsZQAAKJFjYGASSSwoyGFhYGDIzSspCnJ3UoiIjFJgf8rAxcDCwMogzyCUmFxc4BgQ4ANUwgCjUcG3awyMIPqyLsisW4ujdWbPsdryhvvgc8+rzZcx1aMArpTU4mQg/QeI05MLikoYGBhTgGzl8pICELsDyBYpAjoKyJ4DYqdD2BtA7CQI+whYTUiQM5B9A8gWSM5IBJrB+ALI1klCEk9HYkPtBQFep9S8QAX3cCMTc1MPAu4lGZSkVpSAaOf8gsqizPSMEgVHYCilKnjmJevpKBgZGBkyMIDCHKL6cxA4LBnF9iHE8pcwMFh8Y2BgnogQS5rCwLC9jYFB4hZCTGUeAwN/CwPDtkMFiUWJcAcwfmMpTjM2grB57BkYWO/+//9Zg4GBfSIDw9+J////Xvz//9/FQPNvMzAcqAQAaGZibBy3TXsAAABWZVhJZk1NACoAAAAIAAGHaQAEAAAAAQAAABoAAAAAAAOShgAHAAAAEgAAAESgAgAEAAAAAQAAARygAwAEAAAAAQAAAYsAAAAAQVNDSUkAAABTY3JlZW5zaG90HTndwwAAAdZpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDYuMC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iPgogICAgICAgICA8ZXhpZjpQaXhlbFlEaW1lbnNpb24+Mzk1PC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjI4NDwvZXhpZjpQaXhlbFhEaW1lbnNpb24+CiAgICAgICAgIDxleGlmOlVzZXJDb21tZW50PlNjcmVlbnNob3Q8L2V4aWY6VXNlckNvbW1lbnQ+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgoqtq9DAABAAElEQVR4Aey9BWBcx7k2/IqZmSWTzBwzYwyJEzsOMzZp2qRwU7h/uV+b3kKggQYajhM7ZIqZGSWDDDKJLFmyJVksywL/zzOrXdFKWsnWWjCTyLt7zpk5M++Zec7LY3MNRa6j5Ofny7p16+TTTz+VQ4cOCX8XFhbKdTZ7HT3SVTUFNAWuhwL29vbi6ekpAQEBMmnSJHnqqadkwIAB4ujoeD3Nqro2rQWcyspKOXHihLzzzjuyZs0ayczMlKKiogYdsrGxQee9JDAwULy9vcTXz08NxM7WDtdeF9Y1uJc+0PEoYGNjK6VXrsjx48fk6JEjagC2trYSHR0jw4YNEzc3N7y8qjrewNptj22koqJCLl++LJcuXZS8vDzJzs7B79wGPeba9fHxkaioKHn44YflkUceUb8bXNiCA/YtuNZ0aUlJiWzevFlee+01iY+Pl5ycnDocDTvq6+sr/fr1l1tuGSF9+/aRSHTax8db3DGBHBydxMbUmv7SlSnAuXIpO1vefvttSTh6VM0jOzt76d6jh7zwwgsSHh4uVVUacG70HLl69aoUFxdLLoDnfFqaHDt2TA4ePIjPBLl48aK6HaWU3NxcKSgokL///e/qmpdeekm6d+8ufCm0prQYcIiI3333nQKbM2fOqE4bb0xWjJzM2LFj5dZZs2XI4MESEBgkri7OwnO2duRqdNEUqKGAXfXEdXFxMR0EBokT2Hc/P38JCgqSCnDTurQNBapA26FDh8q06dPA8WTL4cOH5L/vvy+HwW0WAmhYyBFlZGTIN998oySZ3/3udzJo0CBxcHBocadaBDhE
u88//1z+/e9/S0pKihAljYWs16hRo+X+++9TXA3lP3bIBhPqGt5QREsOThdNgfoUqMLcqK/zo2aRohS5Gz1v6lPsxv52ADPg7e0jXlB9REVGQl8zUDasXyeLFi2SxMREKSsrUzekGLZt2zb5yU9+Ii+//DLW+S0t1utYDDhkq5YvX67AJikpSaEee0HOJSIiUu5/4AFZuHChREZEQGRyNIHMNQ0yN3Z26NY0BW4wBQj2xnVKJqFXz54SHhYGBmIURN3/yNq1a5Suh7clDlD0+u1vfyv//Oc/lTLZrgWSi0WC2BUo9bZv3y6vvvqq1AYbssGDBg2W3/zmN/KDHzwr3bp1UwDEN1L9N9YNppFuTlNAU6ANKKDABwDk5uomQ4YMld///vfy3A+fl5CQEKG+jaW0tFT27t0rf/7zn+X8+fMtWuvNAg5Z2uPHjyuwOXnypImzIdgMHDhQfv3rX8u8efPAjnmYuJo2oINuUlNAU8CKFKiCOEuACQOn88Mf/lB+9KMf1wEdKpw3bNggb775pon7saR7zQIOzd2U5fbt22eS5ZycnKR///7y0i9+IVOmTFG6Gs3RWEJufY2mQMehgJHboU/OY489BuB5XoKCg02cDn3uqNPduHGjUAqypDQJOGSdKEp9/fXXSnZjgzSHRUKx9IMfPCdTJk8Rym8abCwhtb5GU6BjUoAqEvpDPf7443L33fdAwextGsiFCxfk9ddfV0YkS9wXmgSc1NRUWbJkiaSnp5tuQOvT/PkLZO7cuRpsTFTRXzQFOjcFyFSQ03nu2edk+PDh4urqqgbM43FxcbJs2TKzjr/1qdIo4NC5b8+ePbJz506T3sbd3V0pkh566CFxd6cHqPYUrk9Q/VtToLNSgP44kZERSrQKDg4xOf9Rn0O1SxocCJvjchoFHGqfv//+ezgDXVL0owIpGPLbvffeq6xRvLkumgKaAl2LAlz3kydPllmzZimOxzj6U6dOqZhKgk9TxSzg0KGPFilyOEbEIjs1GJ7DU6dNlUrtat4UTfU5TYFOSwFKNfTVeeDBByUYpnL64bFQ37tixQoVo9XU4M0CDsMX9u/fr9yYWZncjR+CLmchXMHP1097fjZFUX1OU6CTU4BcTv9+/WTc2HFKmWwc7uHDh1VAd+0IBOM546dZT+NsBNMRcMrLy9V1DEunZYoxUlpvA0sdLHP2+CMQa3oYp1LrPmn1pGt9/WBAW1sbsceblG/T+udad6euW4vzlKUcQHEjwkQ454kJc+bMkVWrvpcCmMepzWXoAx0C6aHcWCqLBoDDtBP0vTl37pzqJP/xgDg1aPAQCQ0NNSmQTSe70BcCjS0eHvVajK49iTiT/HwEuFF5rsPfWzUTuBiKYaCIj48z1acYz/nHCHIvLy+TWG+6QH+xmAKkryeccnsiXKFf3/4SGBSo5vD16mAZUDt06DCJio6WbGSLuIp4KwIRc2JRvOJzM1caAA4DtWgONyqLWYnWKXoVE7WMgVzmGuvMxwg2uSAsFenfffetCvGggowArUvrKcAFQYDhvDJyi1wMSUnnkNTtE2E0ufF46+/SdWuSvuQQ6UcTExMjs2fPUZEBKgofdG5tYexVQIC/9O7dR47j5UvAYeGLmA6BbJ/3rl8aAA49Bul3wyAtFnbW28tbYnvFdtkHT7BhDNnbb70lK1euUPl/iOJ6IdSfTjfuN90y+KfLjaEAF38GnPQY/X004ag8i9jHvn37tlpioQhFhTHTVKxYvszUSTIqzKfTA/mMzAV1NgAcvrUJOEbrFGVoehYGIM+N8Zip9S7whWCTBo7vjTfekG++/kolJNJA0wUefCcbIudsCdZ2GkB8OZz0KisqVZoJilqtFa+IB9269aijr+FLgt7H1P9aBDjUMNNKZSxEMbJjHhCrmLekKxW+FYqQn3nVqlWybOl3irOpPX5X0MXP11sYW6aLpkB7pEBZ2VVIK4VKzGH/CDzM0Lke+W5oCHoe8VEuri6t4tYJOP5+voa8V1grbJtAQ+VxY8xJAw6HgGMUp9hBAg7dmPl3IzTcbLOjFCYPoyi1evVqU9pF9t3T010G9O8tCxZMhgwbBcBpeeazjkID3c+OTQECzqnT55HLapscOHAUa7tIAQPFnu3bt8nESZNkNKxKreFy6I/n6+tTB3AINEzU11h7DQCHKFX/Ynt7OzRq3+B4x34UTfee3M1VPKxjx45LQkKC6Q3gjpCOkSMHya9+8TAU6cz/Q/N4023ps5oCN4sCXM8jbuktA/t3k9f+vUTWrd8lpSWlai2fPXtWDh44IMORrN6cgteSPjs7OzVwWyB+8L7milnHP3MXdrVj5G6Ki4vkzNkzYEGzTcMPDPKT+XdOBuj0EWdwNrhMF02BdksB+jNxng4b1ktunzsO2TlDTH3NASfC+V0E3Q51la0rLXvb6uXSBJVpscuBE6SR46PFzt/PWwYOYNZ6USEejQB5E602fYrb5zg5Oao/w1Y6TV9vyVmaltmmoyNyTLdsfqjryd06Ozsqbs6S+9W/xnB/ByTTd1LipzFxev3rLPnN/rM/bMvRsQGDbrYJY53rGYPZhjvAQc5Pij4ODnbSs1cEUocGmXpdDvUJTdjF2N6J/mXWKJY9MWv0pB3egw/qanmNrwIBx8HeQS2+Gw00huHbwIkqX46fSFILvXevaPg6eGPCtN7Xh6xydk6BJJ5KxiJ1wdY9MQo4GmN5az8GzsHy8ko5fTpdsi7mINdtpAQH+VocS8d722LfqYLCEjl16jz0YNnYl8xLYmOjxdfH3QTkte/Z1Hf25+rVSph20yQ55QLe1kHSOzaqWRDlGE6dSpOsrFxYVSLgwOqH25hn+Zu6f0c/5+jAF1mNvpFzgNYqa/qSacBpdhbVn5ht8ybg4iwHuG3aHC9//8en4uHhKk89cYc88MA0LPBmO9noBVzwp06nyV//+hn2eAqSv738LNLBukqlBYjJuqWlJbJi5Q7ZuOmA/PC5hXL7bWMtAhyOpwKTed/+4/Lhh99LXPwJlRWOMn+/fj3loQdnIlvkUABSo11vcIL9KSwskq++2SJffrlGhg7pIy+99LAMHtRN0a5BBRxgP0pKymTpsu2yYeMBeerJ+XLP3ZOwyGpeJObqdcZjSJVuZljmj5q58IYc6hKAQ/mUbHxTyqwbQs3raMTOzlYuZBbI7j0J8IO6KK5uLrJ5Sxz29xolPl4ACHBbrS0Esst5BQrEWgpelZXXJL+gGJ7neQCM8ma5idp9PHT4nLz2+mJkg7sgM2eOlj59IuXc2QsY10F5/d+LxRFv26mThzQKFrXbMnynVzL6kw9/krRM+JWUSUhoADidh0Ejd9CoISdIUUHVySvCDpN5AFDLUmE2vLc+ciMo0KkBx/B2K5HT2LCPYDMI4RlM/s44kPZm4q8EEiQmnkfC+iQZOWKgOLs6yYmT5+TIkbNI5TpYKq+2HnDIRFB5aMO/FnAUnGBgKkxhYpbWJd2Likqxh1G8nD2XLnctmCrPPjMPrhWOWPBl4LT85b33liNMZIeMvKUvRFSmqW3ZdHaF7whBeNPmA3B
R6CEPghNsrNTuN/umy82jQKdXGtNTmrsG/vEPf1DZ55cuXaoUwTzOv/YwAcl9FULPceDACSzUYrnzzkkyZdJQHMOi3RoPvUVFs/3kOIzKXYotVKi2ZGzsA5XKSrHr7NBqBTGnMkWfkpKrkn4hGxYSe4g+PbAjq5fqnx90OIMG9hR/6KbS07PVGG1tWm4hCQz0lzFjhokjnuE3325C0OA51f71LCXSy0gDKphJz8bwie4QvNao2K9Nf56rX9iO4fk4AWCdzLbNZ0Adi0G5TiV951uenZrDoVKMoNIPMSPckH3ZsqVIKrZb7VvNBPDTp09HFG0vPGQncD03JnS//kSz7LeNpJ3PlgMHE8Xf3xsZ1YbIhYxLEhjgKwfiTkB8uITAO2x5C51I/cKJTnGMoHTuXJYkJyMsBVt8BAcHSLeYUISluFGRUb+a6TcnOU1uObmFcha6nuzsy8gO4CbdoVwNC/NDGo7WTHrS3U7csMVzOZS8+fklSqzh8+DfFfg3XQGnQwW0s7O96q+pQxZ9YXoEB6RLGQgxLUoWQ5/zyWer4TkbCCW7p1k6NdUsaUA3iLy8Yjlz5jyUy9lo3xEWnUCJ6RYCrtixWhw3tMIxJCdnIqtCDuZVCJTQ/oh4L0PSujQ4veUrZXZEhL+6GJcqUCrHszuXlCEpyRnKYz+ium1ngDu5Wz7DnNwiKOjT8EK8jOfmgXikSMwBRl1DuXsdInVTY7f2uXYLOFxI9HKmZYjf+ZBbU1iXm68vvHshFJdxchR7JmdlZanPxYu/lOG3jFDpEult6evrq+5jNIO35n4trcP+lcE8efjwGQDLBZk7Zxx2L/XHm9tOBg/pBWXtPoDkcVhXghs0zbrUTxw+ckY+/Wyt7Np1CIu7UF3n5uaKXRF7QdS4lXiC0hB0WD/nchGUwjvl2283K10Lo36ZhyY0JEhmzBgJ3ctIszExDTpT6wBDYDyhmB4yuJdsgrJ58+aDMm3aLRIU6AmfpkKIWkekGIAzevRA6JVa51bP+eDn6ymTJw7BQk9Cm3HyXd9u8tijhvFaOl3IoRQWlcjqNftk8ZL1AO3zAEPoecCleXhgM7jBsdipYLqMH9cPz8QAjlz7m7ccklWrd4O+MxXA/PeDFbJv71GEAbnKk0/eIRMnDJQlX20WZxcH1B2Kl902WbN2pwIk9t0TOqdJE2+RJx6fi+cdIMtX7JIvvlwnyUnpSrnO3WujosKg4J4md94xHhvTOXYK0GlXgMMFwIfBVAV0SkpJTpbUlFS8/dOUc1KtOd2ir2yXIRvcO5nfjZHIBB5mKfti0ecqr8e0adNl1q2zkCh+sMrnwWvbuvDNlpKSA2A5iDQgLnAqnAiAsMGb2ksmTxosW6CjWL5yq0yfMVz8YEo2vunYN5p7OVFf/tvHAJ4queuu6TJ1ymD1Rj6XlCVLl27Drqj/kd59YqoBu2Y8fKufTcqERWyRrFmzE5n4+8nvf/c0wDkYYh12Wt2RIEuXb8WeQ/sRuOvbIjLwGTpArJg8BWCQiJ0/vlovr722RBbMnySLvlyPDdT2ypzZY+S+e6e3qN2GF1/Ds+oO69kC+eOfPoA1bDn66i130JJ2rSE3WL8+weZc8gXVt9Wrd8JPJVpefOF+REB3g5d5BRT4x+TrbzbJCy/+A8AyG/eZp4CUL6QrV8oQJlACENmP8Jfziv5PPHmnjBrZF5xlkKSkZsvBuFPKALB582G8VMrk6afnw4erGxT4RfLNN5vla9DlxPFk6dY9ApxNMjLoDZJfvvSAOEGciz90Rj77fLX89eUPFUg9+4N5AH6uj/qj6Fi/2wXgcPFUwcKQkZGJib5TbaTOhEzZ2TmQ8QsUO9uWZCUAncDuoonI47wEXM+wYcPlduwmSs6osSC0G9UfgkbCsWS8pZOhLO6Le4YqICEQ9eoVKX37dUe/kqDfSZTZs0bUUR4nJCTJJ5+uUvqAX//qMYDVOHSLYovAXb0P3rKD5d33VsjnEDdK4cTYrVuY6jaVx7T0LIOpmJzB3LkT5Pe/fULCQn0gWhp2XBw3biBibPrKv15ZjJibOHATHqhbA1jNjZ/A6O/nKffcMxXerOexeL5HztvtWEwO8vhj82AWnw4OAqKsGTGxubZrn78G8ZGu+wSvf72yCDl0Vkss/IX69ouUilo+VLXr8DvnXEFhMRb+Vlm/YZ9MmTpSfvubxyQmKrCaBgIdUX9wYQPkL3/9SL5buhlibYjce89k1lacN8VPilDk1H7+s3ulB7jQctIPV6SlZSuQp4jbs0eE/OKlpyHaRymDBcE+tmeEGvtXX61DdPUl7Gx5N7izWUr6JWDfMryPhAT7yZ8ApLv3HJE75o0Hx4O+XSe9OPabWW4q4BiApkrtT7xs+XJsuPeVWvTNZX5vK4IRXMj1rFmzWnbv3iXR0XSSMyhfOQludOHEu5Sdj7SMCeBcKpTYQUtOWVm5kuuDEUYx4pY+SPd6TLbC4jMFHANwCIVc2lWIWgl4k2bKvXfPgCg2GlxceTUnY+ipv5+H3Hf3FDlzKlVWrtqqFgnPUEw9l3RBtm6Ng8UoQJ55+g7ofLykBGKOsfDZjBndHy+BbIDhOeNhiz9ZnxzAyZMpkn3pslKGu0Cn88gjcxChfAfaYcxe81xIczfkY6Ee5NZbRyqHyZXwGfr08zXyy188IJ7gGBsrTJ1w4mSqbNl6EAs5WJ5+ap4SberTYMTwWHkUff7r3z6BCLVLgTj1RNT5MBCyJ4Dj8cfmSEx0oIl+nDN8RnyJ+gF0p04ZDmfHcJw3mORBGsXBDh7UE9zePontHSFTpw7Ds4OjKXRxLGyjV68o6QUnSTpM0iGUeryOXlqjEbwhY+aEZH6Old+vlGeeeRrs/G8l7uBBxC/V3WaC15H4RqvS9Xza2jWOr7wP2+buFJFQMDMz2oPYfysC+/C0BdgYiGgjSVA+xh86LZHhweBKeiug4Tne0w2m8cGDe0Jx6CMHDx6HfuGCQa8F/UJuboEcO54CMdFFJkBf4OjYMDMeF3RwiK+MGz9IiYhUTrKQqzpzOkOBSd++MUiuFm6a6OoC/MP7O6DNIUN6wsrU23jYok/S8tKlfHnjre8g0r2jlKSPPjYfOhw/6D4OQDl+ytQOdVAVFVVYnK0HdI4zPMxfFi6Yori4Dev3yOpVe7HmzU9vdE/5/pw4kQLfpxyIUEy/GaUA29QxfCEN2AR1UYMG9ISiOAOglqKUwAQHAmh/BEXG9gprQD8CKkEnFH5CfftF15lDBEmyMk6wVnl6eSjlNF8OdQHYoHh3grWRyd6K4PBo08h4ave5vX9vfAW2Yc85ITPBSXzy8cfywQf/VWbr2rfjG5iWI1oKnJ2dxc8/AIvPVbHBta+z+Dvux7dNDkS0FCTTqqrlZcq+ODo5q3w/3Xv0wJvyViSHng3rR1/JvJABP5ijFt+mJRfyvnzjxcUlSkb6JZmPxeLkZKcc7NRcRWO2kNl9YGXqAx3Mjp3x4LoSpA/ehhSJiotLoecqEB8fH7yZg01AVbsPXDA00QYF+gCY3KrFQ3oAV0ju5XyxxbnY2Bhlij
WnKKcaxAupOIJD/OCtnIKmmwcF6kWyLubJO+8uV+bqSTDv/xLewLS2fLZovbz62pfyyquL5C9/flb54xw4eFq2bj8iw6Egnzx5oEX3qD1G43cu1qFDe8r9982U//v7J9AVrVUWrCEwydcvivYlV5TuBY8Bfjw9odyFeFe9aUDt6wnSfgADijPxh05JaloWuNFroKXAkuSprHnOTo4A1brcGkGF92F6Xm8oiPks6hcec4eSOSgoQL1I6p83/UbV6wFkUzvt4IvVAYcPITU1TV599VW4p38JVtuQypS0INDwAfkHBMqIESNk0qRJmAz9JQT73zgBeFi3NYXtnj59Gu79f8FbKkk1Qa7JCUDDfB4jR46SO+68Q217wc3+OBEoXlGfwTdZWxTqaLKyLkM3c1LOp2fIxx8vw7bKa3C/mrtxtDRxl2BxXAVXsn17vLJYUJ9SBP0Dd9fw9vbCOBxxZa2KNU3gbWwLr2WAN/xhOC5glXq7X76cp/LQcsI3Ttdq8zY4LUsK26HJe+WqnQgl2CLjxw+W30E3FODvgZida3IblLnn4UW9ePE6efudpfKzn9yH9B9nkdxsi3h5OMNNYYgCQ0vuVf8ajo2+R9OnDUcKzbNQmG8BwK2FGBKsxJ+618MyCKVwEehKy5MnuETSxXy5pgA5AC4KzuBo6LhoWPzXlPXOGEBa+7kZ2yE9OGX5gmi08FQTpxut10FPWBVw+AC4o+err74in3/+mWIVSTceJzcTFhaOhX+n3HfffUhs1Vs5dRkWft23R0toTdd2imn79+2TvXv2qvu4e3iobGfTpk6X22+/DebjAQYPZLz5jUniWx+ub1nvKNYcg4XiyNEzUBRHQl6H6MbXZr1C2lCnc+ZsOriMVCymJIQDDDZMdoiALEbLVb2qpp8MT7hWS2QhTQi4BOLm6hrB19RYE18IoukZubJrZ4LiqO6/d4bibJgEisXP110eeXgWgDIfSc12qWdBzsrOrkoFVNI6dz1FiZDBvnLfPdPlNII1aWHrD6X73LljOMnqNE26Eow5PqOoWeeCWj94Db3T+fJRHtumpgwK+lqX6q/NUMBqgMMFzExgi75YJF99taQO2FAsGAk/mBdeeFEmjB+vusyJyPQQ11uuYWIdhwVqKfK4UkwbPXo0QOZ2mGynSkR4mJpwTIt4I+5laV8NyuICKH2P4Y1eLi/9/FF54rHZAp6qQROwh0D8KZb/vPMtxJTvEIC4H34dA8QDsn8AvG1pKSmCH4mNDU3XDQGLXBq9mEtKr+ItDtESjBD9bPz8mKnNEWJmHkTMhvXYEb6ZS1EvL7+0Qb/MHeAizs3Jgyk4C1yqj0TD8bB2JDIBIQJhDU8/ebtSJH/22fdKDxIbG4WE3kbTvbmWLT/GoMx+0JncB7D76/99LF8uWQcO2R+gVlufc025Dvj6eCh91kWIgJXQI5kr9JouKi6R8xClyjAfPeGbw3Hq0joKWAVw+ICo+Fq7dq188N8PYJLNV73lcf+AAFhY5sr/95vfwCwbCuXbVQUCrRtO3Vpsn9xNalqayi7/v//7v/A3GQ652U2x7kZupm4ta/yygWiXCUXwScjv/nA+jJXSshI1+c3d3dHRBlxYD+V9egDR1ykpF6EXcENdH8RfnVUm9b4IjKxfCGyFiGk6dy4DXrQF1UmWKCbZI9TAB5dXIZvhaRWcSR2PueBHin007RqWWPMLjR7G9AImB3cVnFn9QgDq0zsSlp+5KsUEgzCpo4qMuDEmX4o2jo4GH6CE4+fgTLcWTpGrEcflZJJcyLHQe5icJbm8g/Dmvpw/yWwAKK1RGRk5iAnLAP3dJTo6uB541R+h/t0UBWrDflPXXfc56lAoRmVkpKu2CAbcPnju3Nvk94hzCoWepvbeRNd9QzTAiUWl860zZ8rLf/0rTJoT1C6P5GbMKUlvxD2ba4PjVsrieOpuLgIA+0p0FMMWzL9h2Z7yyekZBqVob2z1kS27oDymHodm1asI6ly3fj/M6wXKesL2jX9UDjAgdBM8fQnkSmTBgqQDWc+e4VAYRyFPTCqU0ceVgtpYj59U/tLVf+u2QwClMw1EEnPjrILo5ocEZZFw9yeXs2/fMVxW0x+2y9/MMVQM/Qm/8zmcOpWCkIJ0JeKYa7elx8hJBfh7wUt3CvxZ+mJ8h5ULAO/HQlAiTfv1jVZm7WPQ+WyD4hrMpIl2BhqAu0E82xZE7Z84cQ7A2E3VuVlzp6V0aI/XtzngUJTiLhArV66U3bt2mWhA8/O4cePhL/FLyPkBakGYTt7AL3yDEXT4Zr3RgNaabnKik2vYu/cELGMusMwMg6hnUOg21h51DORIRo7oBzpVAATi4MhXofxkhsHBbwtMzW+/jaDUy4awBooBFKUOxp+GKPYdnNCy4H4frgAYWgelEI9C3NGtM0ZBXCiVf78J574dx5QylHVZaAH7cslGeDJvg8+IPxSmCEEwo2Oq3WcquAPh6XsLfIeoXP3w4+/h53K4Dud2GSD25eJN8HD+DMBTriLjj4MTees/3wBMcy0GHYJGU4WiFc39dAgkCJ4/n1FHrU6a9ugRpkJJSq5clXcgsm7adEjR1whMeUjL8dW3W+VzKJ+D4BPFoFofeHsrZ1B2oIk+qFPNdZIDaKIN42k+s85SrCJSJSFEYd26tSZQocIyOjoa/jfPwNwYaVLUthVRyem0h8IXPEWNo8eS5NDhROVJPGhg90b1B8Y+Gzg1B/h8xIBeQXCOhLIZ8VNjx/aXxx+dIxezcuSjj5ZBRDuBfZ37igtc41PSLgLUjkM5bAsl/K3K/M43PwsVyIyynj79FoSNXJQPP1ohP37hHwC/4RITHaRCG+haf+pUMqyF/aHQDVThCM1tE8R+OkGcmjl9BPLepMO7eJU8/6O/48UyWPpCjLoCESsOIBgXd1LCIUL9768fRSL67jCVL1HhFe++5yc/eeEucW8mvoovMY6rqefKR04ubQJ8kJiDOgs0smOdaqKyLi1Uc2aPkjwA9bvvL0UIwz9hqURfIZ4SDOPikWQc/k90wHz22btkAnRnzC1kBCTewzwY8C7GO5He1Tdt8MHa+K/m0rpX4AS5UXP7O9W9sOP8alPAIVtKHco+WIjOQKQyFl+IUjNmzMSCGWsCIeO5zv1JixOVt9eE/ilTsMAN8VHNW+EIEoz+vh/gcehQolyp1nWNHt1P/vnPF7C410FHtlveePMriAWwCEEpPH7CUHnkwVnYT9obeiskfMfk5SIjcNDiwtCDHz43H96sUbII3rmr1+ySQuxh5OLqjNSd3eXHP7oHHMAYWNPOYdVUCvUXzfmDUA8UDN3ST39yDxTB3eCLs1l2bD+E3DfblDgbEhKgfGUefuhW6FAMCb2feXoe/I3clfL7DBJ0MZ2FuVgoclD0Kh4KZ0gHjKU7IrkbU3hzHrEvXkheRtGKC/cSFOw9ETpiXOHqPHRXTzwxR4lWny1aIzshfn2/apta5AH+vhD5x6O/M1RWQbZJoCJ9GXYybdowhC2E83CdQoChvofhIbzWzd2pjpWQF+Owqjtj2gjph4j3+oXP2
9XNGSJ3H+g2/eAJ7mvgrOpf2MF+24CAdfCVFp2f/vSnSsHLsXBT8tmzZ8v77/+3xXoPvolSU1Lkt7/7ndq1ku2Ruxk8eLC8/vobKkiSuoX2WNh3mvD/Ct3PJx9/pLrIvo8cgb6/9hPI8xHVb7uW9Z4TkO044I+mVoYjWFoI4IYcLLZSDusWuSUWw1Y1tip38cXMbGWJ8vf3UUplSkhV4GzsATQs9cMfKHLyzX+ltByOjrmwSBWoiOfQsACIfE7KZMz7MhiTuhcjl6Qaa+IfKqztUIdWrszMXAQs5oszXB8CER/k5+ulAM+oCyEnQrGSDnVXIN6wkE7miuoLxmKP52PoT0PLXv16bN8BPjrUYak69WKsjG1SXKXnMS1t9Dr3B+AwiJb12NfaS4XPkHSnqG7geureleNvjObGK6m8J12ZusJcGzXPu7rf1RyqsX5LPnmvI0eS5Q9/eh8uCdtUVY5hHmIG//jHP0l0dHSr1jc3GZgxYzp0cGdMgPjHP/5RfvzjHyvsqN/HNuVwKPNz2+Djx6k8NBQP+MAMHDgIC7Y3iGz5YjPWt+YnH4iLc008DmX38nLkcsGi4GRoTSG8c3KZm2DNtccJb/RpqX0tQcDGBnE78HMJ8PfEKepp6F+CRQKHO5bKal8Y9aPWPxxTFRTPTIcRHROIdqodH6HjoP+PsbS0v/TvYZZCij7RENNi0K6xXxUVtEQaWzZwIiWlBvBsjqykAUHzqtT0raYl89/IyVReaZyLNLbJZxoRHihRkYxZYoJx0pFJxmt1tvoWBCAjYJq7qxp/IzQ3Xt/cPGjseRvrt+STdOXLvawa0FmX46WLBOe5tUqb3YmD4QMh4BAFjYX7lI8cOVI52t08s7SxN01/MjObH7YyJRegFiYW0cVLubIfmfkGD+5WDToNJ2PTrbbdWb5t+Ve7cKJZUiiuIH4Upaa+pXWbbp+xUg25kBvTdtN3bvlZA0jXIyGec8tbak81aAgg95aA9LXc7cJY6GzL9egBA079eWO85kZ/th3gYJESUNLSkEUuJ1f1myBEDqcHNlBv74XcmbubO2T1XpCfQ0zm/IyMiypRUvfu4VBI9q8GnfY+Gt2/rkoBOCUokW3nrmMIKdkAwDG4pZAeAbAO947trfzSyE1Zo7QZ4LDzZOHUxuZg7VkoF3vDqzg0pK4HqjrZzv7hA7CHJ25/xHINGjQIeogLisvhmOLijsnPfv6q3IY8MszK5wwP5joyQjsbi+5OV6UAY9uuqGySq9fuQs6n02oOkxoUoxisPAyOsPxuLfVGmwIOWelC7OpnLEadiAsy7jdnYjXWuZmf1yC/R8fEyJ3z5yPm6QiiutOV4pAP59Spc0j4lARlpDMAh9HCN7On+t6aAuYpQJ1j2ZW6oSmUNMLCw2XG9BmINetnVuQ139r1H21TwDFo8GuUe8zn4QhugLJjc05k1z+062+BXA7jr5hsPSkpSd57913Eg+WY3hI8f7WsVP1d/910C5oCbU8B6iP9/f1h7p8rCxcuVPPbnI6trXrSpoDDTjdQuFFW5F8HKdy/ig/omaefUUBJEzmzAjI8QnmcdpBx6G52bQoYPe6DkH6FQPPUU0/DbYIhNQ0V+m1JqTYHnLbsvLXaJuj4+fshmv0FpPwcASe7z+CBekBZ36ylbLPWWG/GfUhDGhiYW9pYyFm6Xk/SNWND+lNZWZmRgbm67777bpk0aZLVORvjY9CAY6REM58EHTqbTZkyGR6kY5X1LRXZA5m6oAEX10xb+nQNBahPYPaA75Z+J9+vWKF0ZDQuDBkyVB597HH4FflDP9ZobEBNQ/pboxRwc3VDKEk4IvIjFNBQB2ltzsbYOQ04RkpY8Kl0NrBScZF069YNwX89LKilL2mKAmT1mW42Li7OdBnpGxgYiOx9U1V+6Uors/2mjnSiLxT/CTI327NfA04rJhWBx1pmxFZ0r0NVYQgJaVlfH8ZobopZ3JjvZr2NOxQhO0hnDbkIOkhndTc7HwWYhdCcIy8Oo5g70/lo0JVGpDmcVjxtsvwsWmHcCuLpKlangHG+8sY3e85qwGnF46d/UWFhISKr81WeW/0mbgURq6tQh5MD3yYmaTMWek0Uw2J1Dr5PpXBaayoFhbGO/myEAng30uLnhXgphhXd7Nw6GnAaeU71D/MtwajslJRklbpjw4YNcvbsWRyrcWysX0f/towCjKwuBcAY376k6cED+7Eb5hM3fYFYNoL2fRWT5UdFR8mkiZNVKonY2FgVzmCktzV7rwHHAmoTbIqQSGz16tXyn7ffRl4RbE4PZaYubUMBLoSCggL11zZ36Hqtnjt3Vnbu2IH9ur6TJ596SuXB8QTHY23Q0YDTzNxTYFNULF9/8zUy6/1DUpAuVRdNgY5IAZrE4+Pj5G8vv6ycVp988kmVmoI+ZtYqGnCaoDTBhvqaHTu2yxv//ncdsGFcGPeGdkBcmMHO0nHCNZoYsj7VqShgw4zJUg6goXuB0fUgNTUFieE/R+rZILnnnnvUfmXWGrYGnCYoTQVbMlKkcuO+U6cSTVcySxq3I47tP0h69sOundjnqiMEo5oGoL90DQrgpVgCVUBS4nE5lXBYLmZmwufJkML1LFKCrli+XIYPG4bc032t5lemAaeRqUfuhg5px48dQxL4/aarmGIjpkdPufeZF2TagnvFGfE+tKrooinQHimAaSylxUWydcV38uU7r8up4wlInWpIj8qX6P79B7CdTi/l8WSNaawBp5FZwh0Xi2H6ZlL59PTzpqv8A4Nl5oL7ZOa9j4BFrUTy8evfjtjUuP6iKdAGFLCzd5RJty+QwvzLkvfmK3Khej4z60HCsaPKBYGBstZ4c2pP4yYeMFNQXEI+ZqNrPbkeP8i9Q8ZOVJn8tRjVBPH0qXZDAepuHOCL03PAEAmP6W7qF7ffzs3NVUny+IK1RtEcThNU5mZo3FfLWKjTccEOlO4wJ3aEjIXGft+IT4JtS4q1za0t6Ruv5XiUCNFF5GFmpHR198AWQO4mUvEZGePVGGJijZh8DTgm8lv2hfOzq4ENKcON82jxaL7AZsfLWghQzbd7Y69Q3KnC0JYB6Y3thXVbI8DUfxEYSGA9GmjAse4z73B3IydwKeO8HNm7C1viGnbfaGoQxJngsAgZOGqsuHv5tDvrnXE8CQf2ioe3jwwcOQZpb50bLMSmxqjPtZ4CGnBaT7suUZMLNOlkgnz62t8k43watp91U+JIU4PvP2yERPWIFU8ff+ymaQ1Gvane1D3HdBg0E3/2+v9JRPeeEjtgsDhys8MuIlrVpYb1f2nAsT7NO9wdbWzssG2vvfQbMkwe+tFL4oHNAY1OZPUHQydIN+i4AoLDEHRp3Xy59fvS2G9bbv2L+CJ7ewcdd9sYkdrouAacNiJsZ2zWw8NLYgcNFp+g4CYjuKnjqoKvR319AfU6tbUFDc43SjTUq1XR8noG5TCbVdonzcU0SmFrndCAYy1Kd4L7MLdwBSx35dg2tsXcCxGDQFS96Ak9ylJEIGkSCNQF4KgMCmvL6qFOdbtUDquaRrCrjVyd4Jl0tCFo
wOloT6zD9ZfxaBWSn50lyYknJSMtRYlj/vBn6t5ngPiFhGGHU4g29UCHwFJhrHcK9RD/QzHOH9xV9z79G61HVojBiAXIsZNy6oScTz6nOK2g0FCJ7tVXAsLC63BZHY6cHbzDGnA6+ANsz90nB1NSVCj7Nq6VVV9+LMlnTplidujTFBYRJdPn3ysT4QVLi5FRVFL14OW9b+MaWbX4E0k+nYj9sQ36IFUvMhr17paJt93VoN6VkmKJ37lFVi36WBITjphih1ROmO49ZNbdD4iDs3O7N9u35+d6PX3TgHM91OuSdSmrNF8IGlz8m5Yuka/ee0PFnN31+A+kz/CRYmdrL2ePH5bNK76VJe+/IXm52bLgqecRBOuBhq8Z6i1bLEvee1NcsMXJgieelb7DR6GenZw9Vl0P5/JzcmT+kz8UF3dPVY+BiTvXrJRFb72C31Vy230Py6CxE7CBoYtkppyTbWuWy7cfviOhUTHgltqnQrt5ynbsKzTgdOznZ9XeM81BbvZFqaQuBnFk9QtBhls5u3l4QrKxk9NH4gA4X4lPQJA886s/yMDRE1DFoIsZNHqcDBs/WT555WXZtnq5MlFPvuNuJQ6dQr2NS78WP8StPf1L1htvqjdY1ZuEen8DgKyQ8O69ZPIdC5W/z5kjh2Tt15+rlCEPv/iSTJhzp0FPhNoDR42Roch4t+zDd2UDQLAInBdFLF2sSwENONald4e8G3AE5ZqcOHxQfvv0A2Jrh2lTT+fCK2hmHjJ2nDz8wi/h2+Is8bt2KO5lIbiXgQAKlRrBWA+NhsNXZ+Zd98sH//h/sgdi18gpMxQcxe/cLvmXc2QhuJcBcCAsv1oruyLr9ewtM+66Tz78519kz6a1MmLydJjtHeTovl1yEU6KMxBcO2b6HBUDVzvezRvAN23BPZJ67rTsWLfaqnlgSB9dMEc0ETQFLKWAM8SbiG49VSCggU+pW5P6lcCwSJx3lMuXsuT8uVPiHxwqg0aOxuLHXuwNmSIJ6xYt0T1jJTMtVXIy08XO0V7VC2C9EaOQSoF7uNe9D3+Fd4+Gc2FPyULkc25WujjDITHl9Elx8/SWASNGiz36UF5myP1irF2FnNTs38ARY+A5vVspoY3n9Kd1KKABxzp07tB3MTAlNtIb0ca/fu0d+OEEYrGagRwc4gZ2NH2fQdqDdIBIBcSwbWtWiRPzBtWrY2Nrg1wtxXLhfCryF+fJJQCOs6szfqcp8/vWtavFefvWJuqdl+KCfFUvIDhIChF64ePrJ0EAlcqKhihFpbQ9HP58Iaq5YRcDo4m+Qz+cDtZ5DTgd7IHdzO5Sb1OGlB1XSrFTZhNexBS5ypAniErjdOxysQRK42rHmEa7H9GtO0SnciWpEYTSU5Pl4nv/xvVMm2AG3KpbiujWA+BSDsDKl8sAHF//AKVHAkqZvRfTMDghlIGpYc2JhWYr6YM3jAIacG4YKXVDtSlAtQ9FLIpFDzz/c3GCyAOWovYldb47u7hC3AmX4wiqtLNnvdFy//M/haiEdArN1AsKD0d8VIIKvyAXoxTaSK9JS5W5Qn8eRr/rYn0KaMCxPs07/R251t08kXsF1ionRyeJ6dVb/ELDIG7VBRxIVAjuRNbEoiIlQrkAXFw93MUNeVucYe3q1qtP0/WQOpOez86u7tjkzQfcjT+yNBaplJoU16S+zggKZ4ZclMJCRU6NVjVdrEsBDTjWpXeXuBuZB2//IAkBx5J67oyknkkUL+hNKgEOtYst/HEy05Jk6Yf/kcs52fLIi/+DoM8gld4iDR7CKXAUbKred6iXDx+eh378c0Sm++CegcqzOBnK46jYvubwRumM0uGTU4idPq2V5a72mLv6d/KdumgK3FAKXAPX4u3nL1G9+8nl7Euyc933Upx3GWlZbRVXQc6Ci72y4qqciNsnB7ZvVucIUh6+gRLduy/qZcmuJupR9Dq4raaeu7cfdtAYJBWwTPH45YtZde9HbgZIePb4ITm0e5vaNoX90cW6FNAUty69O+jdKArVFYeaGgj1KBSPhoyZIOEwo+9Yv1pWfvYBPIOzoWw2KJzLoFA+smcHHPUWQYHrJJPm3CFu3r4QjzxkyOgJyvyu6n3+IeKi6tfbLmu/WQRfHyeZCOc+go0drE907uuJ/DYEsBWf/leZ5qlQpoK7DPl7jx/cj+MfSk7WBXBfoVpp3NRDbKNzWqRqI8J2rmYZ2U3upPr9ZIHClaDTAxzH/Meels/f+Jd89/G7cuLQfuk1aLjYw0KUlZIkR/btVJapeQ89LkMnTIF+hUoX1ANo3PkI6/1TvvvoHTkRv89ULzMV9fbuVLqbeQ89IUPgrcxgTaY/DY3uIbPve0Tyci7Jyi8QS3UkXnoPuUXFTmWnp8mxg3uVD9HU2xfiXBwUx5aDaOd6njdvNBpwbh7tO8SduSj9Q0LhBTxTfKAjoQexJcuU9Zhd75bJM8THL1DWgSM5hDSlR/bvVdZoZg7s2X+gTJ13l4ycOkuBkNErWNWD1zHvtw4c0CEAjKmeO+r1GyhTEM4wasqtdepRVBuEMAg/bFK49qtFsm/rBjkWf0CJcV7gngaPGSu3InjTyzdAWb/skVqU3JU2j1tvKmrAsR6tO+SdCBzdkA7iuUFDlAMerUJGYGhuQAp0EHDZe9hI6TV4mBJxci5mQndTKR5eXhIUHilOMIfXb7NBPehjci5V1/P2hmNfRKP12KdIKIyf/PUf5a6nfyTZuB91N77QD/kitYUNQJAc0d3Pvogrr+HeZhKFNTcwfb7VFNCA02rSdZ2K9Fu5eqVWPFMLhk7woB4FbAYWfKj4h0ao2vSDoYmaAaHmSp16CHPwB8iwNFeP13BnSd6PsVN+wSE8pDygqcyuqraUNXZfdbH+p80ooAGnzUirG65DAQAPlbdNeSjXud7447rrGRvSn+2BAtpK1R6egu6DpkAXoYAGnC7yoPUwNQXaAwU04LSHp6D7oCnQRSigAaeLPGg9TE2B9kABDTjt4SnoPmgKdBEKaMDpIg9aD1NToD1QQANOe3gKug+aAl2EAtoPp5M/6JbkfKGznS4tpICKQmcdTTtLKKcBxxIqdcBr6JHLVJ0l2FCOOMLgxqaKPbLsuTLxFRKl62IZBeitXAz6MkOhu6eXitmyrGbXvUoDTid89sw1U3w5D7tdfiKrlnxmWAh8EzcBOr4IsJyJrVdmLHwAANU0OHVCkrV4SMzbfOrIXvka+ZpDo6LlwR//D7IVemnaNUNJDTjNEKgjnia0cD9v5n3Jybwg4THdVKBkE6mBEUHtq/464nhvRp+J3yWF+XI+6Qyi4plMzBC/pdjJm9GhDnJPDTgd5EG1vJvMYWODXS8D5J6nnpPZDz6KvaEa51wMwZKVhoXT8pt1yRrMD2Rnb4/MgnYYP2Fel+YooAGnOQp19PMQj8qRgqHsSoVKA9HS4TSmdDZIXeYAzLjwDOfM1W9KZDNe39w19e9vrGdufObbMt9Pi9tVIqq5u+ljTVFAA05T1Oni57iIK5C7pqy0WEqREpR5ZOwdHMQV6UMdsbdT/UVeifMl2BGBqiJ
35LvhW7+stAR1i3CtHXZxcFfbBDeWS5jAwC1ewJs1muC85hpkIOTODKqwn0wjynsVKy6NnIezixuU4K5qu5rqC9WH6ieUvegUlL2e6piqCyU7k3+5QQFsj/qETLZ7Bf2/UlKi9DOOTi5QrruJExTFmqmpTVXLvmvAsYxOXfKq/NwcSUAa0H2b18u5xBNSgm1ZfJAcvT8Sag0ZN0l6DRxi2DcKQEEQyTqfIiuRS5i6ont+8AJ2ZEiRPetXyUmk8+SWv8PHTxHuFNNnyHClaGWendrlAtKHHtu/R9wAVoNGjQNo+dRJ9kWgSDl1QpLRlxgkaO/Wp58CgXykFOW+4vu3rJekUyelGNvOEBS7xfaW4ROnItPfBOzq4KeupbI3My1ZVnzyPsQhB1mA/cuz0M/dG1arvdOjevSSe5GcyzcoDFsIZ8iR3Ttk/9b1kowdJKqwq2hYVIwMGzdRhk+air7V3YWi9lj0d/MU0IBjni6d6ig5EUoA9TmS2oOsL3ZwUX4P8Ni6ernKkhcaGS0+gUFSnJ8vG5Yuwfa9y2XG/Htlzv2PiYePLxq3RZKuK5KJbXv5uXfDKmUhy8nKQq7hbuIfFCJH9+9C2s+NsuCJZ+XOx5413Z79Kr9aJgcAGB+98rIER0SJO0z0wyZOMyXoIqAxmfoaWN4O7tgsD7/wSwDeIElPOisrPn4P/Vyh0p8yi6APtqS5AnA8DBA6hETt0+YtlDufeM7QT9zV0M8UAMY12bNuhaz5+ktkFLwoIZFRSKcaroAo9fRxWKDelP3bNqmE8OwTubsc7Jn+1ftvIR/zLolBZkFyY4q4ptHoL01RQANOU9TpBOeok6jETgkV5VcgauCHmXINi4YLWgETPnOzMhWnsnH519gBc6zc/czz0qP/YCQgd4bIVCSHdm6Vbz54S1Yt/lSJIFzMTBVqC+CwBweRmp4q21YtU1zNM7/+k/RGelEnFxccWw7Q2SOJh+Nh4SlQ3BGBjvctwDYyqeAiypGRjxvjnTtxDJzJRFNvaeq/mHFe0rDPFUGlN7ikUly3A4C4HdvJDB07Qe3UGRrTQylyr2KrYW4l88Xbr+D8SnBEfWXi7XcpIxLvZ2/vqPbL2g6gCgwNl2f+908Sy37CDykLnNaKTz8AZ7NRRk6ejvH/SCJ69laiWREAd8+GNciZ/LlsXvkduL5CNW5TR/WXJimgAadJ8nT8kxSD4nftlNIrV1WaTXMj8oAeo/egYRKFHTJp3qUYtXvjGunVf4g8+cvfSWg37PtdVq52q+SOCyOm3QqwcJX3//YH2b5mpQwAKA2ECISVJxUAt0swxffoO0Ae/umvJKJ7L7UzQwXFEXA6oRBJssAF8S8GydDV5njgjrIBJtkX0qU7dtskOJ1KOAynugJxhT6FoMQ0pWlnT8PUnylDx08E6ERIRvJZ7OhwADmOIxXXFG68F5TkBKi+t4ySsefmyDfgVJJOHpdxsyuwnYyDArgK7Il1EfdjQvYHXnxJ9ZP5ja9iO5lDu3aAZttk4Igx8vBPfiGB4VGGMeA8gXUSEr9zG5yP/vUXuZR1UfpC76OLZRTQgGMZnTrsVXmXc2XP5nVyEAuosRKIXRlsH7GRGIDEZYgMCdhOpQrixoTZt0tIdHeIIDV5h5lAnZqXXgOHyoRZt8sy6EIOQuzoN3ykkixYz8vHX0ZMmoY9qXphwzlDXepr/IPDoHcZINtXL8OGdEelGxY7uY2qajApyL8sw7FdDIHm7IkEiGcp2DJmiFJWcwtfckD0eemLrV8ISlfh6evt6y/RAKkA5EomYLCwTfWJf1ygOK6E13V29kXlm6QAB8cpTnGzvuETpqq9s9hPglQB6HXy8EGlMB49bZYEhBnARjWIfzgOWwBkX4x36NiJkgY/HMU2GS/Qn01SQANOk+Tp+CcDsVPBrIX3y8hps7Hg6m+2bRgfN5QLgFL32rVKybl4QVJPJyqdS79ht0AB3FAMI+gwBCK6V1/1pj974qgUASyMVhu/oCCJAbDUrss6LtgaphtEm70bV8vphEMyHpvfOTq5SlFRnqRDVKI/y6Ax4yUjJUmOg3M5m3BEiXL0d7mMXRvI4Xj7B+DYQHBSVeBKYuXJX/1BiXX22O6lAsDFHSC4KwQ5osK8HHA2x9C3fAzUaNECaFSPiYrsaCiegT2qUKwszMtVnJZfYCDGAKW04VSdf5mXmfumU4fj7eunQKjOBfpHoxTQgNMoaTrHCQcnJ+xi2V0Gjx6NBWlu+fAFTZEF26VgEefnXpZcWH1CwqPFAzta0hTeWPHFogzErghXaI6GOdwGy5OLlntOeXh5or26dW2g34no3lPpTJIBarnwhKYYlAfu4wIsRT4BwQAkLvJrypx9DmIQFbwOEOOou8lKT4M+aKgScbjjA5W4dhBnCgASmdD5pKckqWtKCvKkCH9ZGRlyKSNdmbaNXI9xLOynm7u7Motfo+mMBZhUVJCP9vJw3EPFR5nOGa4w/WsDcHRBfSe4B9RXuJsu0l8aUEADTgOSdLIDABP60ly9Wtms4x8XoQIYrD97Rwe1yVxT4oIDFjw3xitXAaLV4KIYCf5Tw1EYKcqAUm6qFwNQ2bNprSQlHlcAREsT96vi1sCefgESDJ0Mxby0s6ewR3gmtnsJUFwXOQuKU87QoxAgCUYnsTc5twtORlv22O7XxcMD9wiTaJi3uVEegWw5tgs2D5x1+0i3nitlJVICXx7659gBVMAPGbtf55NSmz3M6vTb0YBThzRN/tCA0yR5utZJ6jAoXpFzKIOjG/f/pnK0sQVFJ7vC/Dxl8eFiv3r1SpMEYzuu7hBFIFYdgAXoTMJRGTpuMuKRzirzN0UYcixeAJ2wmJ44fg6iFvQ2tlUAnJPwy/FWIhb5EbZ1ElsHf/r6/8HvplDmPvAYdgedgX2oQiGmGXYHtcE1m5d/Y9LpNNk5nKRo5e7uBR2UDwAKe2aVYy8uiHPmQIciYllZKUAcup9qnVFz7evzsCtoImgK1KaAL7bXDYKZmMpTmqG5U2X9ohS9WJCZ59OURYqeuS4AEjAwTRcAAEWR8G491S6YSaeOS9qZRElPPodIa0+IU/2lkroe6IeiesUq4DsD5XJ6chL6ki6REMfoD0Nu5QrAjtYk7qx5O/Ymv+PRZyQA1iRyVmXgfMrLyqCwLpNyfDcEVjbdNZ5l/909vYXbAhdA70MfIlu7ulwQr+P4qSvKz8mGm0CxBhwSxcKiAcdCQnWFy2iB8ca+3FSGXoYeJ27HFqmEQ179Nzh/pcSgMwAAQABJREFU5+XkyIm4A2rx9xs2AuEA3k1JXybyGcWqqB6xUARflMO7t0OXk6nM5b7QB1E3Q5ALg3XMB31JPHJIjh88AA6oDPqb4UpvQggog/k6H0phhljQj0ZsoIcCCBi5MfaR3FcKAI2flgRYcmdOTyiBQ2C6J5gcj9srFQCtuuNn2IWdZGdmKJM8LWsUq3SxjAIacCyjU5e4imIC3/CDRo1V5uYda7+Xw7u2wroF0zYWsFp4+GS8FC1N1MNwcQ4bPxnnMJUo6zRTCAhuyBsTDVAj57FjzQ
05aND0YvKZf5OHx39IBnU0u98Hkw6nES1KJEifPinTp0q48ePb6NX0BTrPQUIJPQOZtIshiewmAE5GyqHCUB91bptpepKRwgmdBZkQh6+CEDkbCh6GTWounIevc/QoACfiw8//FAtTuSC2RhS8/LLLyvza0e6hqFBoYc/SgIOdTJ80YjTXwxAnwBOa/JwMHQQ4otWK900BSwpQGfS1sXTyO6Tzff29rbcVX+2cQp0Gktl4+PT3bcBCpCDsWZc0JyNDdy8bnbx/wM/Ce1OYYpAJwAAAABJRU5ErkJggg==)", "_____no_output_____" ] ], [ [ "# Building the encoder layer not the whole Encoder itself. \n# This has the whole block of layer except the positional encoding \n\nclass EncoderLayer(tf.keras.layers.Layer):\n\n def __init__(self , d_model , num_heads , dff , rate = 0.1 ):\n super(EncoderLayer , self).__init__()\n\n\n # Defining the sub-layers of the encoder\n self.mha = MultiHeadAttention(d_model , num_heads) # multi head attention \n self.ffn = point_wise_feed_forward(d_model , dff) # point wise feed forward network \n\n # The layer normalization layers (Add & Norm)\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon= 1e-6)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon= 1e-6)\n\n # Defining the dropout layers\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n\n\n # Initiating the forward pass in the class method \n def call(self, x , training , mask):\n\n # At first we pass the input embedding into a multi-head attention (not the positional encoding in here)\n attn_output , _ = self.mha(x , x , x , mask) # (batch_size , input_seq_len , d_model)\n \n # Dropout \n attn_output = self.dropout1(attn_output , training = training) \n\n # Passing inside a layer normalization layer (also initiating the skip connections)\n # We are passing x straight to the attn output (residual connections)\n out1 = self.layernorm1(x + attn_output) # (batch_size , input_seq_len , d_model)\n\n # Point wise feed forward network \n ffn_output = self.ffn(out1) # (batch_size , input_seq_len , d_model)\n \n # Applying dropout \n ffn_output = self.dropout2(ffn_output , training = training) \n\n # Applying layernorm (and initiating the residual connections)\n out2 = self.layernorm2(out1 + ffn_output) # (batch_size , input_seq_len , d_model)\n\n return out2 \n", "_____no_output_____" ], [ "#Passing some dummy data into our Encoder Layer \nsample_encoder_layer = EncoderLayer(d_model = 512 , \n num_heads = 8 , \n dff = 2048)\n\nx = tf.random.uniform((64 , 43 , 512))\n\nsample_enc_layer_output = sample_encoder_layer(x, training = False , mask = None)\n\nsample_enc_layer_output.shape # (batch_size, input_seq_len , d_model)", "_____no_output_____" ] ], [ [ "### Decoder Layer \n\nEach decoder layer consists of sublayers:\n- Masked multi-head attention (with look ahead mask and padding mask) \n- Multi-head attention with padding mask, \n - where value and key recieve the encoder output as inputs\n - query recieves the output from the masked multi-head attention sublayer. \n- Point wise feed forward networks \n\nEach of these sublayers has a residual connection around it followed by a layer normalization. The output of each sublayer is `LayerNorm(x + Sublayer(x))`. \n\nThe normalization is done on the `d_model` last axis. \n\nLikewise there are N decoder layers in the transformer. \n\n- As the query recieves the output from decoder's first attention block, and key recieves the encoder output, the **attention weights represent the importance given to the decoders input based on the encoders output.**\n- In other words, the decoder predicts the next token by looking at the encoders outputs and self-attending to its own output. 
\n- ", "_____no_output_____" ] ], [ [ "# Building the Decoder layer \n\nclass DecoderLayer(tf.keras.layers.Layer):\n\n def __init__(self , d_model , num_heads , dff , rate = 0.1 , **kwargs):\n super(DecoderLayer , self).__init__(**kwargs)\n\n # Two multi-head attention block in the Decoder \n self.mha1 = MultiHeadAttention(d_model , num_heads)\n self.mha2 = MultiHeadAttention(d_model , num_heads)\n\n # Point wise feed forward network \n self.ffn = point_wise_feed_forward(d_model , dff)\n\n # Defining the three layer normalization layers \n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon= 1e-6)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon= 1e-6)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon= 1e-6)\n\n # Dropout layers\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n self.dropout3 = tf.keras.layers.Dropout(rate)\n\n \n # Initiating the forward pass \n def call(self , x, enc_output , training , look_ahead_mask , padding_mask):\n\n # enc_output.shape == (batch_size , input_seq_len , d_model)\n\n\n # First multi-head attention and the layer normalization layer\n attn1 , attn_weights_block1 = self.mha1(x , x , x , look_ahead_mask) # (batch_size , target_seq_len , d_model)\n attn1 = self.dropout1(attn1 , training = training)\n out1 = self.layernorm1(attn1 + x)\n\n # Second multi-head attention, where we will pass the Encoder outputs in here\n # Query and keys --> enc_output \n # Value --> self attended decoder output\n # Also apply the layer normalization\n attn2 , attn_weights_block2 = self.mha2(\n enc_output , enc_output , out1 , padding_mask ) # (batch_size , target_seq_len , d_model)\n\n attn2 = self.dropout2(attn2 , training = training)\n out2 = self.layernorm2(attn2 + out1) # (batch_size , target_seq_len , d_model)\n\n # Point wise feed forward network \n ffn_output = self.ffn(out2) # (batch_size , target_seq_len , d_model)\n ffn_output = self.dropout3(ffn_output , training = training)\n out3 = self.layernorm3(ffn_output + out1) # (batch_size , target_seq_len , d_model)\n\n\n return out3 , attn_weights_block1 , attn_weights_block2 \n\n", "_____no_output_____" ], [ "# Testing out the decoder with sample data \nsample_decoder_layer = DecoderLayer(512 , 8 , 2048)\n\nsample_decoder_layer_output , _ , _ = sample_decoder_layer(\n x = tf.random.uniform((64 , 50 , 512)) , enc_output = sample_enc_layer_output , \n training = False , look_ahead_mask = None , padding_mask = None\n)\n\nsample_decoder_layer_output.shape", "_____no_output_____" ] ], [ [ "So far we've been building the Encoder and Decoder Layers, now its time to build the whole Encoder and Decoder on whole by using the layers we've created. \n\n\n", "_____no_output_____" ], [ "## Encoder \nThe `Encoder` consists of, \n- Input Embedding \n- Positional Encoding \n- N encoder layers (for n number of times)\n\n- The input is put through an embedding which is summed with the positional encoding. \n- The output of this summation is the input to the encoder layers that we created above `EncoderLayer`. \n- The output of the encoder is the input to the decoder. 
\n\n", "_____no_output_____" ] ], [ [ "# Coding out the Encoder \n\nclass Encoder(tf.keras.layers.Layer):\n def __init__(self , num_layers , d_model , num_heads , dff , input_vocab_size , \n maximum_position_encoding , rate = 0.1 ):\n super(Encoder , self).__init__()\n\n # Getting the dimensions \n self.d_model = d_model \n self.num_layers = num_layers # number of layers\n\n # Initializing a Embedding layer to get the inputs \n self.embedding = tf.keras.layers.Embedding(input_vocab_size , d_model)\n \n # Defining the positional encoder \n self.pos_encoding = positional_encoding(maximum_position_encoding , \n self.d_model)\n \n \n # For the N number of layers we are create the the Encoder layer that has multi-head etc.. \n self.enc_layers = [EncoderLayer(d_model , num_heads , dff , rate) for _ in range(self.num_layers)]\n\n # Dropout \n self.dropout = tf.keras.layers.Dropout(rate)\n\n \n # Initiating the Forward pass \n def call(self , x , training , mask):\n\n # Getting the sequence length \n seq_len = tf.shape(x)[1]\n print(seq_len)\n\n # Adding embedding and the positional encoding to our input tokens\n x = self.embedding(x) # (batch_size , input_seq_len , d_model)\n x *= tf.math.sqrt(tf.cast(self.d_model , tf.float32))\n x += self.pos_encoding[: , :seq_len , :]\n\n # Applying the dropout \n x = self.dropout(x , training = training)\n\n # Now indexing into the Encoder layers and passing our inputs\n for i in range(self.num_layers):\n x = self.enc_layers[i](x , training , mask)\n\n return x # (batch_size , input_seq_len , d_model)", "_____no_output_____" ], [ "# Passing dummy data \nsample_encoder = Encoder(num_layers = 2 , d_model = 512 , num_heads = 8 , \n dff = 2048 , input_vocab_size = 8500 , \n maximum_position_encoding = 10000)\n\n\n# Sample input \ntemp_input = tf.random.uniform((64 , 206) , dtype = tf.int64 , minval= 0 , maxval= 200)\nsample_encoder_output = sample_encoder(temp_input , training = False , mask = None)\n\nprint(sample_encoder_output.shape) # (batch_size , input_seq_len , d_model)\n", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:4: RuntimeWarning: divide by zero encountered in true_divide\n after removing the cwd from sys.path.\n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in multiply\n \"\"\"\n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:14: RuntimeWarning: invalid value encountered in sin\n \n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:17: RuntimeWarning: invalid value encountered in cos\n" ] ], [ [ "## Decoder \n\nThe `Decoder` consists of,\n- Output Embedding \n- Positional Encoding \n- N decoder layers \n\n- The target is put through an embedding which is summed with the positional encoding. \n- The output of the this summation is the input to the decoder layers.\n- The output of the decoder is the input to the final linear layer. 
", "_____no_output_____" ] ], [ [ "class Decoder(tf.keras.layers.Layer):\n\n def __init__(self , num_layers , d_model , num_heads ,dff , target_vocab_size , maximum_position_encoding , rate = 0.1):\n super(Decoder , self).__init__()\n\n # Get the dimensions and number of layers \n self.d_model = d_model\n self.num_layers = num_layers\n\n # Embedding and positional encoding \n self.embedding = tf.keras.layers.Embedding(target_vocab_size , d_model)\n self.pos_encoding = positional_encoding(maximum_position_encoding , d_model)\n\n # Generating the decoder layers \n self.dec_layers = [DecoderLayer(d_model , num_heads , dff , rate) for _ in range(num_layers)]\n\n self.dropout = tf.keras.layers.Dropout(rate)\n\n \n # Initializing the forward pass \n def call(self , x , enc_output , training , look_ahead_mask , padding_mask):\n\n seq_len = tf.shape(x)[1]\n attention_weights = {} \n\n x = self.embedding(x) # (batch_size, target_seq_len , d_model)\n x *= tf.math.sqrt(tf.cast(self.d_model , tf.float32))\n x += self.pos_encoding[: , :seq_len , :]\n\n x = self.dropout(x , training = training)\n\n for i in range(self.num_layers):\n x , block1 , block2 = self.dec_layers[i](x , enc_output , training , \n look_ahead_mask , padding_mask)\n \n attention_weights[f'decoder_layer{i+1}_block1'] = block1\n attention_weights[f'decoder_layer{i+1}_block2'] = block2 \n\n # x.shape --> (batch_size , target_seq_len , d_model)\n # also we return attention_weights dictionary\n return x , attention_weights", "_____no_output_____" ], [ "# Passing the sample data \nsample_decoder = Decoder(2 , 512 , 8 , 2048 , 8000 , 5000)\ntemp_input = tf.random.uniform((64, 26), dtype = tf.int64 , minval = 0 , maxval = 200)\n\noutput , attn = sample_decoder(temp_input , \n enc_output = sample_encoder_output , \n training = False , \n look_ahead_mask = None , \n padding_mask = None)\n\n# Printing the output shape \noutput.shape , attn['decoder_layer2_block2'].shape", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:4: RuntimeWarning: divide by zero encountered in true_divide\n after removing the cwd from sys.path.\n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in multiply\n \"\"\"\n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:14: RuntimeWarning: invalid value encountered in sin\n \n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:17: RuntimeWarning: invalid value encountered in cos\n" ] ], [ [ "Alright we have all the modules and layers required for creating the whole transformer model. Lets put them altogether and create a Model class called Transformer. \n\nTalking about the masks where encoder takes only one masks whereas decoders takes two of them, \n- Encoder -> Encoder padding mask \n- Decoder -> Decoder padding mask and Look ahead mask \n\nwhere the look ahead mask helps to figure out the future tokens. 
", "_____no_output_____" ] ], [ [ "# Coding out the entire Transformer class \n\n\nclass Transformer(tf.keras.Model):\n def __init__(self , num_layers , d_model , num_heads , dff , input_vocab_size , \n target_vocab_size , pe_input , pe_target , rate = 0.1):\n super().__init__()\n\n # Initializing the Encoder and the Decoder + The file dense layer \n self.encoder = Encoder(num_layers , d_model , num_heads , dff , input_vocab_size , \n pe_input , rate)\n \n\n self.decoder = Decoder(num_layers , d_model , num_heads , dff , input_vocab_size , \n pe_input , rate)\n \n self.final_layer = tf.keras.layers.Dense(target_vocab_size)\n\n\n # Initializing the forward pass \n def call(self, inputs , training):\n\n # Keras model prefer if we pass all the inputs in the first segment \n # Unpacking \n inp , tar = inputs \n\n # Function to create masks \n enc_padding_mask , look_ahead_mask , dec_padding_mask = self.create_masks(inp , tar)\n\n # Getting the output from the encoder \n enc_output = self.encoder(inp , training , enc_padding_mask) # (batch_size , inp_seq_len , d_model)\n\n # Now the decoder (takes in the encoder output , look ahead mask , padding mask and the target)\n # dec_output.shape --> (batch__size , tar_seq_len , d_modek)\n dec_output , attention_weights = self.decoder(tar , enc_output , \n training , look_ahead_mask , dec_padding_mask)\n \n final_output = self.final_layer(dec_output) # (batch_size , tar_seq_len , target_vocab_size)\n\n return final_output , attention_weights \n\n # Function to create the masks \n def create_masks(self , inp , tar):\n\n # Encoder padding mask \n enc_padding_mask = create_padding(inp)\n\n\n # Used in the 2nd attention block in the decoder \n # This padding mask is used to mask the encoder ouputs \n dec_padding_mask = create_padding(inp)\n\n # Using in the 1 st attention block in the decoder \n # It is used to pad and mask future tokens in the input recieved by the decoder \n look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])\n dec_target_padding_mask = create_padding(tar)\n look_ahead_mask = tf.maximum(dec_target_padding_mask , look_ahead_mask)\n\n return enc_padding_mask , look_ahead_mask , dec_padding_mask \n\n", "_____no_output_____" ], [ "# Testing on the dummy data \nsample_transformer = Transformer(\n num_layers = 2 , d_model = 512 , num_heads = 8 , dff = 2048 , \n input_vocab_size = 8500 , target_vocab_size = 8000 , \n pe_input = 10000 , pe_target = 6000\n)\n\n# Passing dummy inputs \ntemp_input = tf.random.uniform((64 , 38) , dtype = tf.int64 , minval = 0 , maxval = 200 )\ntemp_targ = tf.random.uniform((64 , 36) , dtype = tf.int64 , minval = 0 , maxval = 200 )\n\n# Using the above Transformer valls \n# Passing in the input and output \nfn_out , _ = sample_transformer([temp_input , temp_targ] , training = False)\n\nfn_out.shape # (batch_size , tar_seq_len , target_vocab_size) ", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:4: RuntimeWarning: divide by zero encountered in true_divide\n after removing the cwd from sys.path.\n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in multiply\n \"\"\"\n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:14: RuntimeWarning: invalid value encountered in sin\n \n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:17: RuntimeWarning: invalid value encountered in cos\n" ] ], [ [ "### Setting the hyper parameters ", "_____no_output_____" ] ], [ [ "num_layers = 4 \nd_model = 128\ndff = 512 \nnum_heads = 8 
\ndropout_rate = 0.1 ", "_____no_output_____" ] ], [ [ "Using the Adam optimizer with a custom learning rate scheduler ", "_____no_output_____" ] ] ]
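The excerpt ends right after mentioning the Adam optimizer with a custom learning rate scheduler, before the scheduler itself appears. As an illustration of what that scheduler typically is — the warmup schedule from "Attention Is All You Need"; the class name `CustomSchedule` and the warmup value are assumptions, not taken from this notebook — a sketch could be:

```python
import tensorflow as tf

class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    # lr = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)
    def __init__(self, d_model, warmup_steps=4000):
        super().__init__()
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        step = tf.cast(step, tf.float32)
        arg1 = tf.math.rsqrt(step)                       # decay term
        arg2 = step * (self.warmup_steps ** -1.5)        # warmup term
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)

learning_rate = CustomSchedule(d_model=128)  # matches the d_model set above
optimizer = tf.keras.optimizers.Adam(learning_rate,
                                     beta_1=0.9, beta_2=0.98, epsilon=1e-9)
```

This gives a linear warmup followed by inverse-square-root decay, the standard choice when training a Transformer from scratch.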
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0bc89fd272500a16cb747a75afd13ecaaec99d9
40,385
ipynb
Jupyter Notebook
07-KNN, DT, Bayes/Homework.ipynb
PetrovaDaria/mlmathmech
49b215fe6b14a40303324fabe6f52df29cf1f334
[ "MIT" ]
null
null
null
07-KNN, DT, Bayes/Homework.ipynb
PetrovaDaria/mlmathmech
49b215fe6b14a40303324fabe6f52df29cf1f334
[ "MIT" ]
null
null
null
07-KNN, DT, Bayes/Homework.ipynb
PetrovaDaria/mlmathmech
49b215fe6b14a40303324fabe6f52df29cf1f334
[ "MIT" ]
null
null
null
41.209184
1,505
0.516182
[ [ [ "# Энтропия и критерий Джини\n\n$p_i$ - вероятность нахождения системы в i-ом состоянии.\n\nЭнтропия Шеннона определяется для системы с N возможными состояниями следующим образом\n\n$S = - \\sum_{i=1}^Np_ilog_2p_i$\n\t \nКритерий Джини (Gini Impurity). Максимизацию этого критерия можно интерпретировать как максимизацию числа пар объектов одного класса, оказавшихся в одном поддереве.\n\nВ общем случае критерий Джини считается как\n$G = 1 - \\sum_k(p_k)^2$\n \nНеобходимо посчитать, значения Энтропии и критерия Джини", "_____no_output_____" ] ], [ [ "import numpy as np\nimport math\n\ndef get_possibilities(y):\n count = len(y)\n uniq_values = set(y)\n possibilities = []\n for value in uniq_values:\n possibilities.append(len(y[y ==value]) / count)\n return possibilities\n\ndef gini_impurity(y: np.ndarray) -> float:\n possibilities = get_possibilities(y)\n sum = 0\n for p in possibilities:\n sum += p * p\n return round(1 - sum, 3)\n\ndef entropy(y: np.ndarray) -> float:\n possibilities = get_possibilities(y)\n sum = 0\n for p in possibilities:\n sum += p * math.log2(p)\n return round(-sum, 3)\n\ndef calc_criteria(y: np.ndarray) -> (float, float):\n assert y.ndim == 1\n return entropy(y), gini_impurity(y)\n\ny = np.array([1,1,1,1,1,1,0,0,0,0])\ncalc_criteria(y)", "_____no_output_____" ] ], [ [ "# Information gain\nВам надо реализовать функцию inform_gain, которая будет вычислять прирост информации для критерия (энтропия или критерий Джини) при разбиении выбрки по признаку (threshold).\n\nПрирост информации при разбиении выборки по признаку Q (например x≤12) определяется как\n\n$IG(Q)=S_0- \\sum_{i=1}^q\\frac{N_i}{N}S_i$\t\n \nгде q - число групп после разбиения. $N_i$ - число элементов выборки, у которых признак Q имеет i-ое значение.\n\nИ написать функцию get_best_threshold, которая будет находить наилучшее разбиение выборки.\n\nНа вход подается:\n\n- X - одномерный массив - значения признака.\n- y - значения бинарных классов.\n- criteria_func - функция критерия, для которой вычислется наилучшее разбиение (Добавлять код из предыдущей задачи не нужно, мы сами передадим нужную функцию).\n- thr - значение разбиения", "_____no_output_____" ] ], [ [ "import numpy as np\nimport math\n\ndef get_possibilities(y):\n count = len(y)\n y = list(y)\n uniq_values = set(y)\n possibilities = []\n for value in uniq_values:\n possibilities.append(len(list(filter(lambda x: x == value, y))) / count)\n return possibilities\n\ndef gini_impurity(y: np.ndarray) -> float:\n possibilities = get_possibilities(y)\n sum = 0\n for p in possibilities:\n sum += p * p\n return round(1 - sum, 3)\n\ndef entropy(y: np.ndarray) -> float:\n possibilities = get_possibilities(y)\n sum = 0\n for p in possibilities:\n sum += p * math.log2(p)\n return round(-sum, 3)\n\ndef len_check_criteria_func(arr, criteria_func):\n if len(arr) == 0:\n return 0\n else:\n return criteria_func(arr)\n\ndef inform_gain(X: np.ndarray, y: np.ndarray, threshold: float, criteria_func) -> float:\n s0 = criteria_func(y)\n count = y.shape[0]\n first = []\n second = []\n for i in range(count):\n if X[i] <= threshold:\n first.append(y[i])\n else:\n second.append(y[i])\n s1 = len_check_criteria_func(first, criteria_func)\n s2 = len_check_criteria_func(second, criteria_func)\n return s0 - len(first) / count * s1 - len(second) / count * s2\n \n\ndef get_best_threshold(X: np.ndarray, y: np.ndarray, criteria_func) -> (float, float):\n best_threshold = 0\n best_score = 0\n uniq_values = set(X)\n for value in uniq_values:\n score = inform_gain(X, y, value, 
criteria_func)\n if score > best_score:\n best_score = score\n best_threshold = value\n return best_threshold, best_score\n\nX = np.array([3, 9, 0, 4, 7, 2, 1, 6, 8, 5])\ny = np.array([0, 1, 0, 0, 1, 0, 0, 1, 1, 1])\nthreshold=3\ncriteria_func=entropy\nprint(inform_gain(X, y, threshold, criteria_func))\n\nX = np.array([3, 9, 0, 4, 7, 2, 1, 6, 8, 5])\ny = np.array([0, 1, 0, 0, 1, 0, 0, 1, 1, 1])\ncriteria_func=entropy\nget_best_threshold(X, y, criteria_func)", "0.61\n" ], [ "import math\n\nprint(1 -(-(5/6) * math.log2(5/6) - (1/6)* math.log2(1/6))* 0.6)", "0.6099865470109875\n" ] ], [ [ "# Best split\n\nРеализуйте функцию find_best_split, которая находит наилучшее разбиение по всем признакам. На вход подется обучающая выборка и функция критерий. Необходимо вернуть: индекс фичи, значение границы (threshold) и результат разбиение (information gain).\n", "_____no_output_____" ] ], [ [ "import math\n\ndef get_possibilities(y):\n count = len(y)\n y = list(y)\n uniq_values = set(y)\n possibilities = []\n for value in uniq_values:\n possibilities.append(len(list(filter(lambda x: x == value, y))) / count)\n return possibilities\n\ndef gini_impurity(y: np.ndarray) -> float:\n possibilities = get_possibilities(y)\n sum = 0\n for p in possibilities:\n sum += p * p\n return round(1 - sum, 3)\n\ndef entropy(y: np.ndarray) -> float:\n possibilities = get_possibilities(y)\n sum = 0\n for p in possibilities:\n sum += p * math.log2(p)\n return round(-sum, 3)\n\ndef inform_gain(X: np.ndarray, y: np.ndarray, threshold: float, criteria_func) -> float:\n s0 = criteria_func(y)\n count = y.shape[0]\n first = []\n second = []\n for i in range(count):\n if X[i] <= threshold:\n first.append(y[i])\n else:\n second.append(y[i])\n s1 = criteria_func(first)\n s2 = criteria_func(second)\n return s0 - len(first) / count * s1 - len(second) / count * s2\n \n\ndef get_best_threshold(X: np.ndarray, y: np.ndarray, criteria_func) -> (float, float):\n assert X.ndim == 1\n assert y.ndim == 1\n best_threshold = 0\n best_score = 0\n uniq_values = set(X)\n for value in uniq_values:\n score = inform_gain(X, y, value, criteria_func)\n if score > best_score:\n best_score = score\n best_threshold = value\n return best_threshold, best_score\n\ndef find_best_split(X, y, criteria_func):\n assert X.ndim == 2\n assert y.ndim == 1\n best_feature = 0\n best_score = 0\n best_threshold = 0\n \n for i in range(X.shape[1]):\n feature_column = X[:, i]\n threshold, score = get_best_threshold(feature_column, y, criteria_func)\n if score > best_score:\n best_score = score\n best_feature = i\n best_threshold = threshold\n \n return best_feature, best_threshold, best_score\n\nX = np.array([[1, 1], [1, -1], [-1,-1], [-1, 1]])\ny = np.array([1, 1, 0, 0])\ncriteria_func=entropy\nfind_best_split(X, y, criteria_func)", "_____no_output_____" ] ], [ [ "# Мое дерево решений\n\nВаша задача реализовать свой простой KNNClassifier для бинарных данных. Вам нужно реализовать 3 метода:\n\nfit - обучение классификатора\npredict - предсказание для новых объектов\npredict_proba - предсказание вероятностей новых объектов\nУ нашего классификатора будет лишь два гиперпараметра - максимальная глубина дерева max_depth и критерий разбиения criterion. Энтропия или Джини.\n\nВсе функции из предыдущих заданий нужно добавить в этот код.\n\nНа вход будет подаваться выборка объектов X. 
y - результат бинарной классификации 0 или 1.\n\n", "_____no_output_____" ] ], [ [ "import math\nimport numpy as np\nfrom sklearn.base import BaseEstimator, ClassifierMixin\n\nclass MyDecisionTreeClassifier(BaseEstimator, ClassifierMixin):\n def __init__(self, max_depth=4, criterion='entropy'): \n self.eps = 0.001\n self.max_depth = max_depth\n self.criterion = criterion # 'entropy' or 'gini' \n self.tree = {}\n self._criteria_func = {\n 'gini': self._gini_impurity,\n 'entropy': self._entropy\n }\n \n def _get_possibilities(self, y):\n count = len(y)\n y = list(y)\n uniq_values = set(y)\n possibilities = []\n for value in uniq_values:\n possibilities.append(len(list(filter(lambda x: x == value, y))) / count)\n return possibilities\n\n def _entropy(self, y: np.ndarray) -> float:\n possibilities = self._get_possibilities(y)\n sum = 0\n for p in possibilities:\n sum += p * math.log2(p)\n return round(-sum, 3)\n\n def _gini_impurity(self, y: np.ndarray) -> float:\n possibilities = self._get_possibilities(y)\n sum = 0\n for p in possibilities:\n sum += p * p\n return round(1 - sum, 3)\n \n def _inform_gain(self, X: np.ndarray, y: np.ndarray, threshold: float, criteria_func) -> float:\n print('X ', X)\n print('threshold ', threshold)\n print('y ', y)\n s0 = criteria_func(y)\n count = y.shape[0]\n first = []\n second = []\n for i in range(count):\n if X[i] < threshold - self.eps:\n first.append(y[i])\n else:\n second.append(y[i])\n print('first ', first)\n print('second ', second)\n s1 = criteria_func(first)\n s2 = criteria_func(second)\n return s0 - len(first) / count * s1 - len(second) / count * s2\n \n def _get_best_threshold(self, X: np.ndarray, y: np.ndarray, criteria_func) -> (float, float):\n found_bigger_score = False\n best_threshold = 0\n best_score = 0\n uniq_values = set(X)\n for value in uniq_values:\n score = self._inform_gain(X, y, value, criteria_func)\n print('value ', value, ' score ', score)\n if score > best_score:\n found_bigger_score = True\n best_score = score\n best_threshold = value\n if found_bigger_score:\n return best_threshold, best_score\n return None, None\n \n def _find_best_split(self, X, y, criteria_func):\n best_feature = 0\n best_score = 0\n best_threshold = 0\n found_best = False\n\n for i in range(X.shape[1]):\n feature_column = X[:, i]\n threshold, score = self._get_best_threshold(feature_column, y, criteria_func)\n \n print('X ', feature_column)\n print('y ', y)\n print('column ', i, ' threshold ', threshold, ' score ', score)\n \n if score is None: \n continue\n \n if score > best_score:\n found_best = True\n best_score = score\n best_feature = i\n best_threshold = threshold\n if found_best:\n return best_feature, best_threshold\n return None, None\n \n \n def _get_biggest_class(self, y):\n y = list(y)\n return max(set(y), key = y.count)\n \n def _get_probs(self, y):\n count = y.shape[0]\n ones_count = np.count_nonzero(y == 1)\n null_count = count - ones_count\n return [null_count / count, ones_count / count]\n \n def _build_tree(self, X, y, depth=0):\n if depth == 0:\n return\n \n is_leaf = False\n \n split_feature, split_value = self._find_best_split(X, y, self._criteria_func[self.criterion])\n \n if split_feature is None and split_value is None:\n val = self._get_biggest_class(y)\n return {'cond': val, 'leaf': True}\n \n left_inds = X[:, split_feature] < split_value - self.eps\n right_inds = X[:, split_feature] >= split_value - self.eps\n\n left_tree = self._build_tree(X[left_inds], y[left_inds], depth - 1)\n right_tree = self._build_tree(X[right_inds], 
y[right_inds], depth - 1)\n \n if left_tree is None and right_tree is None:\n is_leaf = True\n \n if is_leaf and split_feature is not None:\n biggest_class = self._get_biggest_class(y)\n proba = self._get_probs(y)\n \n return {'cond': biggest_class, 'leaf': True, 'proba': proba}\n \n\n return {'cond': (split_feature, split_value), 'leaf': is_leaf,\n 'left': left_tree, 'right': right_tree}\n \n def fit(self, X: np.ndarray, y: np.ndarray):\n self.tree = self._build_tree(X, y, depth=self.max_depth)\n return self \n \n def _predict(self, X):\n predictions = []\n proba = []\n for elem in X:\n current_tree = self.tree\n while type(current_tree['cond']) is tuple:\n feature = current_tree['cond'][0]\n value = current_tree['cond'][1]\n if elem[feature] < value - self.eps:\n current_tree = current_tree['left']\n else:\n current_tree = current_tree['right']\n value = current_tree['cond']\n if 'proba' in current_tree:\n proba.append(current_tree['proba'])\n elif value == 0:\n proba.append([1.0, 0.0])\n else:\n proba.append([0.0, 1.0])\n predictions.append(value)\n return predictions, proba\n \n def predict_proba(self, X: np.ndarray):\n _, proba = self._predict(X)\n return proba\n \n def predict(self, X: np.ndarray): # получаем\n predictions, _ = self._predict(X)\n return predictions\n\n# X_clf = np.array([[-1, 1], [-1, -1], [2.5, 1], [1, 1], [2, 2], [1, -1]])\n# y_clf = np.array([0, 0, 0, 1, 1, 1])\n\nX_clf = np.array([[1, 1], [2, -1], [-1, -1], [-1, -4], [2, 3], [3, 1]])\ny_clf = np.array([1, 1, 0, 0, 0, 0])\n\nmodel = MyDecisionTreeClassifier(max_depth=3, criterion='entropy').fit(X_clf, y_clf)\nprint(model.tree)\ny_pred = model.predict(np.array([[2, 1], [0.5, 1]])) \nprint(y_pred) # np.array([1, 0])\ny_prob = model.predict_proba(np.array([[2, 1], [0.5, 1]])) \nprint(y_prob) #np.array([[0.0, 1.0], [1.0, 0.0]])\n\n", "X [ 1 2 -1 -1 2 3]\nthreshold 1\ny [1 1 0 0 0 0]\nfirst [0, 0]\nsecond [1, 1, 0, 0]\nvalue 1 score 0.2513333333333334\nX [ 1 2 -1 -1 2 3]\nthreshold 2\ny [1 1 0 0 0 0]\nfirst [1, 0, 0]\nsecond [1, 0, 0]\nvalue 2 score 0.0\nX [ 1 2 -1 -1 2 3]\nthreshold 3\ny [1 1 0 0 0 0]\nfirst [1, 1, 0, 0, 0]\nsecond [0]\nvalue 3 score 0.10883333333333334\nX [ 1 2 -1 -1 2 3]\nthreshold -1\ny [1 1 0 0 0 0]\nfirst []\nsecond [1, 1, 0, 0, 0, 0]\nvalue -1 score 0.0\nX [ 1 2 -1 -1 2 3]\ny [1 1 0 0 0 0]\ncolumn 0 threshold 1 score 0.2513333333333334\nX [ 1 -1 -1 -4 3 1]\nthreshold 1\ny [1 1 0 0 0 0]\nfirst [1, 0, 0]\nsecond [1, 0, 0]\nvalue 1 score 0.0\nX [ 1 -1 -1 -4 3 1]\nthreshold 3\ny [1 1 0 0 0 0]\nfirst [1, 1, 0, 0, 0]\nsecond [0]\nvalue 3 score 0.10883333333333334\nX [ 1 -1 -1 -4 3 1]\nthreshold -4\ny [1 1 0 0 0 0]\nfirst []\nsecond [1, 1, 0, 0, 0, 0]\nvalue -4 score 0.0\nX [ 1 -1 -1 -4 3 1]\nthreshold -1\ny [1 1 0 0 0 0]\nfirst [0]\nsecond [1, 1, 0, 0, 0]\nvalue -1 score 0.10883333333333334\nX [ 1 -1 -1 -4 3 1]\ny [1 1 0 0 0 0]\ncolumn 1 threshold 3 score 0.10883333333333334\nX [-1 -1]\nthreshold -1\ny [0 0]\nfirst []\nsecond [0, 0]\nvalue -1 score 0.0\nX [-1 -1]\ny [0 0]\ncolumn 0 threshold None score None\nX [-1 -4]\nthreshold -4\ny [0 0]\nfirst []\nsecond [0, 0]\nvalue -4 score 0.0\nX [-1 -4]\nthreshold -1\ny [0 0]\nfirst [0]\nsecond [0]\nvalue -1 score 0.0\nX [-1 -4]\ny [0 0]\ncolumn 1 threshold None score None\nX [1 2 2 3]\nthreshold 1\ny [1 1 0 0]\nfirst []\nsecond [1, 1, 0, 0]\nvalue 1 score 0.0\nX [1 2 2 3]\nthreshold 2\ny [1 1 0 0]\nfirst [1]\nsecond [1, 0, 0]\nvalue 2 score 0.3115\nX [1 2 2 3]\nthreshold 3\ny [1 1 0 0]\nfirst [1, 1, 0]\nsecond [0]\nvalue 3 score 0.3115\nX [1 2 2 3]\ny [1 1 0 
0]\ncolumn 0 threshold 2 score 0.3115\nX [ 1 -1 3 1]\nthreshold 1\ny [1 1 0 0]\nfirst [1]\nsecond [1, 0, 0]\nvalue 1 score 0.3115\nX [ 1 -1 3 1]\nthreshold 3\ny [1 1 0 0]\nfirst [1, 1, 0]\nsecond [0]\nvalue 3 score 0.3115\nX [ 1 -1 3 1]\nthreshold -1\ny [1 1 0 0]\nfirst []\nsecond [1, 1, 0, 0]\nvalue -1 score 0.0\nX [ 1 -1 3 1]\ny [1 1 0 0]\ncolumn 1 threshold 1 score 0.3115\nX [1]\nthreshold 1\ny [1]\nfirst []\nsecond [1]\nvalue 1 score 0.0\nX [1]\ny [1]\ncolumn 0 threshold None score None\nX [1]\nthreshold 1\ny [1]\nfirst []\nsecond [1]\nvalue 1 score 0.0\nX [1]\ny [1]\ncolumn 1 threshold None score None\nX [2 2 3]\nthreshold 2\ny [1 0 0]\nfirst []\nsecond [1, 0, 0]\nvalue 2 score 0.0\nX [2 2 3]\nthreshold 3\ny [1 0 0]\nfirst [1, 0]\nsecond [0]\nvalue 3 score 0.2513333333333334\nX [2 2 3]\ny [1 0 0]\ncolumn 0 threshold 3 score 0.2513333333333334\nX [-1 3 1]\nthreshold 1\ny [1 0 0]\nfirst [1]\nsecond [0, 0]\nvalue 1 score 0.918\nX [-1 3 1]\nthreshold 3\ny [1 0 0]\nfirst [1, 0]\nsecond [0]\nvalue 3 score 0.2513333333333334\nX [-1 3 1]\nthreshold -1\ny [1 0 0]\nfirst []\nsecond [1, 0, 0]\nvalue -1 score 0.0\nX [-1 3 1]\ny [1 0 0]\ncolumn 1 threshold 1 score 0.918\n{'cond': (0, 1), 'leaf': False, 'left': {'cond': 0, 'leaf': True}, 'right': {'cond': (0, 2), 'leaf': False, 'left': {'cond': 1, 'leaf': True}, 'right': {'cond': 0, 'leaf': True, 'proba': [0.6666666666666666, 0.3333333333333333]}}}\n[0, 0]\n[[0.6666666666666666, 0.3333333333333333], [1.0, 0.0]]\n" ] ], [ [ "# Наивный Байес\n\nТребуется написать свой классификтор, на основе наивного баеса. Необходимо реализовать аналог MultinomialNB.\n\n$y_{test}=argmax_cln(P(y_{test}=c))+\\sum_{j=1}^mln(P(f_j|y_{test}=c)+ \\alpha)$, c∈{0,1}\n\nНа вход подаются численные категориальные признаки. Классы: 00 и 11. 
У классификатора будет единственный параметр - alpha.", "_____no_output_____" ] ], [ [ "from sklearn.base import BaseEstimator, ClassifierMixin\nfrom collections import defaultdict\nfrom math import log, inf\nimport numpy as np\n\nclass MyNaiveBayes(BaseEstimator, ClassifierMixin):\n def __init__(self, alpha=1):\n self.alpha = alpha\n self.classes = [0, 1]\n self.class_counts = {0: 0, 1: 0}\n self.class_possibilities = {0: 0, 1: 0}\n self.indicators = {0: {}, 1: {}}\n \n def fit(self, X: np.ndarray, y: np.ndarray):\n self.class_counts = {0: 0, 1: 0}\n self.class_possibilities = {0: 0, 1: 0}\n self.indicators = {0: {}, 1: {}}\n \n n = y.shape[0]\n features_len = len(X[0])\n for cls in self.classes:\n for j in range(features_len):\n self.indicators[cls][j] = {}\n \n for i in range(n):\n cls = y[i]\n self.class_counts[cls] += 1\n for feature_num in range(features_len):\n feature_value = X[i][feature_num]\n if feature_value not in self.indicators[cls][feature_num].keys():\n self.indicators[cls][feature_num][feature_value] = 0\n self.indicators[cls][feature_num][feature_value] += 1\n \n for cls in self.classes:\n self.class_possibilities[cls] = self.class_counts[cls] / n\n \n return self\n \n def predict(self, X: np.ndarray):\n features_len = len(X[0])\n result = []\n for obj in X:\n max_value = -inf\n result_cls = None\n for cls in self.classes:\n value = log(self.class_possibilities[cls])\n for feature_num in range(features_len):\n feature_value = obj[feature_num]\n if feature_value not in self.indicators[cls][feature_num].keys():\n value += log(self.alpha)\n else:\n value += log(self.indicators[cls][feature_num][feature_value] / self.class_counts[cls] + self.alpha)\n if value > max_value:\n max_value = value\n result_cls = cls \n result.append(result_cls)\n return result\n \nX_clf = np.array([[1, 1], [1, -1], [-1,-1], [-1, 1]])\ny_clf = np.array([1, 1, 0, 0])\n\nmodel = MyNaiveBayes(alpha=1).fit(X_clf, y_clf)\n\nprint(model.class_counts)\nprint(model.class_possibilities)\nprint(model.indicators)\n\ny_pred = model.predict(np.array([[1, 2], [-1, -2]]))\nprint(y_pred) # [1, 0]", "{0: 2, 1: 2}\n{0: 0.5, 1: 0.5}\n{0: {0: {-1: 2}, 1: {-1: 1, 1: 1}}, 1: {0: {1: 2}, 1: {1: 1, -1: 1}}}\n[1, 1, 0]\n" ], [ "from nltk.tokenize import WordPunctTokenizer, TweetTokenizer\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom nltk.corpus import stopwords\nimport pandas as pd\nimport numpy as np\nimport math\nimport re\n\ncommon_words = ['was', 'were', 'and', 'you', 'the', 'did']\nstops = set(stopwords.words(\"english\")) \n\ndef preprocess(df: pd.DataFrame):\n wp = WordPunctTokenizer()\n size = df.shape[0]\n preprocessed = []\n \n for i in range(size):\n sentence = df.iloc[i]['text']\n# sentence_parts = sentence.split(' ')\n# sentence_parts = list(filter(lambda x: '@' not in x and '#' not in x, sentence_parts))\n# sentence = ' '.join(sentence_parts)\n tokenized = wp.tokenize(sentence)\n tokenized = list(filter(lambda x: \n len(x) > 2 and \n x not in common_words and\n re.search('\\d+', x) is None, tokenized))\n new_sentence = ' '.join(tokenized)\n preprocessed.append(new_sentence)\n \n return preprocessed\n\ndef predict(df_train: pd.DataFrame, df_test: pd.DataFrame):\n predictions = df_train[:]['airline_sentiment']\n positive_train = df_train[df_train['airline_sentiment'] == 'positive']\n negative_train = df_train[df_train['airline_sentiment'] == 'negative']\n \n positive_indices = positive_train.index.tolist()\n negative_indices = 
negative_train.index.tolist()\n \n positive_len = len(positive_indices)\n negative_len = len(negative_indices)\n whole_len = positive_len + negative_len\n \n preprocessed_train = preprocess(df_train) \n vectorizer = CountVectorizer()\n \n X = vectorizer.fit_transform(preprocessed_train)\n frequencies = np.array(X.toarray())\n \n# preprocessed_test = preprocess(df_test)\n# X_test = vectorizer.fit_transform(preprocessed_test)\n# test_frequencies = np.array(X_test.toarray())\n \n# clf = MultinomialNB()\n# clf.fit(frequencies, predictions)\n \n# test_predict = clf.predict(test_frequencies)\n# print(test_predict)\n \n all_words = vectorizer.get_feature_names()\n all_words_dict = dict((all_words[i], i) for i in range(len(all_words)))\n\n positive_frequencies = frequencies[positive_indices]\n negative_frequencies = frequencies[negative_indices]\n \n word_counts = frequencies.sum(axis=0)\n positive_word_counts = positive_frequencies.sum(axis=0)\n negative_word_counts = negative_frequencies.sum(axis=0)\n \n positive_word_frequencies = (positive_word_counts / word_counts) / positive_len * whole_len\n negative_word_frequencies = (negative_word_counts / word_counts) / negative_len * whole_len\n \n preprocessed_test = preprocess(df_test)\n predictions = []\n \n eps = 0.001\n logged_eps = math.log(eps)\n wp = WordPunctTokenizer()\n \n for i in range(len(preprocessed_test)):\n sentence = preprocessed_test[i]\n sentence_words = wp.tokenize(sentence)\n positive_sum = 0\n negative_sum = 0\n for word in sentence_words:\n if word in all_words_dict.keys():\n word_index = all_words_dict[word]\n positive_sum += math.log(positive_word_frequencies[word_index] + eps)\n negative_sum += math.log(negative_word_frequencies[word_index] + eps)\n else:\n positive_sum += logged_eps\n negative_sum += logged_eps\n if positive_sum >= negative_sum:\n predictions.append('positive')\n else:\n predictions.append('negative') \n return predictions\n\ntrain = pd.read_csv('./tweets_train.csv')\ntest = pd.read_csv('./tweets_test.csv')\npredictions = predict(train, test)\ntest_sentiments = test[:]['airline_sentiment'].tolist()\ntrue = 0\ni = 0\nfor x, y in zip(predictions, test_sentiments):\n if x == y: \n true += 1\n# else:\n# print(i)\n# print('true ', y)\n# print(test.loc[i]['text'])\n i += 1\nprint(true / len(predictions))\n# print(predictions)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0bc94b8042bc28f05b1a64b3e5643ba0fea35a0
3,440
ipynb
Jupyter Notebook
Python Basics/First.ipynb
kashish-goel/Machine-Learning
22e1074dfa1153ded24035b3701635d81bb2af9a
[ "MIT" ]
null
null
null
Python Basics/First.ipynb
kashish-goel/Machine-Learning
22e1074dfa1153ded24035b3701635d81bb2af9a
[ "MIT" ]
null
null
null
Python Basics/First.ipynb
kashish-goel/Machine-Learning
22e1074dfa1153ded24035b3701635d81bb2af9a
[ "MIT" ]
null
null
null
23.087248
99
0.459302
[ [ [ "print(4+5)\nprint(4-5)\nprint(4*5)\nprint(43/5)\nprint(43%5)\nprint(43//5)", "9\n-1\n20\n8.6\n3\n8\n" ], [ "age = 10\nif age >=18:\n print(\"Eligible to vote\")\nelse:\n print(\"Not eligible to vote\")", "Not eligible to vote\n" ] ], [ [ "# Check\n 0-3 - Toddler\n Above 3 - 12 - Kid\n Above 12 - 19 - Teen\n Above 19 - 30 - Young Adults\n Above 30 - 50 - Adults\n Above 50 - 60 - Mature\n Above 60 - 80 - Senior Citizens\n Above 80 - Super Senior Citizens", "_____no_output_____" ] ], [ [ "age = input(\"Enter age \")\nage = int(age)\nprint(type(age))\nif age <=3:\n print(\"Toddler\")\nelif age > 3 and age <= 12:\n print(\"Kid\")\nelif age > 12 and age <= 19:\n print(\"Teen\")\nelif age > 19 and age <= 30:\n print(\"Young Adult\")\nelif age > 30 and age < 50:\n print(\"Adult\")\nelif age >= 50 and age < 60:\n print(\"mature\")\nelif age >= 60 and age < 80:\n print(\"Senior Citizens\")\nelse:\n print(\"Super Senior Citizens\")", "Enter age 45\n<class 'int'>\nAdult\n" ] ], [ [ "# Exercise 1\n 1. WAP to check is a number is dibvisible by 5\n 2. WAP to check is a number is even or odd\n 3. WAP to check if roots of a quatratic equation are real, real and equal or imaginary\n 4. WAP to print grade of a student \n Less than 40 - NC\n 40 - Less than 50 - D\n 50 - Less than 60 - C\n 60 - Less than 70 - B\n 70 - Less than 80 - A\n 80 and Above - O\n 5. WAP print the Electricity Bill\n Upto 200 - 0.5/unit\n 201 - 500 - 1/unit for units consumed above 200\n 501 - 1000 - 2.5/unit for units consumed above 500\n 1001 - 1500 - 3.5/unit for units consumed above 1000\n 1501 - 200 - 1/unit for units consumed above 200", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0bcad43532e8d709bc5b97c4adabe0a33de1172
1,609
ipynb
Jupyter Notebook
04. Python Numpy/22 memory maps.ipynb
yacper/Quant101
92048ed93e8d053bb258b126d86844bd84efca2e
[ "Apache-2.0" ]
null
null
null
04. Python Numpy/22 memory maps.ipynb
yacper/Quant101
92048ed93e8d053bb258b126d86844bd84efca2e
[ "Apache-2.0" ]
null
null
null
04. Python Numpy/22 memory maps.ipynb
yacper/Quant101
92048ed93e8d053bb258b126d86844bd84efca2e
[ "Apache-2.0" ]
null
null
null
20.628205
116
0.472965
[ [ [ "# 内存映射", "_____no_output_____" ], [ "**Numpy** 有对内存映射的支持。\n\n内存映射也是一种处理文件的方法,主要的函数有:\n\n- `memmap`\n- `frombuffer`\n- `ndarray constructor`\n\n内存映射文件与虚拟内存有些类似,通过内存映射文件可以保留一个地址空间的区域,同时将物理存储器提交给此区域,内存文件映射的物理存储器来自一个已经存在于磁盘上的文件,而且在对该文件进行操作之前必须首先对文件进行映射。\n\n使用内存映射文件处理存储于磁盘上的文件时,将不必再对文件执行I/O操作,使得内存映射文件在处理大数据量的文件时能起到相当重要的作用。", "_____no_output_____" ], [ "## memmap", "_____no_output_____" ], [ " memmap(filename,\n dtype=uint8,\n mode='r+'\n offset=0\n shape=None\n order=0)\n\n`mode` 表示文件被打开的类型: \n\n- `r` 只读\n- `c` 复制+写,但是不改变源文件\n- `r+` 读写,使用 `flush` 方法会将更改的内容写入文件\n- `w+` 写,如果存在则将数据覆盖\n\n`offset` 表示从第几个位置开始。", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ] ]
d0bcaf779aa2958e2324e6aa875e5bde450a4eda
51,221
ipynb
Jupyter Notebook
IMDB_in _Keras.ipynb
adityasaxena26/analyzing-IMDb-data-in-Keras
da4c41bdc343f688509366693e0af54a7aa4b75e
[ "MIT" ]
null
null
null
IMDB_in _Keras.ipynb
adityasaxena26/analyzing-IMDb-data-in-Keras
da4c41bdc343f688509366693e0af54a7aa4b75e
[ "MIT" ]
null
null
null
IMDB_in _Keras.ipynb
adityasaxena26/analyzing-IMDb-data-in-Keras
da4c41bdc343f688509366693e0af54a7aa4b75e
[ "MIT" ]
null
null
null
40.14185
881
0.525663
[ [ [ "import numpy as np\nimport keras\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.preprocessing.text import Tokenizer\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nnp.random.seed(42)", "Using TensorFlow backend.\n" ] ], [ [ "## Loading the data\n\nThe dataset comes preloaded in Keras, which means I don't need to open or read any files manually and one simple command will get us training and testing data. The command to load the data will actually split the words into training and testing sets and labels. There is a parameter for how many words we want to look at. I am setting it at 1000.", "_____no_output_____" ] ], [ [ "# load the data(it's comes preloaded with Keras)\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=1000)\n\nprint(x_train.shape)\nprint(x_test.shape)", "(25000,)\n(25000,)\n" ] ], [ [ "## Examining the data\nNotice that the data has been already pre-processed, where all the words have numbers, and the reviews come in as a vector with the words that the review contains. For example, if the word 'the' is the first one in our dictionary, and a review contains the word 'the', then there is a 1 in the corresponding vector.\n\nThe output comes as a vector of 1's and 0's, where 1 is a positive sentiment for the review, and 0 is negative.", "_____no_output_____" ] ], [ [ "print(x_train[0])\nprint(y_train[0])", "[1, 14, 22, 16, 43, 530, 973, 2, 2, 65, 458, 2, 66, 2, 4, 173, 36, 256, 5, 25, 100, 43, 838, 112, 50, 670, 2, 9, 35, 480, 284, 5, 150, 4, 172, 112, 167, 2, 336, 385, 39, 4, 172, 2, 2, 17, 546, 38, 13, 447, 4, 192, 50, 16, 6, 147, 2, 19, 14, 22, 4, 2, 2, 469, 4, 22, 71, 87, 12, 16, 43, 530, 38, 76, 15, 13, 2, 4, 22, 17, 515, 17, 12, 16, 626, 18, 2, 5, 62, 386, 12, 8, 316, 8, 106, 5, 4, 2, 2, 16, 480, 66, 2, 33, 4, 130, 12, 16, 38, 619, 5, 25, 124, 51, 36, 135, 48, 25, 2, 33, 6, 22, 12, 215, 28, 77, 52, 5, 14, 407, 16, 82, 2, 8, 4, 107, 117, 2, 15, 256, 4, 2, 7, 2, 5, 723, 36, 71, 43, 530, 476, 26, 400, 317, 46, 7, 4, 2, 2, 13, 104, 88, 4, 381, 15, 297, 98, 32, 2, 56, 26, 141, 6, 194, 2, 18, 4, 226, 22, 21, 134, 476, 26, 480, 5, 144, 30, 2, 18, 51, 36, 28, 224, 92, 25, 104, 4, 226, 65, 16, 38, 2, 88, 12, 16, 283, 5, 16, 2, 113, 103, 32, 15, 16, 2, 19, 178, 32]\n1\n" ] ], [ [ "## One-hot encoding the data\nNow, let's turn the input vectors into (0,1)-vectors. For example, if the pre-processed vector contains the number 14, then in the processed vector, the 14th entry will be 1.", "_____no_output_____" ] ], [ [ "# one-hot encoding the input into vector mode, each of length 1000\ntokenizer = Tokenizer(num_words=1000)\nx_train = tokenizer.sequences_to_matrix(x_train, mode='binary')\nx_test = tokenizer.sequences_to_matrix(x_test, mode='binary')\nprint(x_train[0])", "[0. 1. 1. 0. 1. 1. 1. 1. 1. 1. 0. 0. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 0.\n 0. 1. 1. 0. 1. 0. 1. 0. 1. 1. 0. 1. 1. 0. 1. 1. 0. 0. 0. 1. 0. 0. 1. 0.\n 1. 0. 1. 1. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 1. 0. 0. 1. 1. 0. 0. 0. 0. 1.\n 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 1. 1. 0. 0. 0. 1. 0. 0. 0.\n 0. 0. 1. 0. 1. 0. 0. 1. 1. 0. 1. 1. 0. 0. 0. 0. 1. 1. 0. 0. 0. 1. 0. 0.\n 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 1. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 1. 0. 0.\n 1. 0. 0. 1. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.\n 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 1. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.\n 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 1. 0. 0. 0. 0. 
0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.\n 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 1.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0.\n 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0.\n 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n" ] ], [ [ "And we'll also one-hot encode the output.", "_____no_output_____" ] ], [ [ "# one-hot encoding the output\nnum_classes = 2\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\nprint(y_train.shape)\nprint(y_test.shape)", "(25000, 2)\n(25000, 2)\n" ] ], [ [ "## Building the model architecture", "_____no_output_____" ] ], [ [ "# build the model architecture with one layer of length 100\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_dim=1000))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\n\n# compile the model using categorical_crossentropy loss, and rmsprop optimizer.\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])", "WARNING: Logging before flag parsing goes to stderr.\nW0719 18:29:40.188429 6340 deprecation_wrapper.py:119] From c:\\users\\aditya\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:74: The name tf.get_default_graph is deprecated. 
Please use tf.compat.v1.get_default_graph instead.\n\nW0719 18:29:40.672129 6340 deprecation_wrapper.py:119] From c:\\users\\aditya\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:517: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n\nW0719 18:29:40.812438 6340 deprecation_wrapper.py:119] From c:\\users\\aditya\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:4138: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.\n\nW0719 18:29:40.968436 6340 deprecation_wrapper.py:119] From c:\\users\\aditya\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:133: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n\nW0719 18:29:41.015259 6340 deprecation.py:506] From c:\\users\\aditya\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\nW0719 18:29:41.140060 6340 deprecation_wrapper.py:119] From c:\\users\\aditya\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\keras\\optimizers.py:790: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n\nW0719 18:29:41.249304 6340 deprecation_wrapper.py:119] From c:\\users\\aditya\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3295: The name tf.log is deprecated. Please use tf.math.log instead.\n\n" ] ], [ [ "## Training the model", "_____no_output_____" ] ], [ [ "# train the model\nhist = model.fit(x_train, y_train,\n batch_size=32,\n epochs=10,\n validation_data=(x_test, y_test), \n verbose=2)", "W0719 18:56:13.029227 6340 deprecation.py:323] From c:\\users\\aditya\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\tensorflow\\python\\ops\\math_grad.py:1250: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\n" ] ], [ [ "## Evaluating the model", "_____no_output_____" ] ], [ [ "# evaluate the model\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Accuracy: \", score[1])", "Accuracy: 0.85532\n" ] ], [ [ "The trained model has an accuracy of 85.53%. Let's make some changes in our model architecture to improve the accuracy. It might be possible by adding one more hidden layer and dropout to reduce overfitting. 
Let's explore now.", "_____no_output_____" ] ], [ [ "# build the model architecture with one layer of length 100\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_dim=1000))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(256, activation='relu')) # newly added layer \nmodel.add(Dropout(0.3)) # added dropout regularization of 0.3\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\n\n# compile the model using categorical_crossentropy loss, and rmsprop optimizer.\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_3 (Dense) (None, 512) 512512 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 256) 131328 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_5 (Dense) (None, 2) 514 \n=================================================================\nTotal params: 644,354\nTrainable params: 644,354\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# train the model\nhist = model.fit(x_train, y_train,\n batch_size=32,\n epochs=10,\n validation_data=(x_test, y_test), \n verbose=2)", "Train on 25000 samples, validate on 25000 samples\nEpoch 1/10\n - 29s - loss: 0.4207 - acc: 0.8124 - val_loss: 0.3674 - val_acc: 0.8474\nEpoch 2/10\n - 26s - loss: 0.3571 - acc: 0.8575 - val_loss: 0.3489 - val_acc: 0.8595\nEpoch 3/10\n - 26s - loss: 0.3342 - acc: 0.8680 - val_loss: 0.3371 - val_acc: 0.8624\nEpoch 4/10\n - 27s - loss: 0.3219 - acc: 0.8742 - val_loss: 0.3659 - val_acc: 0.8626\nEpoch 5/10\n - 29s - loss: 0.3154 - acc: 0.8789 - val_loss: 0.3689 - val_acc: 0.8623\nEpoch 6/10\n - 29s - loss: 0.3092 - acc: 0.8860 - val_loss: 0.3564 - val_acc: 0.8591\nEpoch 7/10\n - 30s - loss: 0.3009 - acc: 0.8921 - val_loss: 0.3561 - val_acc: 0.8608\nEpoch 8/10\n - 28s - loss: 0.2891 - acc: 0.9012 - val_loss: 0.4220 - val_acc: 0.8618\nEpoch 9/10\n - 29s - loss: 0.2762 - acc: 0.9055 - val_loss: 0.3971 - val_acc: 0.8592\nEpoch 10/10\n - 30s - loss: 0.2587 - acc: 0.9150 - val_loss: 0.4278 - val_acc: 0.8574\n" ], [ "# evaluate the model\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Accuracy: \", score[1])", "Accuracy: 0.85736\n" ] ], [ [ "Although small, the new model with an extra hidden and a dropout layer shows the accuracy of 85.73% which is higher than the previous model.", "_____no_output_____" ], [ "Let's experiment with applying reduced dropout of 0.2 and 0.1 to the corresponding dropout layers.", "_____no_output_____" ] ], [ [ "# build the model architecture with one layer of length 100\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_dim=1000))\nmodel.add(Dropout(0.2)) # changed dropout to 0.2 from 0.5\nmodel.add(Dense(256, activation='relu')) # newly added layer \nmodel.add(Dropout(0.1)) # changed dropout to 0.1 from 0.3\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\n\n# compile the model using categorical_crossentropy loss, and rmsprop optimizer.\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n", 
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_9 (Dense) (None, 512) 512512 \n_________________________________________________________________\ndropout_6 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_10 (Dense) (None, 256) 131328 \n_________________________________________________________________\ndropout_7 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_11 (Dense) (None, 2) 514 \n=================================================================\nTotal params: 644,354\nTrainable params: 644,354\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# train the model\nhist = model.fit(x_train, y_train,\n batch_size=32,\n epochs=10,\n validation_data=(x_test, y_test), \n verbose=2)", "Train on 25000 samples, validate on 25000 samples\nEpoch 1/10\n - 28s - loss: 0.3951 - acc: 0.8311 - val_loss: 0.4281 - val_acc: 0.8294\nEpoch 2/10\n - 26s - loss: 0.3339 - acc: 0.8664 - val_loss: 0.3739 - val_acc: 0.8537\nEpoch 3/10\n - 29s - loss: 0.3001 - acc: 0.8839 - val_loss: 0.3587 - val_acc: 0.8608\nEpoch 4/10\n - 29s - loss: 0.2608 - acc: 0.9058 - val_loss: 0.4144 - val_acc: 0.8500\nEpoch 5/10\n - 29s - loss: 0.2155 - acc: 0.9285 - val_loss: 0.4353 - val_acc: 0.8513\nEpoch 6/10\n - 27s - loss: 0.1708 - acc: 0.9478 - val_loss: 0.5275 - val_acc: 0.8510\nEpoch 7/10\n - 27s - loss: 0.1335 - acc: 0.9652 - val_loss: 0.6446 - val_acc: 0.8369\nEpoch 8/10\n - 28s - loss: 0.1184 - acc: 0.9730 - val_loss: 0.8396 - val_acc: 0.8350\nEpoch 9/10\n - 32s - loss: 0.1054 - acc: 0.9782 - val_loss: 0.7660 - val_acc: 0.8441\nEpoch 10/10\n - 29s - loss: 0.0916 - acc: 0.9817 - val_loss: 0.9427 - val_acc: 0.8448\n" ], [ "# evaluate the model\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Accuracy: \", score[1])", "Accuracy: 0.8448\n" ] ], [ [ "It can be observed that the acuracy takes a hit, being reduced to 84.48% from 85.73%, when the dropout values are changed from 0.5 to 0.2 for the first dropout layer and from 0.3 to 0.1 for the second dropout layer. For the chosen network configuration, reducing the dropout rate in the hidden layers did not lift performance. In fact, accuracy was worse than the baseline. 
The model starts to overfit in this case.", "_____no_output_____" ], [ "Lets's experiment with restoring the dropout rates close to their previous values, 0.5 and 0.4 ", "_____no_output_____" ] ], [ [ "# build the model architecture with one layer of length 100\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_dim=1000))\nmodel.add(Dropout(0.5)) # changed dropout rate from to 0.2 to 0.5 again\nmodel.add(Dense(256, activation='relu')) \nmodel.add(Dropout(0.4)) # changed dropout rate of to 0.4 from 0.3\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\n\n# compile the model using categorical_crossentropy loss, and rmsprop optimizer.\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_9 (Dense) (None, 512) 512512 \n_________________________________________________________________\ndropout_6 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_10 (Dense) (None, 256) 131328 \n_________________________________________________________________\ndropout_7 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_11 (Dense) (None, 2) 514 \n=================================================================\nTotal params: 644,354\nTrainable params: 644,354\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "Lets's see what happens when model is trained with increased batch_size of 200 aand higher epochs 0f 50. ", "_____no_output_____" ] ], [ [ "# train the model\nhist = model.fit(x_train, y_train,\n batch_size=200,\n epochs=50,\n validation_data=(x_test, y_test), \n verbose=2)", "Train on 25000 samples, validate on 25000 samples\nEpoch 1/50\n - 14s - loss: 0.4686 - acc: 0.7798 - val_loss: 0.3315 - val_acc: 0.8578\nEpoch 2/50\n - 9s - loss: 0.3312 - acc: 0.8605 - val_loss: 0.3414 - val_acc: 0.8527\nEpoch 3/50\n - 9s - loss: 0.2891 - acc: 0.8792 - val_loss: 0.3564 - val_acc: 0.8510\nEpoch 4/50\n - 9s - loss: 0.2526 - acc: 0.8967 - val_loss: 0.3436 - val_acc: 0.8607\nEpoch 5/50\n - 9s - loss: 0.2005 - acc: 0.9225 - val_loss: 0.3819 - val_acc: 0.8517\nEpoch 6/50\n - 10s - loss: 0.1581 - acc: 0.9412 - val_loss: 0.4081 - val_acc: 0.8586\nEpoch 7/50\n - 10s - loss: 0.1144 - acc: 0.9598 - val_loss: 0.4985 - val_acc: 0.8580\nEpoch 8/50\n - 10s - loss: 0.0885 - acc: 0.9697 - val_loss: 0.5963 - val_acc: 0.8565\nEpoch 9/50\n - 10s - loss: 0.0686 - acc: 0.9776 - val_loss: 0.6315 - val_acc: 0.8540\nEpoch 10/50\n - 10s - loss: 0.0604 - acc: 0.9811 - val_loss: 0.6800 - val_acc: 0.8521\nEpoch 11/50\n - 10s - loss: 0.0484 - acc: 0.9848 - val_loss: 0.7595 - val_acc: 0.8570\nEpoch 12/50\n - 10s - loss: 0.0504 - acc: 0.9862 - val_loss: 0.7227 - val_acc: 0.8532\nEpoch 13/50\n - 10s - loss: 0.0414 - acc: 0.9884 - val_loss: 0.8126 - val_acc: 0.8565\nEpoch 14/50\n - 10s - loss: 0.0355 - acc: 0.9905 - val_loss: 0.8585 - val_acc: 0.8540\nEpoch 15/50\n - 10s - loss: 0.0417 - acc: 0.9902 - val_loss: 1.3426 - val_acc: 0.8075\nEpoch 16/50\n - 10s - loss: 0.0449 - acc: 0.9892 - val_loss: 0.9120 - val_acc: 0.8440\nEpoch 17/50\n - 10s - loss: 0.0389 - acc: 0.9909 - val_loss: 0.8655 - val_acc: 0.8550\nEpoch 18/50\n - 11s - loss: 0.0401 - acc: 0.9909 - val_loss: 0.9224 - val_acc: 0.8491\nEpoch 19/50\n - 10s - loss: 0.0415 - acc: 0.9915 - 
val_loss: 0.8784 - val_acc: 0.8510\nEpoch 20/50\n - 10s - loss: 0.0361 - acc: 0.9926 - val_loss: 0.9633 - val_acc: 0.8511\nEpoch 21/50\n - 10s - loss: 0.0386 - acc: 0.9918 - val_loss: 1.1745 - val_acc: 0.8293\nEpoch 22/50\n - 11s - loss: 0.0330 - acc: 0.9924 - val_loss: 1.0312 - val_acc: 0.8452\nEpoch 23/50\n - 12s - loss: 0.0425 - acc: 0.9920 - val_loss: 0.9567 - val_acc: 0.8547\nEpoch 24/50\n - 11s - loss: 0.0392 - acc: 0.9916 - val_loss: 0.9201 - val_acc: 0.8517\nEpoch 25/50\n - 10s - loss: 0.0414 - acc: 0.9916 - val_loss: 0.9907 - val_acc: 0.8462\nEpoch 26/50\n - 11s - loss: 0.0420 - acc: 0.9933 - val_loss: 0.9878 - val_acc: 0.8477\nEpoch 27/50\n - 11s - loss: 0.0462 - acc: 0.9909 - val_loss: 1.0023 - val_acc: 0.8437\nEpoch 28/50\n - 11s - loss: 0.0436 - acc: 0.9912 - val_loss: 0.9257 - val_acc: 0.8510\nEpoch 29/50\n - 11s - loss: 0.0352 - acc: 0.9932 - val_loss: 0.9424 - val_acc: 0.8485\nEpoch 30/50\n - 11s - loss: 0.0386 - acc: 0.9931 - val_loss: 0.9883 - val_acc: 0.8487\nEpoch 31/50\n - 12s - loss: 0.0389 - acc: 0.9929 - val_loss: 0.9871 - val_acc: 0.8473\nEpoch 32/50\n - 11s - loss: 0.0387 - acc: 0.9922 - val_loss: 0.9604 - val_acc: 0.8462\nEpoch 33/50\n - 12s - loss: 0.0347 - acc: 0.9926 - val_loss: 0.9776 - val_acc: 0.8487\nEpoch 34/50\n - 11s - loss: 0.0331 - acc: 0.9940 - val_loss: 0.9174 - val_acc: 0.8485\nEpoch 35/50\n - 12s - loss: 0.0343 - acc: 0.9932 - val_loss: 1.0568 - val_acc: 0.8383\nEpoch 36/50\n - 12s - loss: 0.0316 - acc: 0.9928 - val_loss: 0.8720 - val_acc: 0.8476\nEpoch 37/50\n - 12s - loss: 0.0259 - acc: 0.9945 - val_loss: 0.9702 - val_acc: 0.8465\nEpoch 38/50\n - 12s - loss: 0.0312 - acc: 0.9936 - val_loss: 0.9065 - val_acc: 0.8462\nEpoch 39/50\n - 12s - loss: 0.0263 - acc: 0.9943 - val_loss: 0.9612 - val_acc: 0.8445\nEpoch 40/50\n - 12s - loss: 0.0272 - acc: 0.9938 - val_loss: 0.9178 - val_acc: 0.8462\nEpoch 41/50\n - 12s - loss: 0.0294 - acc: 0.9932 - val_loss: 0.9214 - val_acc: 0.8439\nEpoch 42/50\n - 11s - loss: 0.0250 - acc: 0.9943 - val_loss: 1.0723 - val_acc: 0.8386\nEpoch 43/50\n - 11s - loss: 0.0267 - acc: 0.9938 - val_loss: 0.9870 - val_acc: 0.8424\nEpoch 44/50\n - 11s - loss: 0.0247 - acc: 0.9946 - val_loss: 1.0822 - val_acc: 0.8399\nEpoch 45/50\n - 11s - loss: 0.0252 - acc: 0.9942 - val_loss: 1.0762 - val_acc: 0.8388\nEpoch 46/50\n - 11s - loss: 0.0251 - acc: 0.9946 - val_loss: 1.1129 - val_acc: 0.8374\nEpoch 47/50\n - 11s - loss: 0.0239 - acc: 0.9948 - val_loss: 1.0573 - val_acc: 0.8400\nEpoch 48/50\n - 11s - loss: 0.0229 - acc: 0.9952 - val_loss: 1.1101 - val_acc: 0.8378\nEpoch 49/50\n - 11s - loss: 0.0223 - acc: 0.9953 - val_loss: 1.1122 - val_acc: 0.8390\nEpoch 50/50\n - 12s - loss: 0.0249 - acc: 0.9947 - val_loss: 1.1113 - val_acc: 0.8369\n" ] ], [ [ "From the results, it is obvious that the model is overfitting. We need to tweak the hyperparameters again in order to improve the network's performance.", "_____no_output_____" ], [ "Let's explore the effect of increase in nodes of the hidden layer on the model's performaance.", "_____no_output_____" ] ], [ [ "# build the model architecture with one layer of length 100\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_dim=1000))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(384, activation='relu')) # increased no. 
of nodes in the hidden layer from 256 to 384\nmodel.add(Dropout(0.3)) # changed dropout rate to 0.3\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\n\n# compile the model using categorical_crossentropy loss, and rmsprop optimizer.\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_12 (Dense) (None, 512) 512512 \n_________________________________________________________________\ndropout_8 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_13 (Dense) (None, 384) 196992 \n_________________________________________________________________\ndropout_9 (Dropout) (None, 384) 0 \n_________________________________________________________________\ndense_14 (Dense) (None, 2) 770 \n=================================================================\nTotal params: 710,274\nTrainable params: 710,274\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# train the model\nhist = model.fit(x_train, y_train,\n batch_size=32,\n epochs=10,\n validation_data=(x_test, y_test), \n verbose=2)", "Train on 25000 samples, validate on 25000 samples\nEpoch 1/10\n - 75s - loss: 0.4219 - acc: 0.8170 - val_loss: 0.3624 - val_acc: 0.8565\nEpoch 2/10\n - 29s - loss: 0.3557 - acc: 0.8574 - val_loss: 0.3674 - val_acc: 0.8612\nEpoch 3/10\n - 28s - loss: 0.3341 - acc: 0.8695 - val_loss: 0.3708 - val_acc: 0.8580\nEpoch 4/10\n - 29s - loss: 0.3235 - acc: 0.8752 - val_loss: 0.3641 - val_acc: 0.8600\nEpoch 5/10\n - 31s - loss: 0.3195 - acc: 0.8797 - val_loss: 0.3639 - val_acc: 0.8592\nEpoch 6/10\n - 31s - loss: 0.3095 - acc: 0.8850 - val_loss: 0.3633 - val_acc: 0.8591\nEpoch 7/10\n - 30s - loss: 0.3008 - acc: 0.8908 - val_loss: 0.3719 - val_acc: 0.8620\nEpoch 8/10\n - 31s - loss: 0.2929 - acc: 0.8973 - val_loss: 0.4063 - val_acc: 0.8586\nEpoch 9/10\n - 31s - loss: 0.2793 - acc: 0.9065 - val_loss: 0.4092 - val_acc: 0.8543\nEpoch 10/10\n - 33s - loss: 0.2657 - acc: 0.9138 - val_loss: 0.4657 - val_acc: 0.8570\n" ] ], [ [ "The network performance shows improvement as compared to the performance of the previous network architecture.", "_____no_output_____" ], [ "Lets change the optimizer 'rmsprop' used in the previous model to 'adam' and check network's performance.", "_____no_output_____" ] ], [ [ "# build the model architecture with one layer of length 100\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_dim=1000))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(384, activation='relu'))\nmodel.add(Dropout(0.3)) \nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\n\n# compile the model using categorical_crossentropy loss, and adam optimizer.\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam', # optimizer changed from 'rmsprop' to 'adam'\n metrics=['accuracy'])", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_33 (Dense) (None, 512) 512512 \n_________________________________________________________________\ndropout_22 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_34 (Dense) (None, 384) 196992 \n_________________________________________________________________\ndropout_23 
(Dropout) (None, 384) 0 \n_________________________________________________________________\ndense_35 (Dense) (None, 2) 770 \n=================================================================\nTotal params: 710,274\nTrainable params: 710,274\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# train the model\nhist = model.fit(x_train, y_train,\n batch_size=32,\n epochs=10,\n validation_data=(x_test, y_test), \n verbose=2)", "Train on 25000 samples, validate on 25000 samples\nEpoch 1/10\n - 38s - loss: 0.4006 - acc: 0.8208 - val_loss: 0.3306 - val_acc: 0.8551\nEpoch 2/10\n - 37s - loss: 0.3154 - acc: 0.8644 - val_loss: 0.3254 - val_acc: 0.8598\nEpoch 3/10\n - 37s - loss: 0.2798 - acc: 0.8849 - val_loss: 0.3284 - val_acc: 0.8559\nEpoch 4/10\n - 36s - loss: 0.2310 - acc: 0.9080 - val_loss: 0.3444 - val_acc: 0.8557\nEpoch 5/10\n - 37s - loss: 0.1819 - acc: 0.9290 - val_loss: 0.3693 - val_acc: 0.8546\nEpoch 6/10\n - 38s - loss: 0.1360 - acc: 0.9476 - val_loss: 0.4033 - val_acc: 0.8499\nEpoch 7/10\n - 39s - loss: 0.1044 - acc: 0.9617 - val_loss: 0.4504 - val_acc: 0.8499\nEpoch 8/10\n - 38s - loss: 0.0837 - acc: 0.9687 - val_loss: 0.4854 - val_acc: 0.8473\nEpoch 9/10\n - 38s - loss: 0.0745 - acc: 0.9718 - val_loss: 0.5267 - val_acc: 0.8486\nEpoch 10/10\n - 39s - loss: 0.0643 - acc: 0.9766 - val_loss: 0.5313 - val_acc: 0.8496\n" ], [ "# evaluate the model\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Accuracy: \", score[1])", "Accuracy: 0.8496\n" ] ], [ [ "Here, in this case the 'rmsprop' seems to be a better optimizer than 'adam' for the model in terms of its performance(test accuracy) which is 85.70% and 84.96% respectively.", "_____no_output_____" ], [ "Now, let us add an extra hidden layer before the output layer and apply dropout to it.", "_____no_output_____" ] ], [ [ "# build the model architecture with one layer of length 100\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_dim=1000))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(256, activation='relu')) # newly added hidden layer \nmodel.add(Dropout(0.3)) # added dropout rate of 0.3\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\n\n# compile the model using categorical_crossentropy loss, and rmsprop optimizer.\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_44 (Dense) (None, 512) 512512 \n_________________________________________________________________\ndropout_30 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_45 (Dense) (None, 256) 131328 \n_________________________________________________________________\ndropout_31 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_46 (Dense) (None, 256) 65792 \n_________________________________________________________________\ndropout_32 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_47 (Dense) (None, 2) 514 \n=================================================================\nTotal params: 710,146\nTrainable params: 710,146\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model.fit(x_train, y_train,\n 
batch_size=32,\n epochs=10,\n validation_data=(x_test, y_test), \n verbose=2)", "Train on 25000 samples, validate on 25000 samples\nEpoch 1/10\n - 37s - loss: 0.4340 - acc: 0.8077 - val_loss: 0.4265 - val_acc: 0.8214\nEpoch 2/10\n - 35s - loss: 0.3689 - acc: 0.8563 - val_loss: 0.3843 - val_acc: 0.8538\nEpoch 3/10\n - 33s - loss: 0.3654 - acc: 0.8636 - val_loss: 0.3708 - val_acc: 0.8605\nEpoch 4/10\n - 33s - loss: 0.3631 - acc: 0.8679 - val_loss: 0.4089 - val_acc: 0.8575\nEpoch 5/10\n - 34s - loss: 0.3529 - acc: 0.8747 - val_loss: 0.3993 - val_acc: 0.8596\nEpoch 6/10\n - 34s - loss: 0.3484 - acc: 0.8769 - val_loss: 0.3749 - val_acc: 0.8608\nEpoch 7/10\n - 35s - loss: 0.3432 - acc: 0.8837 - val_loss: 0.4508 - val_acc: 0.8378\nEpoch 8/10\n - 34s - loss: 0.3357 - acc: 0.8888 - val_loss: 0.4785 - val_acc: 0.8465\nEpoch 9/10\n - 34s - loss: 0.3372 - acc: 0.8938 - val_loss: 0.4856 - val_acc: 0.8502\nEpoch 10/10\n - 33s - loss: 0.3263 - acc: 0.8958 - val_loss: 0.4591 - val_acc: 0.8525\n" ], [ "# evaluate the model\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Accuracy: \", score[1])", "Accuracy: 0.85252\n" ] ], [ [ "Let's increase the number of nodes in the first hidden layer to 384 and see what happens.", "_____no_output_____" ] ], [ [ "# build the model architecture with one layer of length 100\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_dim=1000))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(384, activation='relu')) # changed number of nodes to 384\nmodel.add(Dropout(0.3))\nmodel.add(Dense(256, activation='relu')) \nmodel.add(Dropout(0.2)) # added dropout rate of 0.2\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\n\n# compile the model using categorical_crossentropy loss, and rmsprop optimizer.\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_52 (Dense) (None, 512) 512512 \n_________________________________________________________________\ndropout_36 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_53 (Dense) (None, 384) 196992 \n_________________________________________________________________\ndropout_37 (Dropout) (None, 384) 0 \n_________________________________________________________________\ndense_54 (Dense) (None, 256) 98560 \n_________________________________________________________________\ndropout_38 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_55 (Dense) (None, 2) 514 \n=================================================================\nTotal params: 808,578\nTrainable params: 808,578\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model.fit(x_train, y_train,\n batch_size=32,\n epochs=10,\n validation_data=(x_test, y_test), \n verbose=2)", "Train on 25000 samples, validate on 25000 samples\nEpoch 1/10\n - 37s - loss: 0.3874 - acc: 0.8427 - val_loss: 0.3550 - val_acc: 0.8580\nEpoch 2/10\n - 37s - loss: 0.3759 - acc: 0.8598 - val_loss: 0.3675 - val_acc: 0.8448\nEpoch 3/10\n - 37s - loss: 0.3615 - acc: 0.8646 - val_loss: 0.3479 - val_acc: 0.8617\nEpoch 4/10\n - 37s - loss: 0.3481 - acc: 0.8716 - val_loss: 0.3823 - val_acc: 0.8564\nEpoch 5/10\n - 37s - loss: 0.3415 - acc: 0.8781 - val_loss: 0.3741 - val_acc: 0.8600\nEpoch 6/10\n - 39s - loss: 0.3373 - acc: 
0.8823 - val_loss: 0.3917 - val_acc: 0.8600\nEpoch 7/10\n - 39s - loss: 0.3324 - acc: 0.8870 - val_loss: 0.4167 - val_acc: 0.8596\nEpoch 8/10\n - 40s - loss: 0.3370 - acc: 0.8902 - val_loss: 0.4081 - val_acc: 0.8612\nEpoch 9/10\n - 44s - loss: 0.3271 - acc: 0.8969 - val_loss: 0.4460 - val_acc: 0.8601\nEpoch 10/10\n - 46s - loss: 0.3306 - acc: 0.9001 - val_loss: 0.4895 - val_acc: 0.8589\n" ], [ "# evaluate the model\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Accuracy: \", score[1])", "Accuracy: 0.85892\n" ] ], [ [ "This is our the best performing network architecture so far, which is configured by increasing the number of nodes in the first hidden layer from 256 to 384, while second hidden layer has 256 nodes.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
d0bcb906c3bb312cf523d434ecd3f27efe91786b
3,209
ipynb
Jupyter Notebook
01 Machine Learning/scikit_examples_jupyter/gaussian_process/plot_gpc_xor.ipynb
alphaolomi/colab
19e4eb1bed56346dd18ba65638cda2d17a960d0c
[ "Apache-2.0" ]
null
null
null
01 Machine Learning/scikit_examples_jupyter/gaussian_process/plot_gpc_xor.ipynb
alphaolomi/colab
19e4eb1bed56346dd18ba65638cda2d17a960d0c
[ "Apache-2.0" ]
null
null
null
01 Machine Learning/scikit_examples_jupyter/gaussian_process/plot_gpc_xor.ipynb
alphaolomi/colab
19e4eb1bed56346dd18ba65638cda2d17a960d0c
[ "Apache-2.0" ]
null
null
null
59.425926
1,641
0.560922
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n========================================================================\nIllustration of Gaussian process classification (GPC) on the XOR dataset\n========================================================================\n\nThis example illustrates GPC on XOR data. Compared are a stationary, isotropic\nkernel (RBF) and a non-stationary kernel (DotProduct). On this particular\ndataset, the DotProduct kernel obtains considerably better results because the\nclass-boundaries are linear and coincide with the coordinate axes. In general,\nstationary kernels often obtain better results.\n\n", "_____no_output_____" ] ], [ [ "print(__doc__)\n\n# Authors: Jan Hendrik Metzen <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF, DotProduct\n\n\nxx, yy = np.meshgrid(np.linspace(-3, 3, 50),\n np.linspace(-3, 3, 50))\nrng = np.random.RandomState(0)\nX = rng.randn(200, 2)\nY = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)\n\n# fit the model\nplt.figure(figsize=(10, 5))\nkernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]\nfor i, kernel in enumerate(kernels):\n clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)\n\n # plot the decision function for each datapoint on the grid\n Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]\n Z = Z.reshape(xx.shape)\n\n plt.subplot(1, 2, i + 1)\n image = plt.imshow(Z, interpolation='nearest',\n extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)\n contours = plt.contour(xx, yy, Z, levels=[0.5], linewidths=2,\n colors=['k'])\n plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired,\n edgecolors=(0, 0, 0))\n plt.xticks(())\n plt.yticks(())\n plt.axis([-3, 3, -3, 3])\n plt.colorbar(image)\n plt.title(\"%s\\n Log-Marginal-Likelihood:%.3f\"\n % (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),\n fontsize=12)\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
d0bcd50d4f83e5dfa78175b88404f5890abc7195
75,537
ipynb
Jupyter Notebook
2016-1/w06/Interpolation-Exercises.ipynb
forero/ComputationalLab
d7bca519dbb439fd76f3ee5a59e21af0ae560989
[ "MIT" ]
null
null
null
2016-1/w06/Interpolation-Exercises.ipynb
forero/ComputationalLab
d7bca519dbb439fd76f3ee5a59e21af0ae560989
[ "MIT" ]
null
null
null
2016-1/w06/Interpolation-Exercises.ipynb
forero/ComputationalLab
d7bca519dbb439fd76f3ee5a59e21af0ae560989
[ "MIT" ]
null
null
null
208.665746
26,924
0.883792
[ [ [ "# Interpolation\n\n### [Gerard Gorman](http://www.imperial.ac.uk/people/g.gorman), [Matthew Piggott](http://www.imperial.ac.uk/people/m.d.piggott), [Christian Jacobs](http://www.christianjacobs.uk)", "_____no_output_____" ], [ "## Interpolation vs curve-fitting\n\nConsider a discrete set of data points \n\n$$ (x_i, y_i),\\quad i=0,\\ldots,N,$$\n\nand that we wish to approximate this data in some sense. The data may be known to be exact (if we wished to approximate a complex function by a simpler expression say), or it may have errors from measurement or observational techniques with known or unknown error bars.\n\n### Interpolation\nInterpolation assumes that these data points are exact (e.g. no measurement errors) and at distinct $x$ locations. It aims to fit a function (or curve), $y=f(x)$, to this data which exactly passes through the $N$ discrete points. This means that we have the additional constraint on the $x_s$'s that\n$$x_0 < x_1 < \\ldots < x_N,$$ \nand that \n$$y_i=f(x_i),\\quad \\forall i.$$\n\nIn this case the function $f$ is known as the *interpolating function*, or simply the *interpolant*.\n\n### Curve-fitting\nAlternatively, when we have data with noise, or multiple different measurement values ($y$) at a given $x$ then we cannot fit a function/curve that goes through all points exactly, and rather have to perform **curve-fitting** - finding a function that approximates the data in some sense by does not necessarily hit all points. In this case we no longer have the requirement that \n$$x_0 < x_1 < \\ldots < x_N$$ \nand can consider the data simply as a *cloud of points*. This is the most typical case for real world data which contains variability and noise giving rise to multiple different measurements (i.e. $y$ values) at the same $x$ location.\n\nAn example of interpolation would be to simply fit a line between every successive two data points - this is a piecewise-linear (an example of the more general piecewise-polynomial) interpolation.\n\nIf we were to construct a single straight line ($y=mx+c$ where we have only two free parameters $m$ and $c$) that, for example, minimised that sum of the squares of the differences to the data, this would be what is known as a *least squares approximation* to the data using a linear function. In real data this fitting of data to a function has the effect of *smoothing* complex or noisy data.\n\n### Choice of interpolating function\n\nWe have a lot of choice for how we construct the interpolating or curve-fitting function. Considerations for how to do this include the smoothness of the resulting function (i.e. how many smooth derivatives it has - cf. the piecewise polynomial case - what does this approximation tell us about the rate of change of the data?), replicating known positivity or periodicity, the cost of evaluating it, etc.\n\nSome choices include: polynomials, piecewise polynomials, trigonometric series (sums of sines and cosines leading to an approximation similar to Fourier series).\n", "_____no_output_____" ], [ "# Lagrange polynomial\n\n[Lagrange polynomials](http://mathworld.wolfram.com/LagrangeInterpolatingPolynomial.html) are a particularly popular choice for constructing an interpolant for a given data set. The Lagrange polynomial is the polynomial of the least degree that passes through each data point in the set. 
**The interpolating polynomial of the least degree is unique.**\n\nGiven a set of points as defined above, the Lagrange polynomial is defined as the linear combination\n\n$$L(x) = \\sum_{i=0}^{N} y_i \\ell_i(x).$$\n\nThe functions $\\ell_i$ are known as the *Lagrange basis polynomials* defined by the product\n\n$$\\ell_i(x) := \\prod_{\\begin{smallmatrix}0\\le m\\le N\\\\ m\\neq i\\end{smallmatrix}} \\frac{x-x_m}{x_i-x_m} = \\frac{(x-x_0)}{(x_i-x_0)} \\cdots \\frac{(x-x_{i-1})}{(x_i-x_{i-1})} \\frac{(x-x_{i+1})}{(x_i-x_{i+1})} \\cdots \\frac{(x-x_k)}{(x_i-x_k)},$$\n\nwhere $0\\le i\\le N$.\n\nNotice from the definition the requirement that no two $x_i$ are the same, $x_i - x_m \\neq 0$, so this expression is always well-defined (i.e. we never get a divide by zero!) The reason pairs $x_i = x_j$ with $y_i\\neq y_j$ are not allowed is that no interpolation function $L$ such that $y_i = L(x_i)$ would exist; a function can only get one value for each argument $x_i$. On the other hand, if also $y_i = y_j$, then those two points would actually be one single point.\n\nFor all $i\\neq j$, $\\ell_j(x)$ includes the term $(x-x_i)$ in the numerator, so the whole product will be zero at $x=x_i$:\n\n$\\ell_{j\\ne i}(x_i) = \\prod_{m\\neq j} \\frac{x_i-x_m}{x_j-x_m} = \\frac{(x_i-x_0)}{(x_j-x_0)} \\cdots \\frac{(x_i-x_i)}{(x_j-x_i)} \\cdots \\frac{(x_i-x_k)}{(x_j-x_k)} = 0$.\n\nOn the other hand,\n\n$\\ell_i(x_i) := \\prod_{m\\neq i} \\frac{x_i-x_m}{x_i-x_m} = 1$\n\nIn other words, all basis polynomials are zero at $x=x_i$, except $\\ell_i(x)$, for which it holds that $\\ell_i(x_i)=1$, because it lacks the $(x-x_i)$ term.\n\nIt follows that $y_i \\ell_i(x_i)=y_i$, so at each point $x_i$, $L(x_i)=y_i+0+0+\\dots +0=y_i$, showing that $L$ interpolates the function exactly.\n\nTo help illustrate our discussion lets first create some data in Python and take a look at it.", "_____no_output_____" ] ], [ [ "%pylab inline\n\n# Invent some raw data \nx=numpy.array([0.5,2.0,4.0,5.0,7.0,9.0])\ny=numpy.array([0.5,0.4,0.3,0.1,0.9,0.8])\n\n# For clarity we are going to add a small margin to all the plots.\npylab.margins(0.1)\n\n# We want to overlay a plot the raw data a few times so lets make this a function.\ndef plot_raw_data(x,y):\n # Plot the data as black stars\n pylab.plot(x,y,'k*',label='raw data')\n pylab.xlabel('x')\n pylab.ylabel('y')\n pylab.grid(True)\n\n# The simple plot function you used in Introduction to Programming last term\n# will show a piecewise-linear approximation:\npylab.plot(x,y,'r',label='p/w linear')\n\n# Overlay raw data\nplot_raw_data(x,y)\n\n# Add a legend\npylab.legend(loc='best')\n\npylab.show()", "Populating the interactive namespace from numpy and matplotlib\n" ] ], [ [ "We can use [scipy.interpolate.lagrange](http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.lagrange.html)\nfrom [SciPy](http://www.scipy.org) to generate the **Lagrange polynomial** for a dataset as shown below.\n\n<span style=\"color:red\">(Note: SciPy provides a [wide range of interpolators](http://docs.scipy.org/doc/scipy/reference/interpolate.html) with many different properties which we do not have time to go into in this course. 
When you need to interpolate data for your specific application then you should look up the literature to ensure you are using the best one.)</span>", "_____no_output_____" ] ], [ [ "import scipy.interpolate\n\n# Create the Lagrange polynomial for the given points.\nlp=scipy.interpolate.lagrange(x, y)\n\n# Evaluate this fuction at a high resolution so that we can get a smooth plot. \nxx=numpy.linspace(0.4, 9.1, 100)\npylab.plot(xx, lp(xx), 'b', label='Lagrange')\n\n# Overlay raw data\nplot_raw_data(x, y)\n\n# Add a legend\npylab.legend(loc='best')\n\npylab.show()", "_____no_output_____" ] ], [ [ "# Error in Lagrange interpolation\n\nNote that it can be proven that in the case where we are interpolating a known function (e.g. a complex non-polynomial function by a simpler polynomial), the error is proportional to the distance from any of the data points (which makes sense as the error is obviously zero at these points) and to the $(n+1)$-st derivative of that function evaluated at some location within the bounds of the data. I.e. the more complex (sharply varying) the function is, the higher the error could be.\n", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 1: Approximating a function </span>\n\nSample the function $y(x)=x^3$ at the points $x=(1,2,3)$. \n\nWrite your own Python function to construct the Lagrange polynomials $L_0$, $L_1+L_0$ and $L_2+L_1+L_0$. Plot the resulting polynomials along with the error compared to the original exact function. (<span style=\"color:green\">Guru tip: Using the pylab function [fill_between](http://matplotlib.org/examples/pylab_examples/fill_between_demo.html) provides a nice way of illustrating the difference between graphs.</span>)", "_____no_output_____" ], [ "# Curve fitting\nCurve-fitting in the [least squares](http://mathworld.wolfram.com/LeastSquaresFitting.html) sense is popular when the dataset contains noise (nearly always the case when dealing with real world data). 
This is straightforward to do for polynomials of different degree using [numpy.polyfit](http://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html), see below.", "_____no_output_____" ] ], [ [ "# Calculate coefficients of polynomial degree 0 - i.e. a constant value.\npoly_coeffs=numpy.polyfit(x, y, 0)\n\n# Construct a polynomial function which we can use to evaluate for arbitrary x values.\np0 = numpy.poly1d(poly_coeffs)\npylab.plot(xx, p0(xx), 'k', label='Constant')\n\n# Fit a polynomial degree 1 - i.e. a straight line.\npoly_coeffs=numpy.polyfit(x, y, 1)\np1 = numpy.poly1d(poly_coeffs)\npylab.plot(xx, p1(xx), 'b', label='Linear')\n\n# Quadratic\npoly_coeffs=numpy.polyfit(x, y, 2)\np2 = numpy.poly1d(poly_coeffs)\npylab.plot(xx, p2(xx), 'r', label='Quadratic')\n\n# Cubic\npoly_coeffs=numpy.polyfit(x, y, 3)\np3 = numpy.poly1d(poly_coeffs)\npylab.plot(xx, p3(xx), 'g', label='Cubic')\n\n# Overlay raw data\nplot_raw_data(x, y)\n\n# Add a legend\npylab.legend(loc='best')\n\npylab.show()", "_____no_output_____" ] ], [ [ "### <span style=\"color:blue\">Exercise 2: Squared error calculation</span>\n\nAs described in the docs ([numpy.polyfit](http://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html)), least squares fitting minimises the square of the difference between the data provided and the polynomial,\n\n$$E = \\sum_{i=0}^{k} (p(x_i) - y_i)^2,$$\n\nwhere $p(x_i)$ is the value of the polynomial function that has been fit to the data evaluated at point $x_i$, and $y_i$ is the $i^{th}$ data value.\n\nWrite a Python function that evaluates the squared error, $E$, and use this function to evaluate the error for each of the polynomials calculated above. <span style=\"color:green\">Tip: Try to pass the function *p* in as an argument to your error calculation function. One of the great features of Python is that it is easy to pass in functions as arguments.</span>\n\nWhy is the square of the difference used? ", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 3: Degree of approximation </span>\n\nExtend the example above by fitting and plotting polynomials of increasing degree past cubic. At what *degree* does the resulting polynomial approximation equate to the Lagrange interpolant?\n\nWhy does this make sense? \n\n<span style=\"color:green\">Hint: think about the number of free parameters in a polynomial, and the amount of data you have.</span>", "_____no_output_____" ], [ "# Extrapolation\n\nTake care to remember that *interpolation* by definition is used to estimate $y$ for values of $x$ within the bounds of the available data (here $[0.5,9.0]$) with some confidence. *Extrapolation* on the other hand is the process of estimating (e.g. using the interpolating function) $y$ *outside* the bounds of the available data. However, extrapolation requires a great deal of care as it will become increasingly inaccurate as you go further out of bounds.\n\n### <span style=\"color:blue\">Exercise 4: Extrapolation </span>\n\nRecreate the plots in the example above for different degrees of polynomial, setting the x-range from -2.0 to 13.0. 
What do you notice about extrapolation when you use higher degree polynomials?", "_____no_output_____" ], [ "# Challenge of the day\n\n### <span style=\"color:blue\">Exercise 5: Submarine landslide size in the North Atlantic </span>\n\nOpen the data file [Length-Width.dat](https://raw.githubusercontent.com/ggorman/Numerical-methods-1/master/notebook/data/Length-Width.dat) giving the lengths and widths of submarine landslides in the North Atlantic basin [from [Huhnerbach & Masson, 2004](http://www.sciencedirect.com/science/article/pii/S0025322704002774), Fig. 7]. Fit a linear best-fit line using polyfit and try to recreate the image below.\n\n<span style=\"color:green\">Hint: You will need to take the log of the data before fitting a line to it. </span>\n\n![\"Cloud of point data for submarine landslide widths and depths in the North Atlantic, and a corresponding best (linear) curve fit.\"](https://raw.githubusercontent.com/ggorman/Numerical-methods-1/master/notebook/images/Width-Length.png)\n\n\nReference: [V. Huhnerbach, D.G. Masson, Landslides in the North Atlantic and its adjacent seas:\nan analysis of their morphology, setting and behaviour, Marine Geology 213 (2004) 343 – 362.](http://www.sciencedirect.com/science/article/pii/S0025322704002774)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
d0bcd8cd7f29b89ec5695bfc667b9559b60323c2
245,382
ipynb
Jupyter Notebook
Notebooks/WhyNotTesting.ipynb
sandiegodata/covid19
fc68f1175079ec987fa33404e972abc0d2c48fa6
[ "CC0-1.0" ]
1
2020-04-10T21:34:34.000Z
2020-04-10T21:34:34.000Z
Notebooks/WhyNotTesting.ipynb
sandiegodata/covid19
fc68f1175079ec987fa33404e972abc0d2c48fa6
[ "CC0-1.0" ]
null
null
null
Notebooks/WhyNotTesting.ipynb
sandiegodata/covid19
fc68f1175079ec987fa33404e972abc0d2c48fa6
[ "CC0-1.0" ]
null
null
null
271.140331
63,872
0.906187
[ [ [ "show_input: hide \ngithub: https://github.com/sandiegodata/covid19/blob/master/Notebooks/WhyNotTesting.ipynb\nfeatured_image: 550 \nauthors: \n- email: [email protected] \nname: Eric Busboom \norganization: Civic Knowledge \ntype: Analyst \ntags: \n- covid19 \ncategories: \n- Health\nidentifier: f30be0f4-5e12-476a-9d58-9763f9f8ab9c", "_____no_output_____" ] ], [ [ "## Imperfect Tests and The Effects of False Positives", "_____no_output_____" ], [ "The US government has been widely criticized for its failure to test as many of its citizens for COVID-19 infections as other countries. But is mass testing really as easy as it seems? This analysis of the false positive and false negative rates of tests, using published sensitivities and specificities for COVID-19 rt-PCR and antigen tests, shows that even tests with slightly less than perfect results can produce very large numbers of false positives. ", "_____no_output_____" ] ], [ [ "import sys\n# Install required packages\n#!{sys.executable} -mpip -q install matplotlib seaborn statsmodels pandas publicdata metapack\n\n%matplotlib inline\n\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\nimport metapack as mp\nimport rowgenerators as rg\nimport publicdata as pub\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(color_codes=True)", "_____no_output_____" ] ], [ [ "As the world became more aware of the threat posed by COVID-19 in February 2020, US media began to draw attention to the disparity between the extent of testing being done in other countries versus the United States. The CDC released [fairly restrictive guidelines](https://www.cdc.gov/coronavirus/2019-ncov/hcp/clinical-criteria.html) for what conditions qualified a patient for a lab test for COVID-19 infections, and many media outlets criticized the US CDC for being unprepared to test for the virus. \n\nCriticism intensified when the first version of tests created by the CDC [proved to be unreliable](https://www.forbes.com/sites/rachelsandler/2020/03/02/how-the-cdc-botched-its-initial-coronavirus-response-with-faulty-tests/#5bbf1d50670e). But there are important considerations that these reports have largely ignored, the most important of which is the false positive and false negative rates of the tests, which can produce results that are worse than useless when the prevalence of the condition — the percentage of people who are infected — is very low.", "_____no_output_____" ], [ "Every test — for nearly any sort of test — has an error rate: false positives and false negatives. False negatives are fairly easy to understand. If a 1,000 women who have breast cancer take a test that has a false positive rate of 1%, the test will report that 999 of them have cancer, and 1 who does not, even though she actually does.\n\nThe false positive rate is trickier, because it is multipled not by the number of women who have cancer, but by the number of women who take the test. If the situation is that a large number of women are tested, but few have cancer, the test can report many more false positives than women who actually have cancer. \n\nThere is evidence that the tests for the COVID-19 virus have a false positive rate large enough that if a large number of people are tested when the prevalence of COVID-19 infections are small, most of the reported positives are false positives. 
\n", "_____no_output_____" ], [ "# Primer on False Positives and Negatives\n\nResearch related to epidemiological tests typically does not report the false positive rate directly; instead it reports two parameters, the Selectivity and Specificity. [Wikipedia has an excellent article](https://en.wikipedia.org/wiki/Sensitivity_and_specificity) describing these parameters and how they related to false positive and false negative rates, and [Health News Review](https://www.healthnewsreview.org/) publishes this [very accessible overview of the most important concepts](https://www.healthnewsreview.org/toolkit/tips-for-understanding-studies/understanding-medical-tests-sensitivity-specificity-and-positive-predictive-value/). The most important part of the Wikipedia article to understand is the table in the [worked example](https://en.wikipedia.org/wiki/Sensitivity_and_specificity#Worked_example). When a test is administered, there are four possible outcomes. The test can return a positive result, which can be a true positive or a false positive, or it can return a negative result, which is a true negative or a false negative. If you organize those posibilities by what is the true condition ( does the patient have the vius or not ):\n\n* Patient has virus\n * True Positive ($\\mathit{TP}$)\n * False negative ($\\mathit{FN}$)\n* Patient does not have virus\n * True Negative ($\\mathit{TN}$)\n * False Positive. ($\\mathit{FP}$)\n\nIn the Wikipedia worked example table:\n\n* The number of people who do have the virus is $\\mathit{TP}+\\mathit{FN}$, the true positives plus the false negatives, which are the cases that should have been reported positive, but were not. \n* The number of people who do not have the virus is $\\mathit{TN}+\\mathit{FP}$, the true negatives and the false positives, which are the cases should have been reported positive, but were not. \n\nThe values of Sensitivity and Specificity are defined as: \n\n$$\\begin{array}{ll}\nSn = \\frac{\\mathit{TP}}{\\mathit{TP} + \\mathit{FN}} & \\text{True positives outcomes divided by all positive conditions} \\tag{1}\\label{eq1}\\\\ \nSp = \\frac{\\mathit{TN}}{\\mathit{FP} + \\mathit{TN}} & \\text{True negatives outcomes divided by all negative conditions}\\\\ \n\\end{array}$$\n\nWe want to know the number of false positives($\\mathit{FP}$) given the number of positive conditions ($\\mathit{TP}+\\mathit{FN}$) and the total number of tests. To compute these, we need to have some more information about the number of people tested, and how common the disease is: \n\n* Total test population $P$, the number of people being tested, which equals $\\mathit{TP}+\\mathit{FP}+\\mathit{FN}+\\mathit{TN}$\n* The prevalence $p$, the population rate of positive condition. \n\nWe can do a little math to get: \n\n$$\\begin{array}{ll}\n\\mathit{TP} = Pp\\mathit{Sn} & \\text{}\\\\ \n\\mathit{FP} = P(1-p)(1-\\mathit{Sp}) \\text{}\\\\ \n\\mathit{TN} = P(1-p)\\mathit{Sp} & \\text{}\\\\ \n\\mathit{FN} = Pp(1-\\mathit{Sn})& \\text{}\\\\ \n\\end{array}$$\n\nYou can see examples of these equations worked out in the third line in the red and green cells of the [Worked Example](https://en.wikipedia.org/wiki/Sensitivity_and_specificity#Worked_example) on the Sensitivity and Specificity Wikipedia page. \n\nIt is important to note that when these four values are used to calculate $\\mathit{Sp}$ and $\\mathit{Sn}$, the population value $P$ cancels out, so $\\mathit{Sp}$ and $\\mathit{Sn}$ do not depend on the number of people tested. 
\n\n", "_____no_output_____" ], [ "One of the interesting questions when test results are reported is \"What percentage of the positive results are true positives?\" This is a particularly important question for the COVID-19 pandemic because there are a lot of reports that most people with the virus are asymptomatic. Are they really asymptomatic, or just false positives?\n\nThe metric we're interested here is the portion of positive results that are true positives, the positive predictive value, $\\mathit{PPV}$:\n\n$$\\mathit{PPV} = \\frac{\\mathit{TP} }{ \\mathit{TP} +\\mathit{FP} } $$\n\nWhich expands to:\n\n$$\\mathit{PPV} = \\frac{p\\mathit{Sn} }{ p\\mathit{Sn} + (1-p)(1-\\mathit{Sp}) }\\tag{2}\\label{eq2} $$\n\nIt is important to note that $\\mathit{PPV}$ is not dependent on $P$, the size of the population being tested. It depends only on the quality parameters of the test, $\\mathit{Sn}$ and $\\mathit{Sp}$, and the prevalence, $p$. For a given test, only the prevalence will change over time. ", "_____no_output_____" ], [ "# Selctivity and Specificity Values\n\nIt has been dificult to find specificity and sensitivity values for COVID-19 tests, or any rt-PCR tests; research papers rarely publish the values. Howver, there are a few reports for the values for serology tests, and a few reports of values for rt-PRC tests for the MERS-CoV virus. \n\nWe can get values for an antibidy test for COVID-19 from a a recently published paper, _Development and Clinical Application of A Rapid IgM-IgG Combined Antibody Test for SARS-CoV-2 Infection Diagnosis_<sup><a href=\"#fnote2\" rel=\"noopener\" target=\"_self\">2</a></sup>, which reports: \n\n> The overall testing sensitivity was 88.66% and specificity was 90.63%\n\nThis test is significantly different from the most common early tests for COVID-19; this test looks for antibodies in the patient's blood, while most COVID-19 tests are rt-PCR assays that look for fragments of RNA from the virus. \n\nThe article _MERS-CoV diagnosis: An update._<sup><a href=\"#fnote4\" rel=\"noopener\" target=\"_self\">4</a></sup> reports that for MERS-CoV:\n\n> Song et al. developed a rapid immunochromatographic assay for the detection of MERS-CoV nucleocapsid protein from camel nasal swabs with 93.9% sensitivity and 100% specificity compared to RT-rtPCR\n\nThe article _Performance Evaluation of the PowerChek MERS (upE & ORF1a) Real-Time PCR Kit for the Detection of Middle East Respiratory Syndrome Coronavirus RNA_<sup><a href=\"#fnote5\" rel=\"noopener\" target=\"_self\">5</a></sup> reports:\n\n> The diagnostic sensitivity and specificity of the PowerChek MERS assay were both 100% (95% confidence interval, 91.1–100%).\n\nThe [Emergency Use Authorization for LabCorp's rt-PCR test](https://www.fda.gov/media/136151/download)<sup><a href=\"#fnote6\" rel=\"noopener\" target=\"_self\">6</a></sup> reports:\n\n~~~\nPerformance of the COVID-19 RT-PCR test against the expected results [ with NP swabs ] are:\nPositive Percent Agreement 40/40 = 100% (95% CI: 91.24%-100%)\nNegative Percent Agreement 50/50 = 100% (95% CI: 92.87% -100%)\n~~~\n\nUsing the lower bound of the 95% CI, values convert to a specificity of .90 and sensitivity of .94.\n\nA recent report characterizes Abbott Labs ID NOW system, used for influenza tests. [Abbott Labs recieved an EUA](https://www.fda.gov/media/136525/download), on 27 March 2020, for a version of the device for use with COVID-19. 
The study of the influenza version states:\n\n> The sensitivities of ID NOW 2 for influenza A were 95.9% and 95.7% in NPS and NPA, respectively, and for influenza B were 100% and 98.7% in NPS and NPA, respectively. The specificity was 100% for both influenza A and influenza B in NPS and NPA. \n\nThe results section of the paper provides these parameters, when compared to rRT-PCR: \n\n<table>\n <tr>\n <th>Virus</th>\n <th>Parameter</th>\n <th>ID NOW 2</th>\n <th> ID NOW 2 VTM</th>\n </tr>\n <tr>\n <td>type A</td>\n <td>Sensitivity (95% CI)</td>\n <td>95.7 (89.2-98.8)</td>\n <td>96.7 (90.8-99.3)</td>\n </tr>\n <tr>\n <td></td>\n <td>Specificity (95% CI)</td>\n <td>100 (89.3-100) </td>\n <td>100 (89.3-100)</td>\n </tr>\n <tr>\n <td>Type B</td>\n <td>Sensitivity (95% CI)</td>\n <td>98.7 (93.0-100)</td>\n <td>100 (96.2-100)</td>\n </tr>\n <tr>\n <td></td>\n <td>Specificity (95% CI)</td>\n <td>100 (98.5-100)</td>\n <td>100 (98.5-100)</td>\n </tr>\n </table>\n \nA recent Medscape article<sup><a href=\"#fnote7\" rel=\"noopener\" target=\"_self\">7</a></sup> on the specificity and sensitivity of Influenza tests reports: \n\n> In a study of the nucleic acid amplification tests ID Now (Abbott), Cobas Influenza A/B Assay (Roche Molecular Diagnostics), and Xpert Xpress Flu (Cepheid), Kanwar et al found the three products to have comparable sensitivities for influenza A (93.2%, 100%, 100%, respectively) and B (97.2%, 94.4%, 91.7%, respectively) detection. In addition, each product had greater than 97% specificity for influenza A and B detection. \n\n> Rapid antigen tests generally have a sensitivity of 50-70% and a specificity of 90-95%. Limited studies have demonstrated very low sensitivity for detection of 2009 H1N1 with some commercial brands. \n\n\n\n\nBased on these values, we'll explore the effects of sensitivity and specificity values in the range of .9 to 1. \n", "_____no_output_____" ], [ "# PPV For Serology Test\n\nFirst we'll look at the positive prediction value for the antibody test in reference (<a href=\"#fnote2\" rel=\"noopener\" target=\"_self\">2</a>), which has the lowest published Sp and Sn values at .9063 and .8866. The plot below shows the portion of positive test results that are true positives as a function of the prevalence. 
\n", "_____no_output_____" ] ], [ [ "def p_vs_tpr(Sp, Sn):\n\n for p in np.power(10,np.linspace(-7,np.log10(.5), num=100)): # range from 1 per 10m to 50%\n ppv = (p*Sn) / ( (p*Sn)+(1-p)*(1-Sp))\n yield (p, ppv)\n\ndef plot_ppv(Sp, Sn):\n\n df = pd.DataFrame(list(p_vs_tpr(Sp, Sn)), columns='p ppv'.split())\n df.head()\n\n fig, ax = plt.subplots(figsize=(12,8))\n\n df.plot(ax=ax, x='p',y='ppv', figsize=(10,10))\n\n fig.suptitle(f'Portion of Positives that Are True Vs Prevalence\\nFor test with Sp={Sp} and Sn={Sn}', fontsize=20)\n\n ax.set_xlabel('Condition Prevalence in Portion of Tested Population', fontsize=18)\n ax.set_ylabel('Portion of Positive Test Results that are True Positives', fontsize=18);\n\n\n #ax.set_xscale('log')\n #ax.set_yscale('log')\n \n \nplot_ppv(Sp = .9063, Sn = .8866)\n", "_____no_output_____" ] ], [ [ "The important implication of this curve is that using a test with low Sp and Sn values in conditions of low prevalence will result in a very large portion of false positives.", "_____no_output_____" ], [ "# False Positives for LabCorp's test\n\nAlthough the published results for the LabCorp test are 100% true positives and true negative rates, the 95% error margin is substantial, because the test was validatd with a relatively small number of samples. This analysis will use the published error margins to produce a distribution of positive prediction values. First, let's look at the distributions of the true positive and true negative rates, accounting for the published confidence intervals. These distributions are generated by converting the published true and false rates, and their CIs into gaussian distributions, and selecting only values that are 1 or lower from those distributions.\n", "_____no_output_____" ] ], [ [ "# Convert CI to standard error. The values are reported for a one-sided 95% CI, \n# so we're multiplying by the conversion for a two-sided 90% ci\np_se = (1-.9124) * 1.645 \nn_se = (1-.9287) * 1.645 \n\n\ndef select_v(se):\n \"\"\"get a distribution value, which must be less than or equal to 1\"\"\"\n while True:\n v = np.random.normal(1, se)\n if v <= 1:\n return v\n \n \n# These values are not TP and FP counts; they are normalized to \n# prevalence\nTP = np.array(list(select_v(p_se) for _ in range(100_000)))\nTN = np.array(list(select_v(n_se) for _ in range(100_000)))\n\nfig, ax = plt.subplots(1,2, figsize=(12,8))\nsns.distplot( TP, ax=ax[0], kde=False);\n\nax[0].set_title('Distribution of Posibile True Positives Rates');\n\nsns.distplot( TN, ax=ax[1], kde=False);\n\nax[1].set_title('Distribution of Posibile True Negative Rates');\n\nfig.suptitle(f'Distribution of True Positive and Negative Rates'\n '\\nFor published confidence intervals and 4K random samples', fontsize=20);\n", "_____no_output_____" ] ], [ [ "It is important to note that these are not the distributions \n\nFrom these distributions, we can calculate the distributions for the positive prediction value, the portion of all positive results that are true positives. ", "_____no_output_____" ], [ "With these distributions, we can use ([Eq 2](#MathJax-Span-5239)) to compute the distributions of PPV for a variety of prevalences. In each chart, the 'mean' is the expectation value of the distribution, the weighted mean of the values. It is the most likely PPV valule for the given prevalence. 
", "_____no_output_____" ] ], [ [ "FP = 1-TN\nFN = 1-TP\n\nSn = TP / (TP+FN)\nSp = TN / (TN+FP)\n\ndef ppv_dist_ufunc(p, Sp, Sn):\n return (p*Sn) / ( (p*Sn)+(1-p)*(1-Sp))\n\ndef ppv_dist(p, Sp, Sn):\n sp = np.random.choice(Sp, 1_000_000, replace=True)\n sn = np.random.choice(Sn, 1_000_000, replace=True)\n \n return ppv_dist_ufunc(p,sp, sn)\n \nfig, axes = plt.subplots( 2,2, figsize=(15,15))\naxes = axes.flat\n\ndef plot_axis(axn, prevalence):\n ppvd = ppv_dist(prevalence, Sp, Sn)\n wmean = (ppvd.sum()/len(ppvd)).round(4)\n sns.distplot( ppvd, ax=axes[axn], kde=False);\n axes[axn].set_title(f' prevalence = {prevalence}, mean={wmean}');\n axes[axn].set_xlabel('Positive Prediction Value (PPV)')\n axes[axn].set_ylabel('PPV Frequency')\n\nplot_axis(0, .001)\nplot_axis(1, .01)\nplot_axis(2, .10)\nplot_axis(3, .5)\n \nfig.suptitle(f'Distribution of PPV Values for LabCorp Test\\nBy condition prevalence', fontsize=20);\n", "_____no_output_____" ] ], [ [ "The implication of these charts is that, even for a test with published true positive and true negative rate of 100%, the uncertainties in the measurements can mean that there still a substantial problem of false positives for low prevalences. ", "_____no_output_____" ], [ "Computing the mean PPV value or a range of prevalence values results in the following relationship.", "_____no_output_____" ] ], [ [ "def ppv_vs_p():\n for p in np.power(10,np.linspace(-7,np.log10(1), num=100)): # range from 1 per 10m to 50%\n ppvd = ppv_dist(p, Sp, Sn)\n yield p, ppvd.sum()/len(ppvd)\n \nppv_v_p = pd.DataFrame(list(ppv_vs_p()), columns='p ppv'.split())\n\nfig, ax = plt.subplots(figsize=(8,8))\n\nsns.lineplot(x='p', y='ppv', data=ppv_v_p, ax=ax)\nax.set_xlabel('Prevalence')\nax.set_ylabel('Positive Predictive Value')\n\nfig.suptitle(\"Positive Predictive Value vs Prevalence\\nFor LabCorp Test\", fontsize=18);", "_____no_output_____" ] ], [ [ "Compare this curve to the one presented earlier, for the antibody test with published sensitivity of 88.66% and specificity of 90.63%; The relationship between P and PPV for the rt-PCR test isn't much better. \n\nBut what if the tests are really, really good: .99 for both sensitivity and specificity? Here is the curve for that case:\n", "_____no_output_____" ] ], [ [ "def ppv_vs_p():\n for p in np.power(10,np.linspace(-7,np.log10(1), num=100)): # range from 1 per 10m to 50%\n ppvd = ppv_dist_ufunc(p, .99, .99)\n yield p, ppvd\n \nppv_v_p = pd.DataFrame(list(ppv_vs_p()), columns='p ppv'.split())\n\nfig, ax = plt.subplots(figsize=(8,8))\n\nsns.lineplot(x='p', y='ppv', data=ppv_v_p, ax=ax)\nax.set_xlabel('Prevalence')\nax.set_ylabel('Positive Predictive Value')\n\nfig.suptitle(\"Positive Predictive Value vs Prevalence\\nFor Sp=.99, Sn=.99\", fontsize=18);", "_____no_output_____" ] ], [ [ "This table shows the PPVs and false positive rate for a logrhythimic range of prevalences. ", "_____no_output_____" ] ], [ [ "prevs = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\nnames = [\"1 per {}\".format(round(1/p,0)) for p in prevs]\nppvs = [ppv_v_p.loc[(ppv_v_p.p-p).abs().idxmin()].ppv for p in prevs]\nfp = [ str(round((1-ppv)*100,1))+\"%\" for ppv in ppvs]\ndf = pd.DataFrame({\n 'Rate': names,\n 'Prevalence': prevs,\n 'PPV': ppvs,\n 'False Positives Rate': fp\n}).set_index('Prevalence')\n\ndf\n", "_____no_output_____" ] ], [ [ "This case is much better, across the range of prevalences, but for low prevalence, there are still a lot of false positives, and below 1 per 1000, it is nearly all false positives. 
Here is the same chart, but for Sp and Sn at 99.99%", "_____no_output_____" ] ], [ [ "def ppv_vs_p():\n for p in np.power(10,np.linspace(-7,np.log10(1), num=100)): # range from 1 per 10m to 50%\n ppvd = ppv_dist_ufunc(p, .9999, .9999)\n yield p, ppvd\n \nppv_v_p = pd.DataFrame(list(ppv_vs_p()), columns='p ppv'.split())\n\nppvs = [ppv_v_p.loc[(ppv_v_p.p-p).abs().idxmin()].ppv for p in prevs]\nfp = [ str(round((1-ppv)*100,1))+\"%\" for ppv in ppvs]\ndf = pd.DataFrame({\n 'Rate': names,\n 'Prevalence': prevs,\n 'PPV': ppvs,\n 'False Positives Rate': fp\n}).set_index('Prevalence')\n\ndf\n\n", "_____no_output_____" ] ], [ [ "Even a very accurate test will not be able to distinguish healthy from sick better than a coin flip if the prevalence is less than 1 per 10,000. ", "_____no_output_____" ], [ "# Conclusion\n\nTests with less than 100% specificity and selectivity, including those with published values of 100% but with a moderate confidence interval, are very sensitive to low condition prevalences. Considering the confidence intervals, to ensure that 50% of positive results are true positives requires a prevalence of about 10%, and 80% PPV requires about a 30% prevalence. This suggests that using rt-PCR tests to test a large population that has a low prevalence is likely to produce a large number of false positive results. ", "_____no_output_____" ], [ "# References \n\n* <a name=\"fnote1\">1</a> Parikh, Rajul et al. “[Understanding and using sensitivity, specificity and predictive values.](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2636062/)” Indian journal of ophthalmology vol. 56,1 (2008): 45-50. doi:10.4103/0301-4738.37595\n* <a name=\"fnote2\">2</a> Li, Zhengtu et al. “[Development and Clinical Application of A Rapid IgM-IgG Combined Antibody Test for SARS-CoV-2 Infection Diagnosis.](https://pubmed.ncbi.nlm.nih.gov/32104917/)” Journal of medical virology, 10.1002/jmv.25727. 27 Feb. 2020, doi:10.1002/jmv.25727\n* <a name=\"fnote3\">3</a> Zhuang, G H et al. “[Potential False-Positive Rate Among the 'Asymptomatic Infected Individuals' in Close Contacts of COVID-19 Patients](https://pubmed.ncbi.nlm.nih.gov/32133832)” Zhonghua liuxingbingxue zazhi, vol. 41,4 485-488. 5 Mar. 2020, doi:10.3760/cma.j.cn112338-20200221-00144\n* <a name=\"fnote4\">4</a> Al Johani, Sameera, and Ali H Hajeer. “[MERS-CoV diagnosis: An update.](https://www.sciencedirect.com/science/article/pii/S1876034116300223)” Journal of infection and public health vol. 9,3 (2016): 216-9. doi:10.1016/j.jiph.2016.04.005\n* <a name=\"fnote5\">5</a> Huh, Hee Jae et al. “[Performance Evaluation of the PowerChek MERS (upE & ORF1a) Real-Time PCR Kit for the Detection of Middle East Respiratory Syndrome Coronavirus RNA.](http://www.annlabmed.org/journal/view.html?volume=37&number=6&spage=494)” Annals of laboratory medicine vol. 37,6 (2017): 494-498. doi:10.3343/alm.2017.37.6.494\n* <a name=\"fnote7\">7</a> [Emergency Use Authorization summary](https://www.fda.gov/media/136151/download) for LabCorp's COVID-19 rt-PCR test. \n* Mitamura, Keiko et al. “[Clinical evaluation of ID NOW influenza A & B 2, a rapid influenza virus detection kit using isothermal nucleic acid amplification technology - A comparison with currently available tests.](https://pubmed.ncbi.nlm.nih.gov/31558351/?from_single_result=31558351)” Journal of infection and chemotherapy : official journal of the Japan Society of Chemotherapy vol. 26,2 (2020): 216-221. doi:10.1016/j.jiac.2019.08.015\n* <a name=\"fnote7\">8</a> Blanco, E. M. (2020, January 22). 
[What is the sensitivity and specificity of diagnostic influenza tests?](https://www.medscape.com/answers/2053517-197226/what-is-the-sensitivity-and-specificity-of-diagnostic-influenza-tests) Retrieved March 27, 2020, from https://www.medscape.com/answers/2053517-197226/what-is-the-sensitivity-and-specificity-of-diagnostic-influenza-tests\n\n\n## Supporting Web Articles\n\nThe World Health Organization has a [web page with links to information the COVID-19 tests](https://www.who.int/emergencies/diseases/novel-coronavirus-2019/technical-guidance/laboratory-guidance) from many countries. \n\nThe CDC's page for [Rapid Diagnostic Testing for Influenza: Information for Clinical Laboratory Directors](https://www.cdc.gov/flu/professionals/diagnosis/rapidlab.htm) describes the minimum specificity and sensitivity for rapid influenza diagnostic tests, and shows some examples of PPV and flase positive rates. \n\nWashington Post: [A ‘negative’ coronavirus test result doesn’t always mean you aren’t infected](https://www.washingtonpost.com/science/2020/03/26/negative-coronavirus-test-result-doesnt-always-mean-you-arent-infected/)\n\nPrague Morning: [80% of Rapid COVID-19 Tests the Czech Republic Bought From China are Wrong](https://www.praguemorning.cz/80-of-rapid-covid-19-tests-the-czech-republic-bought-from-china-are-wrong/)\n\nBusinessInsider: [Spain, Europe's worst-hit country after Italy, says coronavirus tests it bought from China are failing to detect positive cases](https://www.businessinsider.com/coronavirus-spain-says-rapid-tests-sent-from-china-missing-cases-2020-3?op=1)\n\nWikipedia has a good discussion of the false positives problem in the articl about the [Base Rate Falacy](https://en.wikipedia.org/wiki/Base_rate_fallacy#False_positive_paradox). \n\n\n## Other References\n\nThe following references were referenced by Blanco <a href=\"#fnote6\" rel=\"noopener\" target=\"_self\">6</a></sup>, but I haven't evaluated them yet. \n\nKanwar N, Michael J, Doran K, Montgomery E, Selvarangan R. Comparison of the ID NOWTM Influenza A & B 2, Cobas® Influenza A/B, and Xpert® Xpress Flu Point-of-Care Nucleic Acid Amplification Tests for Influenza A/B Detection in Children. J Clin Microbiol. 2020 Jan 15. \n\nBlyth CC, Iredell JR, Dwyer DE. Rapid-test sensitivity for novel swine-origin influenza A (H1N1) virus in humans. N Engl J Med. 2009 Dec 17. 361(25):2493. \n\nEvaluation of rapid influenza diagnostic tests for detection of novel influenza A (H1N1) Virus - United States, 2009. MMWR Morb Mortal Wkly Rep. 2009 Aug 7. 58(30):826-9. \n\nFaix DJ, Sherman SS, Waterman SH. Rapid-test sensitivity for novel swine-origin influenza A (H1N1) virus in humans. N Engl J Med. 2009 Aug 13. 361(7):728-9. \n\nGinocchio CC, Zhang F, Manji R, Arora S, Bornfreund M, Falk L. Evaluation of multiple test methods for the detection of the novel 2009 influenza A (H1N1) during the New York City outbreak. J Clin Virol. 2009 Jul. 45(3):191-5. \n\nSambol AR, Abdalhamid B, Lyden ER, Aden TA, Noel RK, Hinrichs SH. Use of rapid influenza diagnostic tests under field conditions as a screening tool during an outbreak of the 2009 novel influenza virus: practical considerations. J Clin Virol. 2010 Mar. 47(3):229-33. \n", "_____no_output_____" ], [ "# Updates\n\n* 2020-03-25: Changed conversion from CI to SE from 1.96 to 1.645; using the factor for a two sided 90% ci for the 95% one sided CI.\n* 2020-03-27: Added parameters for Sp and Sn for the influenza version of Abbott Labs ID NOW device. ", "_____no_output_____" ] ] ]
[ "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "raw" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
d0bcd94e86d5c4a5e6ce60e87abd451ae7335959
12,131
ipynb
Jupyter Notebook
04_deploy_model/04_deploy_model.ipynb
Niobiumkey/amazon-sagemaker-build-train-deploy
3fb62e64c43944b5dfb47a006e0d5167222a1735
[ "Apache-2.0" ]
1
2022-03-11T11:57:57.000Z
2022-03-11T11:57:57.000Z
04_deploy_model/04_deploy_model.ipynb
Niobiumkey/amazon-sagemaker-build-train-deploy
3fb62e64c43944b5dfb47a006e0d5167222a1735
[ "Apache-2.0" ]
null
null
null
04_deploy_model/04_deploy_model.ipynb
Niobiumkey/amazon-sagemaker-build-train-deploy
3fb62e64c43944b5dfb47a006e0d5167222a1735
[ "Apache-2.0" ]
null
null
null
32.875339
479
0.598302
[ [ [ "<h1>Model Deployment</h1>", "_____no_output_____" ], [ "Once we have built and trained our models for feature engineering (using Amazon SageMaker Processing and SKLearn) and binary classification (using the XGBoost open-source container for Amazon SageMaker), we can choose to deploy them in a pipeline on Amazon SageMaker Hosting, by creating an Inference Pipeline.\nhttps://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipelines.html\n\nThis notebook demonstrates how to create a pipeline with the SKLearn model for feature engineering and the XGBoost model for binary classification.\n\nLet's define the variables first.", "_____no_output_____" ] ], [ [ "import sagemaker\nimport sys\nimport IPython\n\n# Let's make sure we have the required version of the SM PySDK.\nrequired_version = '2.49.2'\n\ndef versiontuple(v):\n return tuple(map(int, (v.split(\".\"))))\n\nif versiontuple(sagemaker.__version__) < versiontuple(required_version):\n !{sys.executable} -m pip install -U sagemaker=={required_version}\n IPython.Application.instance().kernel.do_shutdown(True)", "_____no_output_____" ], [ "import sagemaker\nprint(sagemaker.__version__)", "_____no_output_____" ], [ "import boto3\n\nrole = sagemaker.get_execution_role()\nregion = boto3.Session().region_name\nsagemaker_session = sagemaker.Session()\nbucket_name = sagemaker_session.default_bucket()\nprefix = 'endtoendmlsm'\n\nprint(region)\nprint(role)\nprint(bucket_name)", "_____no_output_____" ] ], [ [ "## Retrieve model artifacts", "_____no_output_____" ], [ "First, we need to create two Amazon SageMaker **Model** objects, which associate the artifacts of training (serialized model artifacts in Amazon S3) to the Docker container used for inference. In order to do that, we need to get the paths to our serialized models in Amazon S3.\n<ul>\n <li>For the SKLearn model, in Step 02 (data exploration and feature engineering) we defined the path where the artifacts are saved</li>\n <li>For the XGBoost model, we need to find the path based on Amazon SageMaker's naming convention. We are going to use a utility function to get the model artifacts of the last training job matching a specific base job name.</li>\n</ul>", "_____no_output_____" ] ], [ [ "from notebook_utilities import get_latest_training_job_name, get_training_job_s3_model_artifacts\n\n# SKLearn model artifacts path.\nsklearn_model_path = 's3://{0}/{1}/output/sklearn/model.tar.gz'.format(bucket_name, prefix)\n\n# XGBoost model artifacts path.\ntraining_base_job_name = 'end-to-end-ml-sm-xgb'\nlatest_training_job_name = get_latest_training_job_name(training_base_job_name)\nxgboost_model_path = get_training_job_s3_model_artifacts(latest_training_job_name)\n\nprint('SKLearn model path: ' + sklearn_model_path)\nprint('XGBoost model path: ' + xgboost_model_path)", "_____no_output_____" ] ], [ [ "## SKLearn Featurizer Model", "_____no_output_____" ], [ "Let's build the SKLearn model. For hosting this model we also provide a custom inference script, that is used to process the inputs and outputs and execute the transform.\n\nThe inference script is implemented in the `sklearn_source_dir/inference.py` file. The custom script defines:\n\n- a custom `input_fn` for pre-processing inference requests. 
Our input function accepts only CSV input, loads the input in a Pandas dataframe and assigns feature column names to the dataframe\n- a custom `predict_fn` for running the transform over the inputs\n- a custom `output_fn` for returning either JSON or CSV\n- a custom `model_fn` for deserializing the model", "_____no_output_____" ] ], [ [ "!pygmentize sklearn_source_dir/inference.py", "_____no_output_____" ] ], [ [ "Now, let's create the `SKLearnModel` object, by providing the custom script and S3 model artifacts as input.", "_____no_output_____" ] ], [ [ "import time\nfrom sagemaker.sklearn import SKLearnModel\n\ncode_location = 's3://{0}/{1}/code'.format(bucket_name, prefix)\n\nsklearn_model = SKLearnModel(name='end-to-end-ml-sm-skl-model-{0}'.format(str(int(time.time()))),\n model_data=sklearn_model_path,\n entry_point='inference.py',\n source_dir='sklearn_source_dir/',\n code_location=code_location,\n role=role,\n sagemaker_session=sagemaker_session,\n framework_version='0.20.0',\n py_version='py3')", "_____no_output_____" ] ], [ [ "## XGBoost Model", "_____no_output_____" ], [ "Similarly to the previous steps, we can create an `XGBoost` model object. Also here, we have to provide a custom inference script.\n\nThe inference script is implemented in the `xgboost_source_dir/inference.py` file. The custom script defines:\n\n- a custom `input_fn` for pre-processing inference requests. This input function is able to handle JSON requests, plus all content types supported by the default XGBoost container. For additional information please visit: https://github.com/aws/sagemaker-xgboost-container/blob/master/src/sagemaker_xgboost_container/encoder.py. The reason for adding the JSON content type is that the container-to-container default request content type in an inference pipeline is JSON.\n- a custom `model_fn` for deserializing the model", "_____no_output_____" ] ], [ [ "!pygmentize xgboost_source_dir/inference.py", "_____no_output_____" ] ], [ [ "Now, let's create the `XGBoostModel` object, by providing the custom script and S3 model artifacts as input.", "_____no_output_____" ] ], [ [ "import time\nfrom sagemaker.xgboost import XGBoostModel\n\ncode_location = 's3://{0}/{1}/code'.format(bucket_name, prefix)\n\nxgboost_model = XGBoostModel(name='end-to-end-ml-sm-xgb-model-{0}'.format(str(int(time.time()))),\n model_data=xgboost_model_path,\n entry_point='inference.py',\n source_dir='xgboost_source_dir/',\n code_location=code_location,\n framework_version='0.90-2',\n py_version='py3',\n role=role, \n sagemaker_session=sagemaker_session)", "_____no_output_____" ] ], [ [ "## Pipeline Model", "_____no_output_____" ], [ "Once we have models ready, we can deploy them in a pipeline, by building a `PipelineModel` object and calling the `deploy()` method.", "_____no_output_____" ] ], [ [ "import sagemaker\nimport time\nfrom sagemaker.pipeline import PipelineModel\n\npipeline_model_name = 'end-to-end-ml-sm-xgb-skl-pipeline-{0}'.format(str(int(time.time())))\n\npipeline_model = PipelineModel(\n name=pipeline_model_name, \n role=role,\n models=[\n sklearn_model, \n xgboost_model],\n sagemaker_session=sagemaker_session)\n\nendpoint_name = 'end-to-end-ml-sm-pipeline-endpoint-{0}'.format(str(int(time.time())))\nprint(endpoint_name)\n\npipeline_model.deploy(initial_instance_count=1, \n instance_type='ml.m5.xlarge', \n endpoint_name=endpoint_name)", "_____no_output_____" ] ], [ [ "<span style=\"color: red; font-weight:bold\">Please take note of the endpoint name, since it will be used in the next workshop 
module.</span>", "_____no_output_____" ], [ "## Getting inferences", "_____no_output_____" ], [ "Finally we can try invoking our pipeline of models and get some inferences:", "_____no_output_____" ] ], [ [ "from sagemaker.serializers import CSVSerializer\nfrom sagemaker.deserializers import JSONDeserializer\nfrom sagemaker.predictor import Predictor\n\npredictor = Predictor(\n endpoint_name=endpoint_name,\n sagemaker_session=sagemaker_session,\n serializer=CSVSerializer(),\n deserializer=JSONDeserializer())\n\n#'Type', 'Air temperature [K]', 'Process temperature [K]', 'Rotational speed [rpm]', 'Torque [Nm]', 'Tool wear [min]'\npayload = \"L,298.4,308.2,1582,70.7,216\"\nprint(predictor.predict(payload))\n\npayload = \"M,298.4,308.2,1582,30.2,214\"\nprint(predictor.predict(payload))\n\npayload = \"L,298.4,308.2,30,70.7,216\"\nprint(predictor.predict(payload))", "_____no_output_____" ], [ "#predictor.delete_endpoint()", "_____no_output_____" ] ], [ [ "Once we have tested the endpoint, we can move to the next workshop module. Please access the module <a href=\"https://github.com/aws-samples/amazon-sagemaker-build-train-deploy/tree/master/05_API_Gateway_and_Lambda\" target=\"_blank\">05_API_Gateway_and_Lambda</a> on GitHub to continue.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d0bcdc2037f1aa435b077f1cfe1e16a84dcf4334
47,948
ipynb
Jupyter Notebook
Starter_Code.ipynb
tritchlin/deep_learning
6b2094f00b01f07eb908f83a3b6bdb4239b87ace
[ "ADSL" ]
null
null
null
Starter_Code.ipynb
tritchlin/deep_learning
6b2094f00b01f07eb908f83a3b6bdb4239b87ace
[ "ADSL" ]
null
null
null
Starter_Code.ipynb
tritchlin/deep_learning
6b2094f00b01f07eb908f83a3b6bdb4239b87ace
[ "ADSL" ]
null
null
null
37.635793
137
0.389276
[ [ [ "## Preprocessing", "_____no_output_____" ] ], [ [ "# Import our dependencies\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\nimport tensorflow as tf\nfrom keras.callbacks import ModelCheckpoint\n\n\n# Import and read the charity_data.csv.\nimport pandas as pd \napplication_df = pd.read_csv(\"charity_data.csv\")\napplication_df.head()", "_____no_output_____" ], [ "# Drop the non-beneficial ID columns, 'EIN' and 'NAME'.\napplication_df.drop([\"EIN\",\"NAME\"],axis=1, inplace=True)\napplication_df.head()", "_____no_output_____" ], [ "# Determine the number of unique values in each column.\napplication_df.nunique()", "_____no_output_____" ], [ "# Look at APPLICATION_TYPE value counts for binning\napplication_df[\"APPLICATION_TYPE\"].value_counts()", "_____no_output_____" ], [ "# Choose a cutoff value and create a list of application types to be replaced\n# use the variable name `application_types_to_replace`\n\napp_vc = application_df.APPLICATION_TYPE.value_counts()\napplication_types_to_replace = app_vc[app_vc < 500].index\n\n# # Replace in dataframe\nfor app in application_types_to_replace:\n application_df['APPLICATION_TYPE'] = application_df['APPLICATION_TYPE'].replace(app,\"Other\")\n\n# # Check to make sure binning was successful\napplication_df['APPLICATION_TYPE'].value_counts()", "_____no_output_____" ], [ "# Look at CLASSIFICATION value counts for binning\napplication_df.CLASSIFICATION.value_counts()", "_____no_output_____" ], [ "# Choose a cutoff value and create a list of classifications to be replaced\n# use the variable name `classifications_to_replace`\nclass_vc = application_df.CLASSIFICATION.value_counts()\nclassifications_to_replace = class_vc[class_vc < 1800].index\n\n# Replace in dataframe\nfor cls in classifications_to_replace:\n application_df['CLASSIFICATION'] = application_df['CLASSIFICATION'].replace(cls,\"Other\")\n \n# Check to make sure binning was successful\napplication_df['CLASSIFICATION'].value_counts()", "_____no_output_____" ], [ "application_df.head()", "_____no_output_____" ], [ "# Convert categorical data to numeric with `pd.get_dummies`\napplication_dummies = pd.get_dummies(application_df)\napplication_dummies.head()", "_____no_output_____" ], [ "# Split our preprocessed data into our features and target arrays\ny = application_dummies[\"IS_SUCCESSFUL\"].values\nX = application_dummies.drop([\"IS_SUCCESSFUL\"],1).values\n\n# Split the preprocessed data into a training and testing dataset\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=78) ", "_____no_output_____" ], [ "# Create a StandardScaler instances\nscaler = StandardScaler()\n\n# Fit the StandardScaler\nX_scaler = scaler.fit(X_train)\n\n# Scale the data\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)", "_____no_output_____" ] ], [ [ "## Compile, Train and Evaluate the Model", "_____no_output_____" ] ], [ [ "# Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer.\nnumber_input_features = len(X_train[0])\nhidden_nodes_layer1 = 80\nhidden_nodes_layer2 = 30\n\nnn = tf.keras.models.Sequential()\n\n# First hidden layer\nnn.add(\n tf.keras.layers.Dense(units=hidden_nodes_layer1, input_dim=number_input_features, activation=\"relu\")\n)\n\n# Second hidden layer\nnn.add(tf.keras.layers.Dense(units=hidden_nodes_layer2, activation=\"relu\"))\n\n# Output layer\nnn.add(tf.keras.layers.Dense(units=1, 
activation=\"sigmoid\"))\n\n# Check the structure of the model\nnn.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 80) 3520 \n_________________________________________________________________\ndense_1 (Dense) (None, 30) 2430 \n_________________________________________________________________\ndense_2 (Dense) (None, 1) 31 \n=================================================================\nTotal params: 5,981\nTrainable params: 5,981\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# Compile the model\nnn.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"]) ", "_____no_output_____" ], [ "# Train the model\ncheckpoint = ModelCheckpoint(\"AlphabetSoupCharity.hdf5\", monitor='loss', verbose=1, mode='auto', period=5)\n\n\nfit_model = nn.fit(X_train_scaled,y_train,epochs=100,callbacks=[checkpoint])", "WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\nEpoch 1/100\n804/804 [==============================] - 1s 460us/step - loss: 0.5727 - accuracy: 0.7204\nEpoch 2/100\n804/804 [==============================] - 0s 467us/step - loss: 0.5565 - accuracy: 0.7323\nEpoch 3/100\n804/804 [==============================] - 0s 441us/step - loss: 0.5535 - accuracy: 0.7305\nEpoch 4/100\n804/804 [==============================] - 0s 459us/step - loss: 0.5512 - accuracy: 0.7327\nEpoch 5/100\n804/804 [==============================] - 0s 446us/step - loss: 0.5509 - accuracy: 0.7329\n\nEpoch 00005: saving model to AlphabetSoupCharity.hdf5\nEpoch 6/100\n804/804 [==============================] - 0s 440us/step - loss: 0.5490 - accuracy: 0.7330\nEpoch 7/100\n804/804 [==============================] - 0s 455us/step - loss: 0.5481 - accuracy: 0.7340\nEpoch 8/100\n804/804 [==============================] - 0s 456us/step - loss: 0.5474 - accuracy: 0.7350\nEpoch 9/100\n804/804 [==============================] - 0s 453us/step - loss: 0.5466 - accuracy: 0.7340\nEpoch 10/100\n804/804 [==============================] - 0s 454us/step - loss: 0.5463 - accuracy: 0.7348\n\nEpoch 00010: saving model to AlphabetSoupCharity.hdf5\nEpoch 11/100\n804/804 [==============================] - 0s 471us/step - loss: 0.5460 - accuracy: 0.7337\nEpoch 12/100\n804/804 [==============================] - 0s 463us/step - loss: 0.5457 - accuracy: 0.7339\nEpoch 13/100\n804/804 [==============================] - 0s 440us/step - loss: 0.5451 - accuracy: 0.7359\nEpoch 14/100\n804/804 [==============================] - 0s 435us/step - loss: 0.5446 - accuracy: 0.7364\nEpoch 15/100\n804/804 [==============================] - 0s 453us/step - loss: 0.5448 - accuracy: 0.7357\n\nEpoch 00015: saving model to AlphabetSoupCharity.hdf5\nEpoch 16/100\n804/804 [==============================] - 0s 435us/step - loss: 0.5447 - accuracy: 0.7369\nEpoch 17/100\n804/804 [==============================] - 0s 432us/step - loss: 0.5441 - accuracy: 0.7367\nEpoch 18/100\n804/804 [==============================] - 0s 464us/step - loss: 0.5437 - accuracy: 0.7369\nEpoch 19/100\n804/804 [==============================] - 0s 442us/step - loss: 0.5432 - accuracy: 0.7371\nEpoch 20/100\n804/804 [==============================] - 0s 447us/step - loss: 0.5432 - accuracy: 0.7369\n\nEpoch 00020: saving model to AlphabetSoupCharity.hdf5\nEpoch 
21/100\n804/804 [==============================] - 0s 454us/step - loss: 0.5430 - accuracy: 0.7374\nEpoch 22/100\n804/804 [==============================] - 0s 430us/step - loss: 0.5424 - accuracy: 0.7379\nEpoch 23/100\n804/804 [==============================] - 0s 435us/step - loss: 0.5426 - accuracy: 0.7378\nEpoch 24/100\n804/804 [==============================] - 0s 440us/step - loss: 0.5429 - accuracy: 0.7371\nEpoch 25/100\n804/804 [==============================] - 0s 458us/step - loss: 0.5421 - accuracy: 0.7373\n\nEpoch 00025: saving model to AlphabetSoupCharity.hdf5\nEpoch 26/100\n804/804 [==============================] - 0s 454us/step - loss: 0.5423 - accuracy: 0.7381\nEpoch 27/100\n804/804 [==============================] - 0s 450us/step - loss: 0.5419 - accuracy: 0.7383\nEpoch 28/100\n804/804 [==============================] - 0s 448us/step - loss: 0.5414 - accuracy: 0.7373\nEpoch 29/100\n804/804 [==============================] - 0s 448us/step - loss: 0.5418 - accuracy: 0.7381\nEpoch 30/100\n804/804 [==============================] - 0s 435us/step - loss: 0.5413 - accuracy: 0.7374\n\nEpoch 00030: saving model to AlphabetSoupCharity.hdf5\nEpoch 31/100\n804/804 [==============================] - 0s 437us/step - loss: 0.5410 - accuracy: 0.7391\nEpoch 32/100\n804/804 [==============================] - 0s 454us/step - loss: 0.5411 - accuracy: 0.7384\nEpoch 33/100\n804/804 [==============================] - 0s 436us/step - loss: 0.5408 - accuracy: 0.7378\nEpoch 34/100\n804/804 [==============================] - 0s 431us/step - loss: 0.5407 - accuracy: 0.7377\nEpoch 35/100\n804/804 [==============================] - 0s 454us/step - loss: 0.5406 - accuracy: 0.7383\n\nEpoch 00035: saving model to AlphabetSoupCharity.hdf5\nEpoch 36/100\n804/804 [==============================] - 0s 431us/step - loss: 0.5410 - accuracy: 0.7383\nEpoch 37/100\n804/804 [==============================] - 0s 452us/step - loss: 0.5402 - accuracy: 0.7382\nEpoch 38/100\n804/804 [==============================] - 0s 440us/step - loss: 0.5402 - accuracy: 0.7375\nEpoch 39/100\n804/804 [==============================] - 0s 454us/step - loss: 0.5402 - accuracy: 0.7388\nEpoch 40/100\n804/804 [==============================] - 0s 481us/step - loss: 0.5400 - accuracy: 0.7386\n\nEpoch 00040: saving model to AlphabetSoupCharity.hdf5\nEpoch 41/100\n804/804 [==============================] - 0s 455us/step - loss: 0.5404 - accuracy: 0.7379\nEpoch 42/100\n804/804 [==============================] - 0s 447us/step - loss: 0.5398 - accuracy: 0.7387\nEpoch 43/100\n804/804 [==============================] - 0s 461us/step - loss: 0.5403 - accuracy: 0.7391\nEpoch 44/100\n804/804 [==============================] - 0s 450us/step - loss: 0.5398 - accuracy: 0.7388\nEpoch 45/100\n804/804 [==============================] - 0s 459us/step - loss: 0.5394 - accuracy: 0.7395\n\nEpoch 00045: saving model to AlphabetSoupCharity.hdf5\nEpoch 46/100\n804/804 [==============================] - 0s 447us/step - loss: 0.5390 - accuracy: 0.7405\nEpoch 47/100\n804/804 [==============================] - 0s 445us/step - loss: 0.5396 - accuracy: 0.7395\nEpoch 48/100\n804/804 [==============================] - 0s 452us/step - loss: 0.5391 - accuracy: 0.7390\nEpoch 49/100\n804/804 [==============================] - 0s 431us/step - loss: 0.5391 - accuracy: 0.7397\nEpoch 50/100\n804/804 [==============================] - 0s 431us/step - loss: 0.5386 - accuracy: 0.7393\n\nEpoch 00050: saving model to AlphabetSoupCharity.hdf5\nEpoch 51/100\n804/804 
[==============================] - 0s 451us/step - loss: 0.5387 - accuracy: 0.7399\nEpoch 52/100\n804/804 [==============================] - 0s 441us/step - loss: 0.5384 - accuracy: 0.7396\nEpoch 53/100\n804/804 [==============================] - 0s 469us/step - loss: 0.5390 - accuracy: 0.7397\nEpoch 54/100\n804/804 [==============================] - 0s 489us/step - loss: 0.5386 - accuracy: 0.7397\nEpoch 55/100\n804/804 [==============================] - 0s 458us/step - loss: 0.5388 - accuracy: 0.7393\n\nEpoch 00055: saving model to AlphabetSoupCharity.hdf5\nEpoch 56/100\n804/804 [==============================] - 0s 456us/step - loss: 0.5381 - accuracy: 0.7389\nEpoch 57/100\n804/804 [==============================] - 0s 441us/step - loss: 0.5384 - accuracy: 0.7399\nEpoch 58/100\n804/804 [==============================] - 0s 455us/step - loss: 0.5382 - accuracy: 0.7405\nEpoch 59/100\n804/804 [==============================] - 0s 484us/step - loss: 0.5381 - accuracy: 0.7404\nEpoch 60/100\n804/804 [==============================] - 0s 458us/step - loss: 0.5376 - accuracy: 0.7400\n\nEpoch 00060: saving model to AlphabetSoupCharity.hdf5\nEpoch 61/100\n804/804 [==============================] - 0s 449us/step - loss: 0.5380 - accuracy: 0.7395\nEpoch 62/100\n804/804 [==============================] - 0s 464us/step - loss: 0.5381 - accuracy: 0.7402\nEpoch 63/100\n804/804 [==============================] - 0s 438us/step - loss: 0.5377 - accuracy: 0.7398\nEpoch 64/100\n804/804 [==============================] - 0s 471us/step - loss: 0.5377 - accuracy: 0.7401\nEpoch 65/100\n804/804 [==============================] - 0s 445us/step - loss: 0.5377 - accuracy: 0.7403\n\nEpoch 00065: saving model to AlphabetSoupCharity.hdf5\nEpoch 66/100\n804/804 [==============================] - 0s 466us/step - loss: 0.5376 - accuracy: 0.7405\nEpoch 67/100\n804/804 [==============================] - 0s 507us/step - loss: 0.5371 - accuracy: 0.7404\nEpoch 68/100\n804/804 [==============================] - 0s 452us/step - loss: 0.5374 - accuracy: 0.7406\nEpoch 69/100\n804/804 [==============================] - 0s 447us/step - loss: 0.5375 - accuracy: 0.7399\nEpoch 70/100\n804/804 [==============================] - 0s 466us/step - loss: 0.5373 - accuracy: 0.7404\n\nEpoch 00070: saving model to AlphabetSoupCharity.hdf5\nEpoch 71/100\n804/804 [==============================] - 0s 455us/step - loss: 0.5372 - accuracy: 0.7399\nEpoch 72/100\n804/804 [==============================] - 0s 457us/step - loss: 0.5373 - accuracy: 0.7413\nEpoch 73/100\n804/804 [==============================] - 0s 449us/step - loss: 0.5371 - accuracy: 0.7399\nEpoch 74/100\n804/804 [==============================] - 0s 451us/step - loss: 0.5370 - accuracy: 0.7404\nEpoch 75/100\n804/804 [==============================] - 0s 463us/step - loss: 0.5371 - accuracy: 0.7411\n\nEpoch 00075: saving model to AlphabetSoupCharity.hdf5\nEpoch 76/100\n804/804 [==============================] - 0s 466us/step - loss: 0.5366 - accuracy: 0.7414\nEpoch 77/100\n804/804 [==============================] - 0s 449us/step - loss: 0.5367 - accuracy: 0.7402\nEpoch 78/100\n804/804 [==============================] - 0s 468us/step - loss: 0.5368 - accuracy: 0.7406\nEpoch 79/100\n804/804 [==============================] - 0s 461us/step - loss: 0.5367 - accuracy: 0.7405\nEpoch 80/100\n804/804 [==============================] - 0s 466us/step - loss: 0.5363 - accuracy: 0.7402\n\nEpoch 00080: saving model to AlphabetSoupCharity.hdf5\nEpoch 81/100\n804/804 [==============================] 
- 0s 480us/step - loss: 0.5368 - accuracy: 0.7399\nEpoch 82/100\n804/804 [==============================] - 0s 443us/step - loss: 0.5368 - accuracy: 0.7409\nEpoch 83/100\n804/804 [==============================] - 0s 462us/step - loss: 0.5363 - accuracy: 0.7396\nEpoch 84/100\n804/804 [==============================] - 0s 446us/step - loss: 0.5360 - accuracy: 0.7404\nEpoch 85/100\n804/804 [==============================] - 0s 445us/step - loss: 0.5359 - accuracy: 0.7408\n\nEpoch 00085: saving model to AlphabetSoupCharity.hdf5\nEpoch 86/100\n804/804 [==============================] - 0s 466us/step - loss: 0.5361 - accuracy: 0.7399\nEpoch 87/100\n804/804 [==============================] - 0s 448us/step - loss: 0.5364 - accuracy: 0.7406\nEpoch 88/100\n804/804 [==============================] - 0s 463us/step - loss: 0.5363 - accuracy: 0.7408\nEpoch 89/100\n804/804 [==============================] - 0s 448us/step - loss: 0.5359 - accuracy: 0.7406\nEpoch 90/100\n804/804 [==============================] - 0s 447us/step - loss: 0.5363 - accuracy: 0.7402\n\nEpoch 00090: saving model to AlphabetSoupCharity.hdf5\nEpoch 91/100\n804/804 [==============================] - 0s 469us/step - loss: 0.5361 - accuracy: 0.7405\nEpoch 92/100\n804/804 [==============================] - 0s 446us/step - loss: 0.5358 - accuracy: 0.7412\nEpoch 93/100\n804/804 [==============================] - 0s 452us/step - loss: 0.5358 - accuracy: 0.7402\nEpoch 94/100\n804/804 [==============================] - 0s 461us/step - loss: 0.5358 - accuracy: 0.7412\nEpoch 95/100\n804/804 [==============================] - 0s 480us/step - loss: 0.5357 - accuracy: 0.7410\n\nEpoch 00095: saving model to AlphabetSoupCharity.hdf5\nEpoch 96/100\n804/804 [==============================] - 0s 460us/step - loss: 0.5356 - accuracy: 0.7407\nEpoch 97/100\n804/804 [==============================] - 0s 456us/step - loss: 0.5357 - accuracy: 0.7407\nEpoch 98/100\n804/804 [==============================] - 0s 475us/step - loss: 0.5358 - accuracy: 0.7411\nEpoch 99/100\n804/804 [==============================] - 0s 456us/step - loss: 0.5352 - accuracy: 0.7409\nEpoch 100/100\n804/804 [==============================] - 0s 453us/step - loss: 0.5361 - accuracy: 0.7409\n\nEpoch 00100: saving model to AlphabetSoupCharity.hdf5\n" ], [ "# Evaluate the model using the test data\nmodel_loss, model_accuracy = nn.evaluate(X_test_scaled,y_test,verbose=2)\nprint(f\"Loss: {model_loss}, Accuracy: {model_accuracy}\")", "268/268 - 0s - loss: 0.5578 - accuracy: 0.7256\nLoss: 0.5577850937843323, Accuracy: 0.7255976796150208\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d0bce7e61dd8c7d5916aa997de0ecf705b63834d
16,064
ipynb
Jupyter Notebook
sms-spam-detection-with-various-classifiers.ipynb
pruthiviraj71/Spam-and-Fraudulent-Activites-Detection
e7a50e8898b568f2cebf54e14be480684395e34a
[ "MIT" ]
null
null
null
sms-spam-detection-with-various-classifiers.ipynb
pruthiviraj71/Spam-and-Fraudulent-Activites-Detection
e7a50e8898b568f2cebf54e14be480684395e34a
[ "MIT" ]
null
null
null
sms-spam-detection-with-various-classifiers.ipynb
pruthiviraj71/Spam-and-Fraudulent-Activites-Detection
e7a50e8898b568f2cebf54e14be480684395e34a
[ "MIT" ]
null
null
null
16,064
16,064
0.736927
[ [ [ "The goal of this notebook is to test several classifiers on the data set with different features ", "_____no_output_____" ], [ "And beforehand I want to thank Jose Portilla for his magnificent \"Python for Data Science and Machine Learning\" course on Udemy, which helped me to dive into ML =)", "_____no_output_____" ], [ "### Let's begin", "_____no_output_____" ], [ "First of all, the necessary imports", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport string\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom nltk.stem import SnowballStemmer\nfrom nltk.corpus import stopwords\n%matplotlib inline", "_____no_output_____" ] ], [ [ "Let's read the data from the csv file", "_____no_output_____" ] ], [ [ "sms = pd.read_csv('../input/spam.csv', encoding='latin-1')\nsms.head()", "_____no_output_____" ] ], [ [ "Now drop the \"unnamed\" columns and rename v1 and v2 to \"label\" and \"message\"", "_____no_output_____" ] ], [ [ "sms = sms.drop(['Unnamed: 2','Unnamed: 3','Unnamed: 4'],axis=1)\nsms = sms.rename(columns = {'v1':'label','v2':'message'})", "_____no_output_____" ] ], [ [ "Let's look into our data", "_____no_output_____" ] ], [ [ "sms.groupby('label').describe()", "_____no_output_____" ] ], [ [ "Interesting that \"Sorry, I'll call later\" appears only 30 times here =)", "_____no_output_____" ], [ "Now let's create a new feature, \"message length\", and plot it to see if it's of any interest", "_____no_output_____" ] ], [ [ "sms['length'] = sms['message'].apply(len)\nsms.head()", "_____no_output_____" ], [ "mpl.rcParams['patch.force_edgecolor'] = True\nplt.style.use('seaborn-bright')\nsms.hist(column='length', by='label', bins=50,figsize=(11,5))", "_____no_output_____" ] ], [ [ "Looks like the lengthier the message is, the more likely it is spam. Let's not forget this", "_____no_output_____" ], [ "### Text processing and vectorizing our messages", "_____no_output_____" ], [ "Let's create a new data frame. We'll need a copy later on", "_____no_output_____" ] ], [ [ "text_feat = sms['message'].copy()", "_____no_output_____" ] ], [ [ "Now define our text processing function. 
It will remove any punctuation and stopwords as well.", "_____no_output_____" ] ], [ [ "def text_process(text):\n \n text = text.translate(str.maketrans('', '', string.punctuation))\n text = [word for word in text.split() if word.lower() not in stopwords.words('english')]\n \n return \" \".join(text)", "_____no_output_____" ], [ "text_feat = text_feat.apply(text_process)", "_____no_output_____" ], [ "vectorizer = TfidfVectorizer(\"english\")", "_____no_output_____" ], [ "features = vectorizer.fit_transform(text_feat)", "_____no_output_____" ] ], [ [ "### Classifiers and predictions", "_____no_output_____" ], [ "First of all, let's split our features into train and test sets", "_____no_output_____" ] ], [ [ "features_train, features_test, labels_train, labels_test = train_test_split(features, sms['label'], test_size=0.3, random_state=111)", "_____no_output_____" ] ], [ [ "Now let's import a bunch of classifiers, initialize them and make a dictionary to iterate through", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.metrics import accuracy_score", "_____no_output_____" ], [ "svc = SVC(kernel='sigmoid', gamma=1.0)\nknc = KNeighborsClassifier(n_neighbors=49)\nmnb = MultinomialNB(alpha=0.2)\ndtc = DecisionTreeClassifier(min_samples_split=7, random_state=111)\nlrc = LogisticRegression(solver='liblinear', penalty='l1')\nrfc = RandomForestClassifier(n_estimators=31, random_state=111)\nabc = AdaBoostClassifier(n_estimators=62, random_state=111)\nbc = BaggingClassifier(n_estimators=9, random_state=111)\netc = ExtraTreesClassifier(n_estimators=9, random_state=111)", "_____no_output_____" ] ], [ [ "Parameters are based on the notebook:\n[Spam detection Classifiers hyperparameter tuning][1]\n\n\n [1]: https://www.kaggle.com/muzzzdy/d/uciml/sms-spam-collection-dataset/spam-detection-classifiers-hyperparameter-tuning/", "_____no_output_____" ] ], [ [ "clfs = {'SVC' : svc,'KN' : knc, 'NB': mnb, 'DT': dtc, 'LR': lrc, 'RF': rfc, 'AdaBoost': abc, 'BgC': bc, 'ETC': etc}", "_____no_output_____" ] ], [ [ "Let's make functions to fit our classifiers and make predictions", "_____no_output_____" ] ], [ [ "def train_classifier(clf, feature_train, labels_train): \n clf.fit(feature_train, labels_train)", "_____no_output_____" ], [ "def predict_labels(clf, features):\n return (clf.predict(features))", "_____no_output_____" ] ], [ [ "Now iterate through the classifiers and save the results", "_____no_output_____" ] ], [ [ "pred_scores = []\nfor k,v in clfs.items():\n train_classifier(v, features_train, labels_train)\n pred = predict_labels(v,features_test)\n pred_scores.append((k, [accuracy_score(labels_test,pred)]))", "_____no_output_____" ], [ "df = pd.DataFrame.from_items(pred_scores,orient='index', columns=['Score'])\ndf", "_____no_output_____" ], [ "df.plot(kind='bar', ylim=(0.9,1.0), figsize=(11,6), align='center', colormap=\"Accent\")\nplt.xticks(np.arange(9), df.index)\nplt.ylabel('Accuracy Score')\nplt.title('Distribution by Classifier')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)", "_____no_output_____" ] ], [ [ "Looks like the ensemble classifiers are not doing as well as expected.", 
"_____no_output_____" ], [ "### Stemmer", "_____no_output_____" ], [ "It is said that stemming short messages does no good or even harms predictions. Let's try this out.", "_____no_output_____" ], [ "Define our stemmer function", "_____no_output_____" ] ], [ [ "def stemmer (text):\n text = text.split()\n words = \"\"\n for i in text:\n stemmer = SnowballStemmer(\"english\")\n words += (stemmer.stem(i))+\" \"\n return words", "_____no_output_____" ] ], [ [ "Stem, split, fit - repeat... Predict!", "_____no_output_____" ] ], [ [ "text_feat = text_feat.apply(stemmer)", "_____no_output_____" ], [ "features = vectorizer.fit_transform(text_feat)", "_____no_output_____" ], [ "features_train, features_test, labels_train, labels_test = train_test_split(features, sms['label'], test_size=0.3, random_state=111)", "_____no_output_____" ], [ "pred_scores = []\nfor k,v in clfs.items():\n train_classifier(v, features_train, labels_train)\n pred = predict_labels(v,features_test)\n pred_scores.append((k, [accuracy_score(labels_test,pred)]))", "_____no_output_____" ], [ "df2 = pd.DataFrame.from_items(pred_scores,orient='index', columns=['Score2'])\ndf = pd.concat([df,df2],axis=1)\ndf", "_____no_output_____" ], [ "df.plot(kind='bar', ylim=(0.85,1.0), figsize=(11,6), align='center', colormap=\"Accent\")\nplt.xticks(np.arange(9), df.index)\nplt.ylabel('Accuracy Score')\nplt.title('Distribution by Classifier')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)", "_____no_output_____" ] ], [ [ "Looks like mostly the same. The ensemble classifiers are doing a little bit better, but NB still has the lead.", "_____no_output_____" ], [ "### What have we forgotten? Message length!", "_____no_output_____" ], [ "Let's append our message length feature to the matrix we fit into our classifiers", "_____no_output_____" ] ], [ [ "lf = sms['length'].as_matrix()\nnewfeat = np.hstack((features.todense(),lf[:, None]))", "_____no_output_____" ], [ "features_train, features_test, labels_train, labels_test = train_test_split(newfeat, sms['label'], test_size=0.3, random_state=111)", "_____no_output_____" ], [ "pred_scores = []\nfor k,v in clfs.items():\n train_classifier(v, features_train, labels_train)\n pred = predict_labels(v,features_test)\n pred_scores.append((k, [accuracy_score(labels_test,pred)]))", "_____no_output_____" ], [ "df3 = pd.DataFrame.from_items(pred_scores,orient='index', columns=['Score3'])\ndf = pd.concat([df,df3],axis=1)\ndf", "_____no_output_____" ], [ "df.plot(kind='bar', ylim=(0.85,1.0), figsize=(11,6), align='center', colormap=\"Accent\")\nplt.xticks(np.arange(9), df.index)\nplt.ylabel('Accuracy Score')\nplt.title('Distribution by Classifier')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)", "_____no_output_____" ] ], [ [ "This time everyone is doing a little bit worse, except for LogisticRegression and RandomForest. But the winner is still MultinomialNaiveBayes.", "_____no_output_____" ], [ "### Voting classifier", "_____no_output_____" ], [ "We are using ensemble algorithms here, but what about an ensemble of ensembles? 
Will it beat NB?", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import VotingClassifier", "_____no_output_____" ], [ "eclf = VotingClassifier(estimators=[('BgC', bc), ('ETC', etc), ('RF', rfc), ('Ada', abc)], voting='soft')", "_____no_output_____" ], [ "eclf.fit(features_train,labels_train)", "_____no_output_____" ], [ "pred = eclf.predict(features_test)", "_____no_output_____" ], [ "print(accuracy_score(labels_test,pred))", "_____no_output_____" ] ], [ [ "Better, but nope.", "_____no_output_____" ], [ "### Final verdict - a well-tuned NaiveBayes is your friend in spam detection.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]