The values in the second and third columns, "Have you seen any of the 6 films in the Star Wars franchise?" and "Do you consider yourself to be a fan of the Star Wars film franchise?", are Yes, No, and NaN. We want to change them to True or False.
star_wars['Have you seen any of the 6 films in the Star Wars franchise?'] = star_wars['Have you seen any of the 6 films in the Star Wars franchise?'].map({'Yes': True, 'No': False})
star_wars['Do you consider yourself to be a fan of the Star Wars film franchise?'] = star_wars['Do you consider yourself to be a fan of the Star Wars film franchise?'].map({'Yes': True, 'No': False})
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
Cleaning the columns from index 3 to 9. The fourth through ninth columns are checkbox questions: if a value is a movie name, the respondent has seen that movie; if it is NaN, they have not. We are going to convert these columns to bool type.
for col in star_wars.columns[3:9]:
    star_wars[col] = star_wars[col].apply(lambda x: False if pd.isnull(x) else True)
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
Rename the columns from index 3 to 9 for better readability. seen_1 means Star Wars Episode I, and so on.
star_wars.rename(columns={'Which of the following Star Wars films have you seen? Please select all that apply.': 'seen_1',
                          'Unnamed: 4': 'seen_2',
                          'Unnamed: 5': 'seen_3',
                          'Unnamed: 6': 'seen_4',
                          'Unnamed: 7': 'seen_5',
                          'Unnamed: 8': 'seen_6'}, inplace=True)
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
Cleaning the columns from index 9 to 15. Changing data type to float.
star_wars[star_wars.columns[9:15]] = star_wars[star_wars.columns[9:15]].astype(float)
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
Renaming column names.
star_wars.rename(columns={'Please rank the Star Wars films in order of preference with 1 being your favorite film in the franchise and 6 being your least favorite film.': 'ranking_1',
                          'Unnamed: 10': 'ranking_2',
                          'Unnamed: 11': 'ranking_3',
                          'Unnamed: 12': 'ranking_4',
                          'Unnamed: 13': 'ranking_5',
                          'Unnamed: 14': 'ranking_6'}, inplace=True)
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
Cleaning the columns from index 15 to 29.
star_wars.rename(columns={'Please state whether you view the following characters favorably, unfavorably, or are unfamiliar with him/her.': 'Luke Skywalker',
                          'Unnamed: 16': 'Han Solo',
                          'Unnamed: 17': 'Princess Leia Organa',
                          'Unnamed: 18': 'Obi Wan Kenobi',
                          'Unnamed: 19': 'Yoda',
                          'Unnamed: 20': 'R2-D2',
                          'Unnamed: 21': 'C-3PO',
                          'Unnamed: 22': 'Anakin Skywalker',
                          'Unnamed: 23': 'Darth Vader',
                          'Unnamed: 24': 'Lando Calrissian',
                          'Unnamed: 25': 'Padme Amidala',
                          'Unnamed: 26': 'Boba Fett',
                          'Unnamed: 27': 'Emperor Palpatine',
                          'Unnamed: 28': 'Jar Jar Binks'}, inplace=True)
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
Data Analysis: Finding the Most Seen Movie
seen_sum = star_wars[['seen_1', 'seen_2', 'seen_3', 'seen_4', 'seen_5', 'seen_6']].sum()
seen_sum
seen_sum.idxmax()
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
From the data above, we can see that the most-seen movie is Episode V.
ax = seen_sum.plot(kind='bar')
for p in ax.patches:
    ax.annotate(str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.01))
plt.show()
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
Finding The Highest Ranked Movie.
ranking_mean = star_wars[['ranking_1', 'ranking_2', 'ranking_3', 'ranking_4', 'ranking_5', 'ranking_6']].mean()
ranking_mean
ranking_mean.idxmin()
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
The highest-ranked movie is ranking_5, which is Episode V.
ranking_mean.plot(kind='bar')
plt.show()
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
Let's break down the data by Gender.
males = star_wars[star_wars['Gender'] == 'Male']
females = star_wars[star_wars['Gender'] == 'Female']
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
The number of movies seen.
males[males.columns[3:9]].sum().plot(kind='bar', title='male seen')
plt.show()

females[females.columns[3:9]].sum().plot(kind='bar', title='female seen')
plt.show()
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
The ranking of movies.
males[males.columns[9:15]].mean().plot(kind='bar', title='Male Ranking')
plt.show()

females[females.columns[9:15]].mean().plot(kind='bar', title='Female Ranking')
plt.show()
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
From the charts above, we do not find a significant difference between genders. Star Wars Character Favorability Ratings
star_wars['Luke Skywalker'].value_counts()
star_wars[star_wars.columns[15:29]].head()

fav = star_wars[star_wars.columns[15:29]].dropna()
fav.head()
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
Convert fav to a pivot table.
fav_df_list = []
for col in fav.columns.tolist():
    row = fav[col].value_counts()
    d1 = pd.DataFrame(data={'favorably': row[0] + row[1],
                            'neutral': row[2],
                            'unfavorably': row[4] + row[5],
                            'Unfamiliar': row[3]},
                      index=[col],
                      columns=['favorably', 'neutral', 'unfavorably', 'Unfamiliar'])
    fav_df_list.append(d1)

fav_pivot = pd.concat(fav_df_list)
fav_pivot

fig = plt.figure()
ax = plt.subplot(111)
fav_pivot.plot(kind='barh', stacked=True, figsize=(10, 10), ax=ax)

# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])

# Put a legend below current axis
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, shadow=True, ncol=5)
plt.show()
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
Who Shot First?
shot_first = star_wars['Which character shot first?'].value_counts()
shot_first

shot_sum = shot_first.sum()
shot_first = shot_first.apply(lambda x: x / shot_sum * 100)
shot_first

ax = shot_first.plot(kind='barh')
for p in ax.patches:
    ax.annotate(str("{0:.2f}%".format(round(p.get_width(), 2))), (p.get_width() * 1.005, p.get_y() + p.get_height() * 0.5))
plt.show()
Star Wars survey/Star Wars survey.ipynb
frankbearzou/Data-analysis
mit
Define the necessary environment variables and install the KubeFlow Pipeline SDK. We assume this notebook kernel has access to Python's site-packages and is running Python 3. Please fill in the environment variables below with your own settings:
- KFP_PACKAGE: the latest release of the KubeFlow Pipelines platform library.
- KUBEFLOW_PIPELINE_LINK: the link to access the KubeFlow Pipeline API.
- MOUNT: the mount configuration that maps the data above into the training job. The format is 'data:/directory'.
- GPUs: the number of GPUs for training.
KFP_SERVICE = "ml-pipeline.kubeflow.svc.cluster.local:8888"
KFP_PACKAGE = 'http://kubeflow.oss-cn-beijing.aliyuncs.com/kfp/0.1.14/kfp.tar.gz'
KFP_ARENA_PACKAGE = 'http://kubeflow.oss-cn-beijing.aliyuncs.com/kfp-arena/kfp-arena-0.3.tar.gz'
KUBEFLOW_PIPELINE_LINK = ''
MOUNT = "['user-susan:/training']"
GPUs = 1
samples/contrib/arena-samples/standalonejob/standalone_pipeline.ipynb
kubeflow/kfp-tekton-backend
apache-2.0
Install the necessary Python packages. Note: please change pip3 to the package manager used by this notebook kernel.
!pip3 install $KFP_PACKAGE --upgrade
samples/contrib/arena-samples/standalonejob/standalone_pipeline.ipynb
kubeflow/kfp-tekton-backend
apache-2.0
Note: Install arena's python package
!pip3 install $KFP_ARENA_PACKAGE --upgrade
samples/contrib/arena-samples/standalonejob/standalone_pipeline.ipynb
kubeflow/kfp-tekton-backend
apache-2.0
2. Define pipeline tasks using the kfp library.
import arena
import kfp.dsl as dsl


@dsl.pipeline(
    name='pipeline to run jobs',
    description='shows how to run pipeline jobs.'
)
def sample_pipeline(learning_rate='0.01', dropout='0.9', model_version='1'):
    """A pipeline for end to end machine learning workflow."""
    # 1. prepare data
    prepare_data = arena.StandaloneOp(
        name="prepare-data",
        image="byrnedo/alpine-curl",
        data=MOUNT,
        command="mkdir -p /training/dataset/mnist && \
            cd /training/dataset/mnist && \
            curl -O https://code.aliyun.com/xiaozhou/tensorflow-sample-code/raw/master/data/t10k-images-idx3-ubyte.gz && \
            curl -O https://code.aliyun.com/xiaozhou/tensorflow-sample-code/raw/master/data/t10k-labels-idx1-ubyte.gz && \
            curl -O https://code.aliyun.com/xiaozhou/tensorflow-sample-code/raw/master/data/train-images-idx3-ubyte.gz && \
            curl -O https://code.aliyun.com/xiaozhou/tensorflow-sample-code/raw/master/data/train-labels-idx1-ubyte.gz")
    # 2. prepare source code
    prepare_code = arena.StandaloneOp(
        name="source-code",
        image="alpine/git",
        data=MOUNT,
        command="mkdir -p /training/models/ && \
            cd /training/models/ && \
            if [ ! -d /training/models/tensorflow-sample-code ]; then git clone https://github.com/cheyang/tensorflow-sample-code.git; else echo no need download; fi")
    # 3. train the models
    train = arena.StandaloneOp(
        name="train",
        image="tensorflow/tensorflow:1.11.0-gpu-py3",
        gpus=GPUs,
        data=MOUNT,
        command="echo %s; \
            echo %s; \
            python /training/models/tensorflow-sample-code/tfjob/docker/mnist/main.py --max_steps 500 --data_dir /training/dataset/mnist --log_dir /training/output/mnist" % (prepare_data.output, prepare_code.output),
        metric_name="Train-accuracy",
        metric_unit="PERCENTAGE",
    )
    # 4. export the model
    export_model = arena.StandaloneOp(
        name="export-model",
        image="tensorflow/tensorflow:1.11.0-py3",
        data=MOUNT,
        command="echo %s; \
            python /training/models/tensorflow-sample-code/tfjob/docker/mnist/export_model.py --model_version=%s --checkpoint_step=400 --checkpoint_path=/training/output/mnist /training/output/models" % (train.output, model_version))


learning_rate = "0.001"
dropout = "0.8"
model_version = "1"

arguments = {
    'learning_rate': learning_rate,
    'dropout': dropout,
    'model_version': model_version,
}

import kfp
client = kfp.Client(host=KUBEFLOW_PIPELINE_LINK)
run = client.create_run_from_pipeline_func(sample_pipeline, arguments=arguments).run_info
print('The above run link is assuming you ran this cell on JupyterHub that is deployed on the same cluster. '
      'The actual run link is ' + KUBEFLOW_PIPELINE_LINK + '/#/runs/details/' + run.id)
samples/contrib/arena-samples/standalonejob/standalone_pipeline.ipynb
kubeflow/kfp-tekton-backend
apache-2.0
Cleaning the Raw Data. Printing the 3rd element in the test dataset shows that the data contains text with newlines, punctuation, misspellings, and other items common in text documents. To build a model, we will clean up the text by removing some of these issues.
news_train_data.data[2], news_train_data.target_names[news_train_data.target[2]]

def clean_and_tokenize_text(news_data):
    """Cleans some issues with the text data

    Args:
        news_data: list of text strings
    Returns:
        For each text string, an array of tokenized words are returned in a list
    """
    cleaned_text = []
    for text in news_data:
        x = re.sub('[^\w]|_', ' ', text)  # only keep numbers and letters and spaces
        x = x.lower()
        x = re.sub(r'[^\x00-\x7f]', r'', x)  # remove non ascii texts
        tokens = [y for y in x.split(' ') if y]  # remove empty words
        tokens = ['[number]' if x.isdigit() else x for x in tokens]  # convert all numbers to '[number]' to reduce vocab size
        cleaned_text.append(tokens)
    return cleaned_text

clean_train_tokens = clean_and_tokenize_text(news_train_data.data)
clean_test_tokens = clean_and_tokenize_text(news_test_data.data)
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Get Vocabulary. We will need to filter the vocabulary to remove high-frequency and low-frequency words.
def get_unique_tokens_per_row(text_token_list):
    """Collect unique tokens per row.

    Args:
        text_token_list: list, where each element is a list containing tokenized text
    Returns:
        One list containing the unique tokens in every row. For example, if row one
        contained ['pizza', 'pizza'] while row two contained ['pizza', 'cake', 'cake'],
        then the output list would contain ['pizza' (from row 1), 'pizza' (from row 2),
        'cake' (from row 2)]
    """
    words = []
    for row in text_token_list:
        words.extend(list(set(row)))
    return words

# Make a plot where the x-axis is a token, and the y-axis is how many text documents
# that token is in.
words = pd.DataFrame(get_unique_tokens_per_row(clean_train_tokens), columns=['words'])
token_frequency = words['words'].value_counts()  # how many documents contain each token
token_frequency.plot(logy=True)

vocab = token_frequency[np.logical_and(token_frequency < 1000, token_frequency > 10)]
vocab.plot(logy=True)

def filter_text_by_vocab(news_data, vocab):
    """Removes tokens if not in vocab.

    Args:
        news_data: list, where each element is a token list
        vocab: set containing the tokens to keep.
    Returns:
        List of strings containing the final cleaned text data
    """
    text_strs = []
    for row in news_data:
        words_to_keep = [token for token in row if token in vocab or token == '[number]']
        text_strs.append(' '.join(words_to_keep))
    return text_strs

clean_train_data = filter_text_by_vocab(clean_train_tokens, set(vocab.index))
clean_test_data = filter_text_by_vocab(clean_test_tokens, set(vocab.index))

# Check a few instances of cleaned data
clean_train_data[:3]
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Save the Cleaned Data For Training
!mkdir -p ./data

with open('./data/train.csv', 'w') as f:
    writer = csv.writer(f, lineterminator='\n')
    for target, text in zip(news_train_data.target, clean_train_data):
        writer.writerow([news_train_data.target_names[target], text])

with open('./data/eval.csv', 'w') as f:
    writer = csv.writer(f, lineterminator='\n')
    for target, text in zip(news_test_data.target, clean_test_data):
        writer.writerow([news_test_data.target_names[target], text])

# Also save the vocab, which will be useful in making new predictions.
with open('./data/vocab.txt', 'w') as f:
    vocab.to_csv(f)
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Create Model with ML Workbench. The MLWorkbench Magics are a set of Datalab commands that provide an easy, code-free experience for training, deploying, and predicting with ML models. This notebook takes the cleaned data from the previous notebook and builds a text classification model. The MLWorkbench Magics are a collection of magic commands for each step in ML workflows: analyzing input data to build transforms, transforming data, training a model, evaluating a model, and deploying a model. For details of each command, run it with --help, for example "%%ml train --help". When the dataset is small (like the 20 newsgroup data), there is little benefit in using cloud services, so this notebook runs the analyze, transform, and training steps locally. However, we will take the locally trained model, deploy it to ML Engine, and show how to make real predictions on a deployed model. Every MLWorkbench magic can run locally or use cloud services (by adding the --cloud flag). The next notebook in this sequence (Text Classification --- 20NewsGroup (large data)) shows the cloud version of every command and gives the normal experience when building models on large datasets; however, it still uses the 20 newsgroup data.
import google.datalab.contrib.mlworkbench.commands # This loads the '%%ml' magics
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
First, define the dataset we are going to use for training.
%%ml dataset create
name: newsgroup_data
format: csv
train: ./data/train.csv
eval: ./data/eval.csv
schema:
- name: news_label
  type: STRING
- name: text
  type: STRING

%%ml dataset explore
name: newsgroup_data
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Step 1: Analyze. The first step in the MLWorkbench workflow is to analyze the data for the requested transformations. We are going to build a bag-of-words representation of the text and use it in a linear model. Therefore, the analyze step will compute the vocabularies and related statistics of the data for training.
%%ml analyze
output: ./analysis
data: newsgroup_data
features:
  news_label:
    transform: target
  text:
    transform: bag_of_words

!ls ./analysis
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Step 2: Transform. This step is optional, as training can start from csv data (the same data used in the analysis step). The transform step performs some transformations on the input data and saves the results to a special TensorFlow file called a TFRecord file, containing TF.Example protocol buffers. This allows training to start from preprocessed data. If this step is not used, training has to perform the same preprocessing on every row of csv data every time it is used. As TensorFlow reads the same data row multiple times during training, the same row would be preprocessed multiple times. By writing the preprocessed data to disk, we can speed up training. Because the 20 newsgroups data is small, this step does not matter much, but we do it anyway for illustration. This step is recommended if there are text columns in a dataset, and required if there are image columns. We run the transform step for both the training and eval data.
!rm -rf ./transform

%%ml transform --shuffle
output: ./transform
analysis: ./analysis
data: newsgroup_data

# note: the errors_* files are all 0 size, which means no error.
!ls ./transform/ -l -h
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Create a "transformed dataset" to use in next step.
%%ml dataset create
name: newsgroup_transformed
train: ./transform/train-*
eval: ./transform/eval-*
format: transformed
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Step 3: Training. MLWorkbench automatically builds standard TensorFlow models without you having to write any TensorFlow code.
# Training should use an empty output folder. So if you run training multiple times,
# use different folders or remove the output from the previous run.
!rm -fr ./train
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
The following training step takes about 10~15 minutes.
%%ml train
output: ./train
analysis: ./analysis/
data: newsgroup_transformed
model_args:
  model: linear_classification
  top-n: 5
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Go to Tensorboard (link shown above) to monitor the training progress. Note that training stops when it detects accuracy is no longer increasing for eval data.
# You can also plot the summary events which will be saved with the notebook.
from google.datalab.ml import Summary

summary = Summary('./train')
summary.list_events()
summary.plot(['loss', 'accuracy'])
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
The output of training is two models, one in training_output/model and another in training_output/evaluation_model. These tensorflow models are identical except the latter assumes the target column is part of the input and copies the target value to the output. Therefore, the latter is ideal for evaluation.
!ls ./train/
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Step 4: Evaluation using batch prediction. Below, we use the evaluation model and run batch prediction locally. Batch prediction is needed for large datasets where the data cannot fit in memory. For demo purposes, we will use the evaluation data again.
%%ml batch_predict
model: ./train/evaluation_model/
output: ./batch_predict
format: csv
data:
  csv: ./data/eval.csv

# It creates a results csv file, and a results schema json file.
!ls ./batch_predict
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Note that the output of prediction is a csv file containing the score for each label class. 'predicted_n' is the label for the nth largest score. We care about 'predicted', the final model prediction.
!head -n 5 ./batch_predict/predict_results_eval.csv

%%ml evaluate confusion_matrix --plot
csv: ./batch_predict/predict_results_eval.csv

%%ml evaluate accuracy
csv: ./batch_predict/predict_results_eval.csv
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
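The same accuracy can also be computed by hand from the prediction file. A minimal sketch, assuming (as the header skip in the BigQuery load below suggests) that the csv has a header row and, as in the SQL query further down, that it contains 'predicted' and 'target' columns:

import pandas as pd

results = pd.read_csv('./batch_predict/predict_results_eval.csv')
print((results['predicted'] == results['target']).mean())   # fraction of correct predictions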
Step 5: Use BigQuery to analyze evaluation results. Sometimes you want to query your prediction/evaluation results using SQL. It is easy.
# Create bucket
!gsutil mb gs://bq-mlworkbench-20news-lab
!gsutil cp -r ./batch_predict/predict_results_eval.csv gs://bq-mlworkbench-20news-lab

# Use Datalab's BigQuery API to load CSV files into a table.
import google.datalab.bigquery as bq
import json

with open('./batch_predict/predict_results_schema.json', 'r') as f:
    schema = json.load(f)

# Create BQ Dataset
bq.Dataset('newspredict').create()

# Create the table
table = bq.Table('newspredict.result1').create(schema=schema, overwrite=True)
table.load('gs://bq-mlworkbench-20news-lab/predict_results_eval.csv',
           mode='overwrite',
           source_format='csv',
           csv_options=bq.CSVOptions(skip_leading_rows=1))
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Now, run any SQL queries on the table newspredict.result1. Below we query all of the wrong predictions.
%%bq query
SELECT * FROM newspredict.result1 WHERE predicted != target
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Prediction: Local Instant Prediction. The MLWorkbench also supports running prediction and displaying the results within the notebook. Note that we use the non-evaluation model below (./train/model), which takes input with no target column.
%%ml predict
model: ./train/model/
headers: text
data:
- nasa
- windows xp
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Why Does My Model Predict This? Prediction Explanation. "%%ml explain" gives you insight into which features in the prediction data contribute positively or negatively to certain labels. We use LIME under "%%ml explain". (LIME is an open-source library for feature sensitivity analysis. It is based on the work presented in this paper. LIME is included in Datalab.) In this case, we will check which words in the text contribute most to the predicted label.
# Pick some data from the eval csv file. They are cleaned text.
# The truth labels for the following 3 instances are
#   - rec.autos
#   - comp.windows.x
#   - talk.politics.mideast
instance0 = ('little confused models [number] [number] heard le se someone tell differences far features ' +
             'performance curious book value [number] model less book value usually words demand ' +
             'year heard mid spring early summer best buy')
instance1 = ('hi requirement closing opening different display servers within x application manner display ' +
             'associated client proper done during transition problems')
instance2 = ('attacking drive kuwait country whose citizens close blood business ties saudi citizens thinks ' +
             'helped saudi arabia least eastern muslim country doing anything help kuwait protect saudi arabia ' +
             'indeed masses citizens demonstrating favor butcher saddam killed muslims killing relatively rich ' +
             'muslims nose west saudi arabia rolled iraqi invasion charge saudi arabia idea governments official ' +
             'religion de facto de human nature always ones rise power world country citizens leader slick ' +
             'operator sound guys angels posting edited stuff following friday york times reported group definitely ' +
             'conservative followers house rule country enough reported besides complaining government conservative ' +
             'enough asserted approx [number] [number] kingdom charge under saudi islamic law brings death penalty ' +
             'diplomatic guy bin isn called severe punishment [number] women drove public while protest ban women ' +
             'driving guy group said al said women fired jobs happen heard muslims ban women driving basis qur etc ' +
             'yet folks ban women called choose rally behind hate women allowed tv radio immoral kingdom house neither ' +
             'least nor favorite government earth restrict religious political lot among things likely replacements ' +
             'going lot worse citizens country house feeling heat lately last six months read religious police ' +
             'government western women fully stupid women imo sends wrong signals morality read cracked down few home ' +
             'based religious posted government owned newspapers offering money turns group dare worship homes secret ' +
             'place government grown try take wind conservative opposition things small taste happen guys house trying ' +
             'long run others general west evil zionists rule hate west crowd')

data = [instance0, instance1, instance2]

%%ml predict
model: ./train/model/
headers: text
data: $data
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
The first and second instances are predicted correctly. The third is wrong. Below we run "%%ml explain" to understand more.
%%ml explain --detailview_only
model: ./train/model
labels: rec.autos
type: text
data: $instance0

%%ml explain --detailview_only
model: ./train/model
labels: comp.windows.x
type: text
data: $instance1
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
On instance 2, the top prediction result does not match truth. Predicted is "talk.politics.guns" while truth is "talk.politics.mideast". So let's analyze these two labels.
%%ml explain --detailview_only
model: ./train/model
labels: talk.politics.guns,talk.politics.mideast
type: text
data: $instance2
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Deploying the Model to ML Engine. Now that we have a trained model, have analyzed the results, and have tested the model output locally, we are ready to deploy it to the cloud for real predictions. Deploying a model requires the files to be on GCS. The next few cells make a bucket on GCS, copy the locally trained model, and deploy it.
!gsutil -q mb gs://bq-mlworkbench-20news-lab

# Move the regular model to GCS
!gsutil -m cp -r ./train/model gs://bq-mlworkbench-20news-lab
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
See this doc https://cloud.google.com/ml-engine/docs/how-tos/managing-models-jobs for the definition of ML Engine models and versions. An ML Engine version runs predictions and is contained in an ML Engine model. We will create a new ML Engine model, and deploy the TensorFlow graph as an ML Engine version. This can be done using gcloud (see https://cloud.google.com/ml-engine/docs/how-tos/deploying-models), or with Datalab, which we use below.
%%ml model deploy
path: gs://bq-mlworkbench-20news-lab
name: news.alpha
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
How to Build Your Own Prediction Client. A common task is to call a deployed model from different applications. Below is an example of writing a Python client to run prediction. Model permissions are outside the scope of this notebook; for more information see https://cloud.google.com/ml-engine/docs/tutorials/python-guide and https://developers.google.com/identity/protocols/application-default-credentials .
from oauth2client.client import GoogleCredentials
from googleapiclient import discovery
from googleapiclient import errors

# Store your project ID, model name, and version name in the format the API needs.
api_path = 'projects/{your_project_ID}/models/{model_name}/versions/{version_name}'.format(
    your_project_ID=google.datalab.Context.default().project_id,
    model_name='news',
    version_name='alpha')

# Get application default credentials (possible only if the gcloud tool is
# configured on your machine). See https://developers.google.com/identity/protocols/application-default-credentials
# for more info.
credentials = GoogleCredentials.get_application_default()

# Build a representation of the Cloud ML API.
ml = discovery.build('ml', 'v1', credentials=credentials)

# Create a dictionary containing data to predict.
# Note that the data is a list of csv strings.
body = {'instances': ['nasa', 'windows ex']}

# Create a request
request = ml.projects().predict(
    name=api_path,
    body=body)

print('The JSON request: \n')
print(request.to_json())

# Make the call.
try:
    response = request.execute()
    print('\nThe response:\n')
    print(json.dumps(response, indent=2))
except errors.HttpError as err:
    # Something went wrong, print out some information.
    print('There was an error. Check the details:')
    print(err._get_reason())
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
To explore the prediction client further, check the API Explorer (https://developers.google.com/apis-explorer). It allows you to send raw HTTP requests to many Google APIs. This is useful for understanding the requests and responses, and it helps you build your own client in your favorite language. Please visit https://developers.google.com/apis-explorer/#search/ml%20engine/ml/v1/ml.projects.predict and enter the following values in each text box.
# The output of this cell is placed in the name box.
# Store your project ID, model name, and version name in the format the API needs.
api_path = 'projects/{your_project_ID}/models/{model_name}/versions/{version_name}'.format(
    your_project_ID=google.datalab.Context.default().project_id,
    model_name='news',
    version_name='alpha')

print('Place the following in the name box')
print(api_path)
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
The fields text box can be empty. Note that because we deployed the non-evaluation model, our deployed model takes a csv input with only one column. In general, "instances" is a list of csv strings for models trained by MLWorkbench. Click in the request body box, and note that a small drop-down menu appears at the FAR RIGHT of the input box. Select "Freeform editor". Then enter the following in the request body box.
print('Place the following in the request body box')
request = {'instances': ['nasa', 'windows xp']}
print(json.dumps(request))
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Then click the "Authorize and execute" button. The prediction results are returned in the browser. Cleaning up the deployed model
%%ml model delete
name: news.alpha

%%ml model delete
name: news

# Delete the GCS bucket
!gsutil -m rm -r gs://bq-mlworkbench-20news-lab

# Delete BQ table
bq.Dataset('newspredict').delete(delete_contents=True)
samples/contrib/mlworkbench/text_classification_20newsgroup/Text Classification --- 20NewsGroup (small data).ipynb
googledatalab/notebooks
apache-2.0
Files. Files serve two main purposes: retrieving data from one run of a program to the next (when the program stops, all variables are lost), and exchanging information with other programs (Excel, for example). The most commonly used format is the flat text file: text, txt, csv, tsv. Such a file contains information structured as a matrix, in rows and columns, because that is how numerical information is most often presented. A file is a long sequence of characters, so a convention is needed to say that two groups of characters do not belong to the same column or the same row. The most widespread convention is: \t as the column separator and \n as the row separator. The character \ tells the Python language that the character that follows is part of a code. You will find the list of codes in String and Bytes literals. Aside: today, reading and writing files is so common that tools exist to do it for a wide variety of formats. You will discover them in session 10. It is still useful to do it at least once yourself, to understand the logic of those tools and to avoid being stuck in cases they do not cover. Writing and reading files is much slower than working with variables. Writing means the data is saved to the hard drive: it goes from the program to the disk (and becomes permanent). It takes the reverse path when read. Writing. It is important to remember that a text file can only receive character strings.
mat = [[1.0, 0.0], [0.0, 1.0]]            # matrix stored as a list of lists
with open("mat.txt", "w") as f:           # create a file in write mode
    for i in range(0, len(mat)):
        for j in range(0, len(mat[i])):
            s = str(mat[i][j])            # convert to a character string
            f.write(s + "\t")
        f.write("\n")

# check that the file exists:
import os
print([_ for _ in os.listdir(".") if "mat" in _])
# the previous line uses the symbol _ : it is a variable,
# and the character _ is a letter like any other.
# We could have written:
# print([fichier for fichier in os.listdir(".") if "mat" in fichier])
# This convention signals that the variable is not meant to be kept.
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
The same program, written in a more condensed form:
mat = [[1.0, 0.0], [0.0, 1.0]]   # matrix stored as a list of lists
with open("mat.txt", "w") as f:  # create the file
    s = '\n'.join('\t'.join(str(x) for x in row) for row in mat)
    f.write(s)

# check that the file exists:
print([_ for _ in os.listdir(".") if "mat" in _])
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
We look at the first lines of the file mat.txt:
import pyensae
%load_ext pyensae
%head mat.txt
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
Reading
with open ("mat.txt", "r") as f : # ouverture d'un fichier mat = [ row.strip(' \n').split('\t') for row in f.readlines() ] print(mat)
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
We recover the same information, except that we must not forget to convert the original numbers back to float.
with open ("mat.txt", "r") as f : # ouverture d'un fichier mat = [ [ float(x) for x in row.strip(' \n').split('\t') ] for row in f.readlines() ] print(mat)
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
That is better. The os.path module offers various functions for manipulating file names. The os module offers various functions for manipulating files:
import os
for f in os.listdir('.'):
    print(f)
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
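A few of the os.path helpers mentioned above, shown as a minimal sketch (it reuses the mat.txt file written earlier):

import os.path

path = os.path.join(".", "mat.txt")   # build a path in a portable way
print(os.path.exists(path))           # does the file exist?
print(os.path.getsize(path))          # size in bytes
print(os.path.splitext(path))         # split off the extension
print(os.path.abspath(path))          # absolute path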
with. Pragmatically, the with statement lets you write code that is one statement shorter: close. The following two pieces of code are equivalent:
with open("exemple_fichier.txt", "w") as f: f.write("something") f = open("exemple_fichier.txt", "w") f.write("something") f.close()
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
The close statement closes the file. While the file is open, it is reserved by the Python program; no other application can write to the same file. After close, another application can delete or modify it. With the with keyword, the close method is called implicitly. What is this for? You very rarely write a text file yourself, yet this format is the only one recognized by all applications. Every piece of software and every language offers features for exporting data to a text format. In some circumstances the standard tools do not work (data volumes too large, encoding problems, unexpected characters) and you have to manage on your own. Exercise 1: Excel $\rightarrow$ Python $\rightarrow$ Excel. Download the file seance4_excel.xlsx, which contains a table with three columns. You must: save the file in text format, read it in Python, build a 3x3 square matrix where each value is in its (X, Y) cell, save the result in text format, and load it back into Excel. Other file formats. Text files are the simplest to manipulate, but other common formats exist: html (web pages), xml (structured data), [zip](http://fr.wikipedia.org/wiki/ZIP_(format_de_fichier)), gz (compressed data), wav, mp3, ogg (music), mp4, Vorbis (video), ... Modules. Modules are extensions of the language. Python can do almost nothing on its own, but it benefits from many extensions. We usually distinguish the extensions shipped with the language itself (the math module) from external extensions that you have to install yourself (numpy). Two links: official modules, external modules. The first reflex should always be to check whether a module could help before starting to program. To use a function from a module, use one of the following syntaxes:
import math
print(math.cos(1))

from math import cos
print(cos(1))

from math import *   # this syntax is discouraged because an imported function
print(cos(1))        # may have the same name as one of yours
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
Exercise 2: find a module (1). Go to the official modules page (or use a search engine) to find a module for generating random numbers. Create a list of random numbers drawn from a uniform distribution, then apply a random permutation to that sequence. Exercise 3: find a module (2). Find a module that lets you compute the difference between two dates, then determine the day of the week on which you were born. Writing your own module. A program can be split across several files. For example, a first file monmodule.py that contains a function:
# file monmodule.py
import math

def fonction_cos_sequence(seq):
    return [math.cos(x) for x in seq]

if __name__ == "__main__":
    print("this message only appears if this program is the entry point")
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
The next cell saves the content of the previous cell into a file called monmodule.py.
code = """ # -*- coding: utf-8 -*- import math def fonction_cos_sequence(seq) : return [ math.cos(x) for x in seq ] if __name__ == "__main__" : print ("ce message n'apparaît que si ce programme est le point d'entrée") """ with open("monmodule.py", "w", encoding="utf8") as f : f.write(code)
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
The second file:
import monmodule

print(monmodule.fonction_cos_sequence([1, 2, 3]))
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
Note: if the file monmodule.py is modified, Python does not automatically reload the module if it has already been loaded. The list of modules in memory can be seen in the variable sys.modules:
import sys
list(sorted(sys.modules))[:10]
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
To remove the module from memory, drop it from sys.modules with the statement del sys.modules['monmodule']. Python will then treat the module monmodule.py as new and import it again. Exercise 4: your own module. What happens if you replace if __name__ == "__main__": with if True:, which amounts to removing the line if __name__ == "__main__":? Regular expressions. For the rest of the session, we use the following instructions as a preamble:
import pyensae.datasource
discours = pyensae.datasource.download_data('voeux.zip', website='xd')
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
The documentation for regular expressions is here: regular expressions. They let you search for patterns in a text: 4 digits / 2 digits / 2 digits matches the pattern of dates; as a regular expression it is written [0-9]{4}/[0-9]{2}/[0-9]{2}. The letter a repeated between 2 and 10 times is another pattern; it is written a{2,10}.
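The second pattern mentioned above can be checked with a one-line sketch (the sample string is made up for illustration):

import re
print(re.findall("a{2,10}", "a baa baaaa"))   # ['aa', 'aaaa']: 'a' repeated between 2 and 10 times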
import re   # regular expressions are available through the re module

expression = re.compile("[0-9]{2}/[0-9]{2}/[0-9]{4}")
texte = """Je suis né le 28/12/1903 et je suis mort le 08/02/1957. Ma seconde femme est morte le 10/11/63."""
cherche = expression.findall(texte)
print(cherche)
_doc/notebooks/td1a/td1a_cenonce_session4.ipynb
sdpython/ensae_teaching_cs
mit
2. Build a CLM with default parameters. Building a CLM using Menpo can be done with a single line of code.
from menpofit.clm import CLM

clm = CLM(
    training_images,
    verbose=True,
    group='PTS',
    diagonal=200
)

print(clm)
clm.view_clm_widget()
notebooks/DeformableModels/ConstrainedLocalModel/CLMs Basics.ipynb
menpo/menpofit-notebooks
bsd-3-clause
3. Fit the previous CLM. In Menpo, CLMs can be fitted to images by creating Fitter objects around them. One of the most popular algorithms for fitting CLMs is the Regularized Landmark Mean-Shift algorithm. In order to fit our CLM with this algorithm in Menpo, the user needs to define a GradientDescentCLMFitter object. Again, this can be done with a single line of code!
from menpofit.clm import GradientDescentCLMFitter

fitter = GradientDescentCLMFitter(clm, n_shape=[6, 12])
notebooks/DeformableModels/ConstrainedLocalModel/CLMs Basics.ipynb
menpo/menpofit-notebooks
bsd-3-clause
Fitting a GradientDescentCLMFitter to an image is as simple as calling its fit method. Let's try it by fitting some images of the LFPW database test set!!!
import menpo.io as mio

# load test images
test_images = []
for i in mio.import_images(path_to_lfpw / 'testset', max_images=5, verbose=True):
    # crop image
    i = i.crop_to_landmarks_proportion(0.5)
    # convert it to grayscale if needed
    if i.n_channels == 3:
        i = i.as_greyscale(mode='luminosity')
    # append it to the list
    test_images.append(i)
notebooks/DeformableModels/ConstrainedLocalModel/CLMs Basics.ipynb
menpo/menpofit-notebooks
bsd-3-clause
Note that for the purpose of this simple fitting demonstration we will just fit the first 5 images of the LFPW test set.
from menpofit.fitter import noisy_shape_from_bounding_box

fitting_results = []
for i in test_images:
    gt_s = i.landmarks['PTS'].lms
    # generate perturbed landmarks
    s = noisy_shape_from_bounding_box(gt_s, gt_s.bounding_box())
    # fit image
    fr = fitter.fit_from_shape(i, s, gt_shape=gt_s)
    fitting_results.append(fr)
    # print fitting error
    print(fr)

from menpowidgets import visualize_fitting_result
visualize_fitting_result(fitting_results)
notebooks/DeformableModels/ConstrainedLocalModel/CLMs Basics.ipynb
menpo/menpofit-notebooks
bsd-3-clause
Shift data to the correct column using .loc for assignment: df.loc[destination condition, column] = df.loc[source].
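A toy sketch of this pattern with made-up column names (not the real atlas data): the boolean condition selects the destination rows, and the source column is aligned on the index, so only those rows are filled.

import pandas as pd

toy = pd.DataFrame({'type': ['map', 'note', 'map'],
                    'url': ['u1', 'u2', 'u3']})
toy['mapPhoto'] = ''
toy.loc[toy.type == 'map', 'mapPhoto'] = toy['url']   # only the 'map' rows receive their url
print(toy)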
df.loc[df.type == 'map', ['mapPhoto']] = df['url']   # moving cell values to correct column
df.loc[df.type.str.contains('lineminus'), ['miscPhoto']] = df['url']
df.loc[df.type.str.contains('lineplus'), ['miscPhoto']] = df['url']
df.loc[df.type.str.contains('misc'), ['miscPhoto']] = df['url']

# now to deal with type='photo'
photos = df[df.type == 'photo']
nonphotos = df[df.type != 'photo']   # we can concatenate these later

grouped = photos.groupby(['id', 'date'])
photos.shape

values = grouped.groups.values()
for value in values:
    photos.loc[value[2], ['type']] = 'misc'
    # photos.loc[value[1], ['type']] = 'linephoto2'

photos.loc[photos.type == 'linephoto1']

for name, group in grouped:
    print(grouped[name])

photos = df[df.type == 'photo']
photos.set_index(['id', 'date'], inplace=True)
photos.index[1]

photos = df[df.type == 'photo']
photos.groupby(['id', 'date']).count()
photos.loc[photos.index[25], ['type', 'note']]

# combine photo captions
df['caption'] = ''
df.loc[(df.type.str.contains('lineminus')) | (df.type.str.contains('lineplus')), ['caption']] = df['type'] + ' | ' + df['note']
df.loc[df.type.str.contains('lineplus'), ['caption']] = df['url']
df.loc[df.type.str.contains('misc'), ['caption']] = df['url']

df['mystart'] = 'Baseline summary:'
df.loc[df.type == 'transect', ['site_description']] = df[['mystart', 'label1', 'value1', 'label2', 'value2', 'label3', 'value3', 'note']].apply(' | '.join, axis=1)
df.loc[df.type.str.contains('line-'), ['linephoto1']] = df['url']
df.loc[df.type.str.contains('line\+'), ['linephoto2']] = df['url']   # be sure to escape the +
df.loc[df.type.str.contains('linephoto1'), ['linephoto1']] = df['url']
df.loc[df.type.str.contains('linephoto2'), ['linephoto2']] = df['url']
df.loc[df.type == 'plants', ['general_observations']] = df['note']
Combining rows w groupby, transform, or multiIndex.ipynb
Soil-Carbon-Coalition/atlasdata
mit
Use groupby and transform to fill the row.
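A toy sketch of the groupby/transform fill (made-up data): the joined, non-empty value of each group is broadcast back to every row of that group.

import pandas as pd

toy = pd.DataFrame({'id': [1, 1, 2, 2],
                    'photo': ['a.jpg', '', '', 'b.jpg']})
toy['photo'] = toy.groupby('id')['photo'].transform(lambda x: ''.join(x))
print(toy)   # every row of an id now carries that id's photo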
# since we're using string methods, NaNs won't work
mycols = ['general_observations', 'mapPhoto', 'linephoto1', 'linephoto2', 'miscPhoto', 'site_description']
for item in mycols:
    df[item] = df[item].fillna('')

df.mapPhoto = df.groupby('id')['mapPhoto'].transform(lambda x: "%s" % ''.join(x))
df.linephoto1 = df.groupby(['id', 'date'])['linephoto1'].transform(lambda x: "%s" % ''.join(x))
df.linephoto2 = df.groupby(['id', 'date'])['linephoto2'].transform(lambda x: "%s" % ''.join(x))
df.miscPhoto = df.groupby(['id', 'date'])['miscPhoto'].transform(lambda x: "%s" % ''.join(x))

df['site_description'] = df['site_description'].str.strip()
df.to_csv('test.csv')

# done to here. Next, figure out what to do with linephotos, unclassified photos, and their notes.
# make column for photocaptions. When adding linephoto1, add 'note' and 'type' fields to caption column.
# E.g. 'linephoto1: 100line- | view east along transect.' Then join the rows in the groupby transform
# and add to site_description field.

df.shape
df[(df.type.str.contains('line\+')) & (df.linephoto2.str.len() < 50)]
maps.str.len().sort_values()
Combining rows w groupby, transform, or multiIndex.ipynb
Soil-Carbon-Coalition/atlasdata
mit
Shift data to the correct row using a MultiIndex.
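A toy sketch of moving a value between rows through a hierarchical index (made-up data): rows are addressed by (id, type) tuples on both sides of the assignment.

import pandas as pd

toy = pd.DataFrame({'id': [7, 7],
                    'type': ['map', 'summary'],
                    'mapPhoto': ['http://example.com/m.png', '']})
toy = toy.set_index(['id', 'type'])
toy.loc[(7, 'summary'), 'mapPhoto'] = toy.loc[(7, 'map'), 'mapPhoto']
print(toy)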
ids = list(df['id'])   # make a list of ids to iterate over, before the hierarchical index
# df.type = df.type.map({'\*plot summary': 'transect', '\*remonitoring notes': 'transect'})

df.loc[df.type == 'map', ['mapPhoto']] = df['url']   # moving cell values to correct column
df.set_index(['id', 'type'], inplace=True)   # hierarchical index so we can call locations

# a hierarchical index uses a tuple. You can set values using loc.
# this format: df.loc[destination] = df.loc[source].values[0]
for item in ids:
    df.loc[(item, '*plot summary'), 'mapPhoto'] = df.loc[(item, 'map'), 'mapPhoto'].values[0]
    # generates a pink warning about performance, but oh well.

# here we are using an expression in parens to test for a condition
(df['type'].str.contains('\s') & df['note'].notnull()).value_counts()

df.url = df.url.str.replace(' ', '_'); df.url
df.url.head()

df['newurl'] = df.url.str.replace
df.newurl.head()

# for combining rows try something like this:
print(df.groupby('somecolumn')['temp variable'].apply(' '.join).reset_index())
Combining rows w groupby, transform, or multiIndex.ipynb
Soil-Carbon-Coalition/atlasdata
mit
Introduction for the two-level system The quantum two-level system (TLS) is the simplest possible model for quantum light-matter interaction. In the version we simulate here, the system is driven by a continuous-mode coherent state, whose dipolar interaction with the system is represented by the following Hamiltonian $$ H_\mathrm{TLS} =\hbar \omega_0 \sigma^\dagger \sigma + \frac{\hbar\Omega_\mathrm{TLS}(t)}{2}\left( \sigma\textrm{e}^{-i\omega_dt} + \sigma^\dagger \textrm{e}^{i\omega_dt}\right),$$ where $\omega_0$ is the system's transition frequency, $\sigma$ is the system's atomic lowering operator, $\omega_d$ is the coherent state's center frequency, and $\Omega_\mathrm{TLS}(t)$ is the coherent state's driving strength. The time-dependence can be removed to simplify the simulation by a rotating frame transformation, and is particularly simple when the driving field is resonant with the transition frequency ($\omega_d=\omega_0$). Then, $$ \tilde{H}_\mathrm{TLS} =\frac{\hbar\Omega(t)}{2}\left( \sigma+ \sigma^\dagger \right).$$ Setup the two-level system properties
# define system operators
gamma = 1                             # decay rate
sm_TLS = destroy(2)                   # dipole operator
c_op_TLS = [np.sqrt(gamma) * sm_TLS]  # represents spontaneous emission

# choose range of driving strengths to simulate
Om_list_TLS = gamma * np.logspace(-2, 1, 300)

# calculate steady-state density matrices for the driving strengths
rho_ss_TLS = []
for Om in Om_list_TLS:
    H_TLS = Om * (sm_TLS + sm_TLS.dag())
    rho_ss_TLS.append(steadystate(H_TLS, c_op_TLS))
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
The emission can be decomposed into a so-called coherent and incoherent portion. The coherent portion is simply due to the classical mean of the dipole moment, i.e. $$I_\mathrm{c}=\lim_{t\rightarrow\infty}\Gamma\langle\sigma^\dagger(t)\rangle\langle\sigma(t)\rangle,$$ while the incoherent portion is due to the standard deviation of the dipole moment (which represents its quantum fluctuations), i.e. $$I_\mathrm{inc}=\lim_{t\rightarrow\infty}\Gamma\langle\sigma^\dagger(t)\sigma(t)\rangle-I_\mathrm{c}.$$ Together, these emissions conspire in a way to result in zero second-order coherence for the two-level system, i.e. $g^{(2)}(0)=0$.
# decompose the emitted light into the coherent and incoherent portions
I_c_TLS = expect(sm_TLS.dag(), rho_ss_TLS) * expect(sm_TLS, rho_ss_TLS)
I_inc_TLS = expect(sm_TLS.dag() * sm_TLS, rho_ss_TLS) - I_c_TLS
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
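The claim above that the two-level system has $g^{(2)}(0)=0$ can be checked directly from the steady states already computed. A minimal sketch reusing sm_TLS and rho_ss_TLS from the earlier cell (since $\sigma\sigma=0$, the numerator of $g^{(2)}(0)$ vanishes identically):

# numerator of g2(0): <sigma^+ sigma^+ sigma sigma>, which is exactly zero for a TLS
g20_numerator_TLS = expect(sm_TLS.dag() * sm_TLS.dag() * sm_TLS * sm_TLS, rho_ss_TLS)
print(max(abs(g20_numerator_TLS)))   # 0.0 up to numerical precision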
Visualize the incoherent and coherent emissions
plt.semilogx(Om_list_TLS, abs(I_c_TLS), label='TLS $I_\mathrm{c}$')
plt.semilogx(Om_list_TLS, abs(I_inc_TLS), 'r', label='TLS $I_\mathrm{inc}$')
plt.xlabel('Driving strength [$\Gamma$]')
plt.ylabel('Normalized flux [$\Gamma$]')
plt.legend(loc=2);
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Introduction for the Jaynes-Cummings system The quantum Jaynes-Cummings (JC) system represents one of the most fundamental models for quantum light-matter interaction, which models the interaction between a quantum two-level system (e.g. an atomic transition) and a single photonic mode. Here, the strong interaction between light and matter creates new quantum states known as polaritons in an anharmonic ladder of states. In a phenomenon known as photon blockade, the most anharmonic polariton is used as a two-level system to produce emission with $g^{(2)}(0)<1$. We will investigate how well the emission compares to that of a two-level system by comparing both its coherent and incoherent components as well as its $g^{(2)}(0)$. In the version we simulate here, the Jaynes-Cummings system is driven by a continuous-mode coherent state, whose dipolar interaction with the system is represented by the following Hamiltonian $$ H =\hbar \omega_a a^\dagger a + \hbar \left(\omega_a+\Delta\right) \sigma^\dagger \sigma+ \hbar g\left(a^\dagger\sigma +a\sigma^\dagger\right) + \frac{\hbar\Omega(t)}{2}\left( a\textrm{e}^{-i\omega_dt} + a^\dagger \textrm{e}^{i\omega_dt}\right),$$ where additionally $\omega_a$ is the cavity's resonant frequency and $\Delta$ is the cavity-atom detuning. We will investigate for finite $\Delta$ because this increases the anharmonicity of the Jaynes-Cummings ladder. The time-dependence can additionally be removed to simplify the simulation by a rotating frame transformation in a very similar manner as before. Setup the JC system properties
# truncate size of cavity's Fock space
N = 15

# setup system operators
sm = tensor(destroy(2), qeye(N))
a = tensor(qeye(2), destroy(N))

# define system parameters, barely into strong coupling regime
kappa = 1
g = 0.6 * kappa
detuning = 3 * g  # cavity-atom detuning
delta_s = detuning / 2 + np.sqrt(detuning ** 2 / 4 + g ** 2)

# we only consider cavities in the good-emitter limit, where
# the atomic decay is irrelevant
c_op = [np.sqrt(kappa) * a]
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Effective polaritonic two-level system In the ideal scenario, the most anharmonic polariton and the ground state form an ideal two-level system with effective emission rate of $$\Gamma_\mathrm{eff}= \frac{\kappa}{2}+2\,\textrm{Im}\left\{\sqrt{ g^2-\left( \frac{\kappa}{4}+\frac{\textbf{i}\Delta}{2} \right)^2 }\right\}.$$
effective_gamma = kappa / 2 + 2 * np.imag(
    np.sqrt(g ** 2 - (kappa / 4 + 1j * detuning / 2) ** 2))

# set driving strength based on the effective polariton's
# emission rate (driving strength goes as sqrt{gamma})
Om = 0.4 * np.sqrt(effective_gamma)
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Define reference system for homodyne interference For the purposes of optimally homodyning the JC output, we wish to transmit light through a bare cavity (no atom involved) and calculate its coherent amplitude. (This of course could easily be analytically calculated but QuTiP certainly is trivially capable of such a calculation.)
# reference cavity operator
a_r = destroy(N)
c_op_r = [np.sqrt(kappa) * a_r]

# reference cavity Hamiltonian, no atom coupling
H_c = Om * (a_r + a_r.dag()) + delta_s * a_r.dag() * a_r

# solve for coherent state amplitude at driving strength Om
rho_ss_c = steadystate(H_c, c_op_r)
alpha = -expect(rho_ss_c, a_r)
alpha_c = alpha.conjugate()
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Calculate JC emission The steady-state emitted flux from the JC system is given by $T=\kappa\langle a^\dagger a \rangle$, however with an additional homodyne interference it is $T=\langle b^\dagger b \rangle$, where the operator $b=\sqrt{\kappa}/2\, a + \beta$ is a new operator representing the interference between the JC emission and a coherent state of amplitude $\beta$. The interference present in the operator $b$ now allows for the alteration of the measured portion of the coherently scattered light, though it leaves the incoherent portion unchanged since the incident flux has only a coherent portion. We're interested in studying the optimal homodyne interference to allow the JC emission to match the TLS emission as closely as possible. This optimum is determined from the above reference cavity, such that $\beta=-\sqrt{\kappa}/2\langle a_\textrm{ref} \rangle$.
def calculate_rho_ss(delta_scan):
    H = Om * (a + a.dag()) + g * (sm.dag() * a + sm * a.dag()) + \
        delta_scan * (sm.dag() * sm + a.dag() * a) - detuning * sm.dag() * sm
    return steadystate(H, c_op)

delta_list = np.linspace(-6 * g, 9 * g, 200)
rho_ss = parfor(calculate_rho_ss, delta_list)

# calculate JC emission
I = expect(a.dag() * a, rho_ss)

# calculate JC emission homodyned with optimal state beta
I_int = expect((a.dag() + alpha_c) * (a + alpha), rho_ss)
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Visualize the emitted flux with and without interference The dashed black line shows the intensity without interference and the violet line shows the intensity with interference. The vertical gray line indicates the spectral position of the anharmonic polariton. Note its narrower linewidth due to the slower effective decay rate (more atom-like since we're in the good-emitter limit).
plt.figure(figsize=(8,5))
plt.plot(delta_list/g, I/effective_gamma, 'k', linestyle='dashed', label='JC')
plt.plot(delta_list/g, I_int/effective_gamma, 'blueviolet', label='JC w/ interference')
plt.vlines(delta_s/g, 0, 0.7, 'gray')
plt.xlim(-6, 9)
plt.ylim(0, 0.7)
plt.xlabel('Detuning [g]')
plt.ylabel('Normalized flux [$\Gamma_\mathrm{eff}$]')
plt.legend(loc=1);
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Calculate coherent/incoherent portions of emission from JC system and its $g^{(2)}(0)$ We note that $$g^{(2)}(0)=\frac{\langle a^\dagger a^\dagger a a \rangle}{\langle a^\dagger a \rangle^2}.$$
Om_list = kappa*np.logspace(-2, 1, 300)*np.sqrt(effective_gamma) def calculate_rho_ss(Om): H = Om * (a + a.dag()) + g * (sm.dag() * a + sm * a.dag()) + \ delta_s*(sm.dag()*sm + a.dag()*a) - detuning*sm.dag()*sm return steadystate(H, c_op) rho_ss = parfor(calculate_rho_ss, Om_list) # decompose emission again into incoherent and coherent portions I_c = expect(a.dag(), rho_ss)*expect(a, rho_ss) I_inc = expect(a.dag()*a, rho_ss) - I_c # additionally calculate g^(2)(0) g20 = expect(a.dag()*a.dag()*a*a, rho_ss)/expect(a.dag()*a, rho_ss)**2
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Visualize the results The dashed black line in the top figure represents the coherent portion of the emission and can clearly be seen to dominate the emission for large driving strengths. Here, the emission significantly deviates from that of a two-level system, which saturates by these driving strengths. The lack of saturation for the JC system occurs due to the harmonic ladder above the anharmonic polariton. Additionally, the $g^{(2)}(0)$ values are all quite large relative to the ideal TLS value of zero (bottom plot).
plt.figure(figsize=(8,8)) plt.subplot(211) plt.semilogx(Om_list/np.sqrt(effective_gamma), abs(I_c)/kappa, 'k', linestyle='dashed', label='JC $I_\mathrm{c}$') plt.semilogx(Om_list/np.sqrt(effective_gamma), abs(I_inc)/kappa, 'r', linestyle='dashed', label='JC $I_\mathrm{inc}$') plt.xlabel(r'Driving strength [$\Gamma_\mathrm{eff}$]') plt.ylabel('Normalized Flux [$\kappa$]') plt.legend(loc=2) plt.subplot(212) plt.loglog(Om_list/np.sqrt(effective_gamma), g20, 'k', linestyle='dashed') lim = (1e-4, 2e0) plt.ylim(lim) plt.xlabel(r'Driving strength [$\Gamma_\mathrm{eff}$]') plt.ylabel('$g^{(2)}(0)$');
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Calculate homodyned JC emission Now we recalculate the coherent and incoherent portions as well as the $g^{(2)}(0)$ for the homodyned JC emission, but use the operator $b$ instead of $\sqrt{\kappa}/2\,a$. Thus $$g^{(2)}(0)=\frac{\langle b^\dagger b^\dagger b b \rangle}{\langle b^\dagger b \rangle^2}.$$
def calculate_rho_ss_c(Om): H_c = Om * (a_r + a_r.dag()) + delta_s * a_r.dag() * a_r return steadystate(H_c, c_op_r) rho_ss_c = parfor(calculate_rho_ss_c, Om_list) # calculate list of interference values for all driving strengths alpha_list = -expect(rho_ss_c, a_r) alpha_c_list = alpha_list.conjugate() # decompose emission for all driving strengths g20_int = [] I_c_int = [] I_inc_int = [] for i, rho in enumerate(rho_ss): g20_int.append( expect((a.dag() + alpha_c_list[i]) * (a.dag() + alpha_c_list[i]) * (a + alpha_list[i]) * (a + alpha_list[i]), rho) / expect((a.dag() + alpha_c_list[i]) * (a + alpha_list[i]), rho)**2 ) I_c_int.append(expect(a.dag() + alpha_c_list[i], rho) * expect(a + alpha_list[i], rho)) I_inc_int.append(expect( (a.dag() + alpha_c_list[i]) * (a + alpha_list[i]), rho) - I_c_int[-1])
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Visualize the results The dashed red and blue lines, which represent the TLS decomposition, are now matched well by the JC decomposition with optimal homodyne interference (red and blue). The dashed black line is shown again as a reminder of the JC system's coherent emission without interference, which does not saturate for large driving strengths. Additionally, with the interference the $g^{(2)}(0)$ value improves by many orders of magnitude.
plt.figure(figsize=(8,8)) plt.subplot(211) plt.semilogx(Om_list_TLS, abs(I_c_TLS), linestyle='dashed', label='TLS $I_\mathrm{c}$') plt.semilogx(Om_list_TLS, abs(I_inc_TLS), 'r', linestyle='dashed', label='TLS $I_\mathrm{inc}$') plt.semilogx(Om_list/np.sqrt(effective_gamma), abs(I_c/effective_gamma), 'k', linestyle='dashed', label='JC $I_\mathrm{c}$') plt.semilogx(Om_list/np.sqrt(effective_gamma), abs(I_inc/effective_gamma), 'r', label='JC $I_\mathrm{inc}$') plt.semilogx(Om_list/np.sqrt(effective_gamma), abs(I_c_int/effective_gamma), 'b', label='JC w/ homodyne $I_\mathrm{c}$') plt.semilogx(Om_list/np.sqrt(effective_gamma), abs(I_inc_int/effective_gamma), 'r') plt.ylim(5e-4, 0.6) plt.xlabel(r'Driving strength [$\Gamma_\mathrm{eff}$]') plt.ylabel('Normalized flux [$\Gamma_\mathrm{eff}$]') plt.legend(loc=2) plt.subplot(212) plt.loglog(Om_list/np.sqrt(effective_gamma), g20, 'k', linestyle='dashed', label='JC') plt.loglog(Om_list/np.sqrt(effective_gamma), g20_int, 'blueviolet', label='JC w/ interference') plt.ylim(lim) plt.xlabel(r'Driving strength [$\Gamma_\mathrm{eff}$]') plt.ylabel(r'$g^{(2)}(0)$') plt.legend(loc=4);
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Second-order coherence with delay We additionally consider the second-order coherence as a function of time delay, i.e. $$g^{(2)}(\tau)=\lim_{t\rightarrow\infty}\frac{\langle b^\dagger(t)b^\dagger(t+\tau)b(t+\tau)b(t)\rangle}{\langle b^\dagger(t)b(t)\rangle^2},$$ and show how it is calculated in the context of homodyne interference.
# first calculate the steady state H = Om * (a + a.dag()) + g * (sm.dag() * a + sm * a.dag()) + \ delta_s * (sm.dag() * sm + a.dag() * a) - \ detuning * sm.dag() * sm rho0 = steadystate(H, c_op) taulist = np.linspace(0, 5/effective_gamma, 1000) # next evolve the states according the quantum regression theorem # ...with the b operator corr_vec_int = expect( (a.dag() + alpha.conjugate()) * (a + alpha), mesolve( H, (a + alpha) * rho0 * (a.dag() + alpha.conjugate()), taulist, c_op, [], options=Options(atol=1e-13, rtol=1e-11) ).states ) n_int = expect(rho0, (a.dag() + alpha.conjugate()) * (a + alpha)) # ...with the a operator corr_vec = expect( a.dag() * a , mesolve( H, a * rho0 * a.dag(), taulist, c_op, [], options=Options(atol=1e-12, rtol=1e-10) ).states ) n = expect(rho0, a.dag() * a) # ...perform the same for the TLS comparison H_TLS = Om*(sm_TLS + sm_TLS.dag())*np.sqrt(effective_gamma) c_ops_TLS = [sm_TLS*np.sqrt(effective_gamma)] rho0_TLS = steadystate(H_TLS, c_ops_TLS) corr_vec_TLS = expect( sm_TLS.dag() * sm_TLS, mesolve( H_TLS, sm_TLS * rho0_TLS * sm_TLS.dag(), taulist, c_ops_TLS, [] ).states ) n_TLS = expect(rho0_TLS, sm_TLS.dag() * sm_TLS)
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Visualize the comparison to TLS correlations At a moderate driving strength, the JC correlation (dashed black line) is seen to significantly deviate from that of the TLS (dotted purple line). On the other hand, after the optimal homodyne interference, the emission correlations (solid purple line) match the ideal correlations very well.
plt.figure(figsize=(8,5)) l1, = plt.plot(taulist*effective_gamma, corr_vec_TLS/n_TLS**2, 'blueviolet', linestyle='dotted', label='TLS') plt.plot(taulist*effective_gamma, corr_vec/n**2, 'k', linestyle='dashed', label='JC') plt.plot(taulist*effective_gamma, corr_vec_int/n_int**2, 'blueviolet', label='JC w/ interference') plt.xlabel('$\\tau$ [$1/\Gamma_\mathrm{eff}$]') plt.ylabel('$g^{(2)}(\\tau)$') plt.legend(loc=2);
examples/homodyned-Jaynes-Cummings-emission.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
<div class="span5 alert alert-info"> <h3>Exercise Set I</h3> <br/> <b>Exercise/Answers:</b> <br/> <li> Look at the histogram above. Tell a story about the average ratings per critic. <b> The average fresh ratings per critic is around 0.6 with a minimum ratings of 0.35 and max of 0.81 </b> <li> What shape does the distribution look like? <b> The shape looks like a normal distribution or bell shape </b> <li> What is interesting about the distribution? What might explain these interesting things? <b> </b> </div> The Vector Space Model and a Search Engine All the diagrams here are snipped from Introduction to Information Retrieval by Manning et. al. which is a great resource on text processing. For additional information on text mining and natural language processing, see Foundations of Statistical Natural Language Processing by Manning and Schutze. Also check out Python packages nltk, spaCy, pattern, and their associated resources. Also see word2vec. Let us define the vector derived from document $d$ by $\bar V(d)$. What does this mean? Each document is treated as a vector containing information about the words contained in it. Each vector has the same length and each entry "slot" in the vector contains some kind of data about the words that appear in the document such as presence/absence (1/0), count (an integer) or some other statistic. Each vector has the same length because each document shared the same vocabulary across the full collection of documents -- this collection is called a corpus. To define the vocabulary, we take a union of all words we have seen in all documents. We then just associate an array index with them. So "hello" may be at index 5 and "world" at index 99. Suppose we have the following corpus: A Fox one day spied a beautiful bunch of ripe grapes hanging from a vine trained along the branches of a tree. The grapes seemed ready to burst with juice, and the Fox's mouth watered as he gazed longingly at them. Suppose we treat each sentence as a document $d$. The vocabulary (often called the lexicon) is the following: $V = \left{\right.$ a, along, and, as, at, beautiful, branches, bunch, burst, day, fox, fox's, from, gazed, grapes, hanging, he, juice, longingly, mouth, of, one, ready, ripe, seemed, spied, the, them, to, trained, tree, vine, watered, with$\left.\right}$ Then the document A Fox one day spied a beautiful bunch of ripe grapes hanging from a vine trained along the branches of a tree may be represented as the following sparse vector of word counts: $$\bar V(d) = \left( 4,1,0,0,0,1,1,1,0,1,1,0,1,0,1,1,0,0,0,0,2,1,0,1,0,0,1,0,0,0,1,1,0,0 \right)$$ or more succinctly as [(0, 4), (1, 1), (5, 1), (6, 1), (7, 1), (9, 1), (10, 1), (12, 1), (14, 1), (15, 1), (20, 2), (21, 1), (23, 1), (26, 1), (30, 1), (31, 1)] along with a dictionary { 0: a, 1: along, 5: beautiful, 6: branches, 7: bunch, 9: day, 10: fox, 12: from, 14: grapes, 15: hanging, 19: mouth, 20: of, 21: one, 23: ripe, 24: seemed, 25: spied, 26: the, 30: tree, 31: vine, } Then, a set of documents becomes, in the usual sklearn style, a sparse matrix with rows being sparse arrays representing documents and columns representing the features/words in the vocabulary. Notice that this representation loses the relative ordering of the terms in the document. That is "cat ate rat" and "rat ate cat" are the same. Thus, this representation is also known as the Bag-Of-Words representation. 
Here is another example, from the book quoted above, although the matrix is transposed here so that documents are columns: Such a matrix is also called a Term-Document Matrix. Here, the terms being indexed could be stemmed before indexing; for instance, jealous and jealousy after stemming are the same feature. One could also make use of other "Natural Language Processing" transformations in constructing the vocabulary. We could use Lemmatization, which reduces words to lemmas: work, working, worked would all reduce to work. We could remove "stopwords" from our vocabulary, such as common words like "the". We could look for particular parts of speech, such as adjectives. This is often done in Sentiment Analysis. And so on. It all depends on our application. From the book: The standard way of quantifying the similarity between two documents $d_1$ and $d_2$ is to compute the cosine similarity of their vector representations $\bar V(d_1)$ and $\bar V(d_2)$: $$S_{12} = \frac{\bar V(d_1) \cdot \bar V(d_2)}{|\bar V(d_1)| \times |\bar V(d_2)|}$$ There is a far more compelling reason to represent documents as vectors: we can also view a query as a vector. Consider the query q = jealous gossip. This query turns into the unit vector $\bar V(q)$ = (0, 0.707, 0.707) on the three coordinates below. The key idea now: to assign to each document d a score equal to the dot product: $$\bar V(q) \cdot \bar V(d)$$ Then we can use this simple Vector Model as a Search engine. In Code
from sklearn.feature_extraction.text import CountVectorizer text = ['Hop on pop', 'Hop off pop', 'Hop Hop hop'] print("Original text is\n{}".format('\n'.join(text))) vectorizer = CountVectorizer(min_df=0) # call `fit` to build the vocabulary vectorizer.fit(text) # call `transform` to convert text to a bag of words x = vectorizer.transform(text) # CountVectorizer uses a sparse array to save memory, but it's easier in this assignment to # convert back to a "normal" numpy array x = x.toarray() print("") print("Transformed text vector is \n{}".format(x)) # `get_feature_names` tracks which word is associated with each column of the transformed x print("") print("Words for each feature:") print(vectorizer.get_feature_names()) # Notice that the bag of words treatment doesn't preserve information about the *order* of words, # just their frequency def make_xy(critics, vectorizer=None): #Your code here if vectorizer is None: vectorizer = CountVectorizer() X = vectorizer.fit_transform(critics.quote) X = X.tocsc() # some versions of sklearn return COO format y = (critics.fresh == 'fresh').values.astype(np.int) return X, y X, y = make_xy(critics)
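To round out the search-engine picture described above, here is a minimal illustrative sketch (not part of the original assignment) that scores the three toy documents against a query using cosine similarity. It reuses the `vectorizer`, `x`, and `text` objects defined in the cell above; the query string is an arbitrary example.

import numpy as np

query = 'hop pop'
q = vectorizer.transform([query]).toarray()[0].astype(float)

# cosine similarity between the query vector and each document vector
doc_norms = np.linalg.norm(x, axis=1)
scores = (x @ q) / (doc_norms * np.linalg.norm(q))

# rank the documents by similarity to the query, best match first
for idx in np.argsort(scores)[::-1]:
    print('{:.3f}  {}'.format(scores[idx], text[idx]))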
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
Naive Bayes From Bayes' Theorem, we have that $$P(c \vert f) = \frac{P(c \cap f)}{P(f)}$$ where $c$ represents a class or category, and $f$ represents a feature vector, such as $\bar V(d)$ as above. We are computing the probability that a document (or whatever we are classifying) belongs to category c given the features in the document. $P(f)$ is really just a normalization constant, so the literature usually writes Bayes' Theorem in context of Naive Bayes as $$P(c \vert f) \propto P(f \vert c) P(c) $$ $P(c)$ is called the prior and is simply the probability of seeing class $c$. But what is $P(f \vert c)$? This is the probability that we see feature set $f$ given that this document is actually in class $c$. This is called the likelihood and comes from the data. One of the major assumptions of the Naive Bayes model is that the features are conditionally independent given the class. While the presence of a particular discriminative word may uniquely identify the document as being part of class $c$ and thus violate general feature independence, conditional independence means that the presence of that term is independent of all the other words that appear within that class. This is a very important distinction. Recall that if two events are independent, then: $$P(A \cap B) = P(A) \cdot P(B)$$ Thus, conditional independence implies $$P(f \vert c) = \prod_i P(f_i | c) $$ where $f_i$ is an individual feature (a word in this example). To make a classification, we then choose the class $c$ such that $P(c \vert f)$ is maximal. There is a small caveat when computing these probabilities. For floating point underflow we change the product into a sum by going into log space. This is called the LogSumExp trick. So: $$\log P(f \vert c) = \sum_i \log P(f_i \vert c) $$ There is another caveat. What if we see a term that didn't exist in the training data? This means that $P(f_i \vert c) = 0$ for that term, and thus $P(f \vert c) = \prod_i P(f_i | c) = 0$, which doesn't help us at all. Instead of using zeros, we add a small negligible value called $\alpha$ to each count. This is called Laplace Smoothing. $$P(f_i \vert c) = \frac{N_{ic}+\alpha}{N_c + \alpha N_i}$$ where $N_{ic}$ is the number of times feature $i$ was seen in class $c$, $N_c$ is the number of times class $c$ was seen and $N_i$ is the number of times feature $i$ was seen globally. $\alpha$ is sometimes called a regularization parameter. Multinomial Naive Bayes and Other Likelihood Functions Since we are modeling word counts, we are using variation of Naive Bayes called Multinomial Naive Bayes. This is because the likelihood function actually takes the form of the multinomial distribution. $$P(f \vert c) = \frac{\left( \sum_i f_i \right)!}{\prod_i f_i!} \prod_{f_i} P(f_i \vert c)^{f_i} \propto \prod_{i} P(f_i \vert c)$$ where the nasty term out front is absorbed as a normalization constant such that probabilities sum to 1. There are many other variations of Naive Bayes, all which depend on what type of value $f_i$ takes. If $f_i$ is continuous, we may be able to use Gaussian Naive Bayes. First compute the mean and variance for each class $c$. 
Then the per-feature likelihood $P(f_i \vert c)$ is given as follows $$P(f_i = v \vert c) = \frac{1}{\sqrt{2\pi \sigma^2_c}} e^{- \frac{\left( v - \mu_c \right)^2}{2 \sigma^2_c}}$$ <div class="span5 alert alert-info"> <h3>Exercise Set II</h3> <p><b>Exercise:</b> Implement a simple Naive Bayes classifier:</p> <ol> <li> split the data set into a training and test set <li> Use `scikit-learn`'s `MultinomialNB()` classifier with default parameters. <li> train the classifier over the training set and test on the test set <li> print the accuracy scores for both the training and the test sets </ol> What do you notice? Is this a good classifier? If not, why not? <b>Note that the test accuracy has to be computed against the true labels (`y_test`); scoring the classifier against its own predictions would trivially report 100%. With the corrected scoring, the training accuracy should come out noticeably higher than the test accuracy, which suggests the default classifier overfits the training data and leaves room for improvement.</b> </div>
# your turn
# split the data set into a training and test set
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=5)
clf = MultinomialNB()
clf.fit(X_train, y_train)
print('accuracy score on training set: ', clf.score(X_train, y_train))
# the test accuracy must be computed against the true labels,
# not against the model's own predictions (which would always give 100%)
print('accuracy score on test set: ', clf.score(X_test, y_test))
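To connect the formulas above to what `MultinomialNB` does internally, here is a minimal from-scratch sketch (an illustration only; it uses the standard vocabulary-size smoothing denominator that sklearn uses, rather than the exact expression quoted above). It assumes the `X_train`, `y_train`, and `X_test` objects from the cell above.

import numpy as np

alpha = 1.0  # Laplace smoothing parameter
Xtr = X_train.tocsr()
log_prior = {}
log_like = {}
for c in (0, 1):
    Xc = Xtr[y_train == c]
    # log P(c): fraction of training documents belonging to class c
    log_prior[c] = np.log(Xc.shape[0] / Xtr.shape[0])
    # log P(f_i | c) with Laplace smoothing of the per-class word counts
    counts = np.asarray(Xc.sum(axis=0)).ravel() + alpha
    log_like[c] = np.log(counts / counts.sum())

def predict_one(row):
    # log P(c | f) up to a constant: log P(c) + sum_i f_i * log P(f_i | c)
    f = row.toarray().ravel()
    scores = {c: log_prior[c] + f @ log_like[c] for c in (0, 1)}
    return max(scores, key=scores.get)

print('from-scratch prediction for the first test document:', predict_one(X_test.tocsr()[0]))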
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
Picking Hyperparameters for Naive Bayes and Text Maintenance We need to know what value to use for $\alpha$, and we also need to know which words to include in the vocabulary. As mentioned earlier, some words are obvious stopwords. Other words appear so infrequently that they serve as noise, and other words in addition to stopwords appear so frequently that they may also serve as noise. First, let's find an appropriate value for min_df for the CountVectorizer. min_df can be either an integer or a float/decimal. If it is an integer, min_df represents the minimum number of documents a word must appear in for it to be included in the vocabulary. If it is a float, it represents the minimum percentage of documents a word must appear in to be included in the vocabulary. From the documentation: min_df: When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. <div class="span5 alert alert-info"> <h3>Exercise Set III</h3> <p><b>Exercise:</b> Construct the cumulative distribution of document frequencies (df). The $x$-axis is a document count $x_i$ and the $y$-axis is the percentage of words that appear less than $x_i$ times. For example, at $x=5$, plot a point representing the percentage or number of words that appear in 5 or fewer documents.</p> <b> Done, please see the cell below </b> <p><b>Exercise:</b> Look for the point at which the curve begins climbing steeply. This may be a good value for `min_df`. If we were interested in also picking `max_df`, we would likely pick the value where the curve starts to plateau. What value did you choose?</p> <b>The curve climbs steeply starting at 1 and begins to plateau around 60, so min_df=1 and max_df=60.</b> </div>
# Your turn. # contruct the frequency of words vectorizer = CountVectorizer(stop_words='english') X = vectorizer.fit_transform(critics.quote) word_freq_df = pd.DataFrame({'term': vectorizer.get_feature_names(), 'occurrences':np.asarray(X.sum(axis=0)).ravel().tolist()}) word_freq_df['frequency'] = word_freq_df['occurrences']/np.sum(word_freq_df['occurrences']) word_freq_sorted=word_freq_df.sort_values('occurrences', ascending = False) word_freq_sorted.reset_index(drop=True, inplace=True) sum_words = len(word_freq_sorted) # create the cum frequency distribution saved_cnt=0 df=[] for i in range(1, 100): prev_cnt = len(word_freq_sorted[word_freq_sorted['occurrences']==i]) saved_cnt += prev_cnt if i==1: df=pd.DataFrame([[i, prev_cnt, prev_cnt, prev_cnt/sum_words]], columns=['x', 'freq','cumfreq', 'percent']) else: df2=pd.DataFrame([[i, prev_cnt, saved_cnt, saved_cnt/sum_words]], columns=['x', 'freq','cumfreq', 'percent']) df = df.append(df2, ignore_index=True) # create the bar grapp plt.bar(df.x, df.percent, align='center', alpha=0.5) plt.xticks(range(0,101,10)) plt.ylabel('Percentage of words that appears less than x') plt.xlabel('Document count of words (x)') plt.title('Cumulative percent distribution of words that appears in the reviews') plt.show()
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
<div class="span5 alert alert-info"> <h3>Exercise Set IV</h3> <p><b>Exercise:</b> What does using the function `log_likelihood` as the score mean? What are we trying to optimize for?</p> <b> ANSWER: The function log_likelihood is the logarithmic value of the probability </b> <p><b>Exercise:</b> Without writing any code, what do you think would happen if you choose a value of $\alpha$ that is too high?</p> <b>ANSWER: A large value of alpha will overfit the model </b> <p><b>Exercise:</b> Using the skeleton code below, find the best values of the parameter `alpha`, and use the value of `min_df` you chose in the previous exercise set. Use the `cv_score` function above with the `log_likelihood` function for scoring.</p> <b/> ANSWER: the best `alpha` is equal to 1 </div>
from sklearn.naive_bayes import MultinomialNB

#the grid of parameters to search over
alphas = [.1, 1, 5, 10, 50]
best_min_df = 1 # YOUR TURN: put your value of min_df here.

#Find the best value for alpha and min_df, and the best classifier
best_alpha = None
best_score = -np.inf
for alpha in alphas:
    vectorizer = CountVectorizer(min_df=best_min_df)
    Xthis, ythis = make_xy(critics, vectorizer)
    Xtrainthis = Xthis[mask]
    ytrainthis = ythis[mask]
    # your turn
    clf = MultinomialNB(alpha)
    clf.fit(Xtrainthis, ytrainthis)
    score = cv_score(clf, Xtrainthis, ytrainthis, log_likelihood)
    if score > best_score:
        print('cv_score for ', alpha, score)
        best_score = score
        best_alpha = alpha
print("alpha: {}".format(best_alpha))
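For reference, `cv_score` and `log_likelihood` are helper functions defined earlier in the notebook (not reproduced in this excerpt). A sketch of the standard forms used for this kind of exercise — an assumption about their exact implementation — looks roughly like the following.

from sklearn.model_selection import KFold

def log_likelihood(clf, x, y):
    # sum of the log-probabilities the classifier assigns to the true labels
    prob = clf.predict_log_proba(x)
    rotten = (y == 0)
    fresh = ~rotten
    return prob[rotten, 0].sum() + prob[fresh, 1].sum()

def cv_score(clf, x, y, scorefunc, n_folds=5):
    # average the held-out score over K train/test splits
    result = 0.0
    for train, test in KFold(n_splits=n_folds, shuffle=True).split(x):
        clf.fit(x[train], y[train])
        result += scorefunc(clf, x[test], y[test])
    return result / n_folds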
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
<div class="span5 alert alert-info"> <h3>Exercise Set V: Working with the Best Parameters</h3> <p><b>Exercise:</b> Using the best value of `alpha` you just found, calculate the accuracy on the training and test sets. Is this classifier better? Why (not)?</p> <b/> ANSWER: Yes, it is a better classifier since it improves the accuracy on test data from 72 (`alpha`= .1) to 74 percent (`alpha` = 1) </div>
vectorizer = CountVectorizer(min_df=best_min_df) X, y = make_xy(critics, vectorizer) xtrain=X[mask] ytrain=y[mask] xtest=X[~mask] ytest=y[~mask] clf = MultinomialNB(alpha=best_alpha).fit(xtrain, ytrain) #your turn. Print the accuracy on the test and training dataset training_accuracy = clf.score(xtrain, ytrain) test_accuracy = clf.score(xtest, ytest) print("Accuracy on training data: {:2f}".format(training_accuracy)) print("Accuracy on test data: {:2f}".format(test_accuracy)) from sklearn.metrics import confusion_matrix print(confusion_matrix(ytest, clf.predict(xtest))) print(xtest.shape)
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
Interpretation What are the strongly predictive features? We use a neat trick to identify strongly predictive features (i.e. words). First, create a data set such that each row has exactly one feature; this is represented by the identity matrix. Then, use the trained classifier to make predictions on this matrix. Finally, sort the rows by predicted probability and pick the top and bottom $K$ rows.
words = np.array(vectorizer.get_feature_names()) x = np.matrix(np.identity(xtest.shape[1]), copy=False) probs = clf.predict_log_proba(x)[:, 0] ind = np.argsort(probs) good_words = words[ind[:10]] bad_words = words[ind[-10:]] good_prob = probs[ind[:10]] bad_prob = probs[ind[-10:]] print("Good words\t P(fresh | word)") for w, p in list(zip(good_words, good_prob)): print("{:>20}".format(w), "{:.2f}".format(1 - np.exp(p))) print("Bad words\t P(fresh | word)") for w, p in list(zip(bad_words, bad_prob)): print("{:>20}".format(w), "{:.2f}".format(1 - np.exp(p)))
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
<br/> <b>Good words P(fresh | word) </b> <br/>
 touching 0.96 <br/>
 delight 0.95 <br/>
 delightful 0.95 <br/>
 brilliantly 0.94 <br/>
 energetic 0.94 <br/>
 superb 0.94 <br/>
 ensemble 0.93 <br/>
 childhood 0.93 <br/>
 engrossing 0.93 <br/>
 absorbing 0.93 <br/>
 <b> Bad words P(fresh | word) </b> <br/>
 sorry 0.13 <br/>
 plodding 0.13 <br/>
 dull 0.11 <br/>
 bland 0.11 <br/>
 disappointing 0.10 <br/>
 forced 0.10 <br/>
 uninspired 0.08 <br/>
 pointless 0.07 <br/>
 unfortunately 0.07 <br/>
 stupid 0.06 <div class="span5 alert alert-info"> <h3>Exercise Set VI</h3> <p><b>Exercise:</b> Why does this method work? What does the probability for each row in the identity matrix represent?</p> </div> The above exercise is an example of feature selection. There are many other feature selection methods. A list of feature selection methods available in sklearn is here. The most common feature selection technique for text mining is the chi-squared $\left( \chi^2 \right)$ method. Prediction Errors We can see mis-predictions as well.
x, y = make_xy(critics, vectorizer) prob = clf.predict_proba(x)[:, 0] predict = clf.predict(x) bad_rotten = np.argsort(prob[y == 0])[:5] bad_fresh = np.argsort(prob[y == 1])[-5:] print("Mis-predicted Rotten quotes") print('---------------------------') for row in bad_rotten: print(critics[y == 0].quote.iloc[row]) print("") print("Mis-predicted Fresh quotes") print('--------------------------') for row in bad_fresh: print(critics[y == 1].quote.iloc[row]) print("")
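As a concrete pointer for the chi-squared feature selection mentioned above, here is a minimal sketch (not part of the original assignment) that keeps only the words most strongly associated with the fresh/rotten label. It reuses the `x`, `y`, and `words` objects defined in the cells above, and the choice of `k=1000` is purely illustrative.

from sklearn.feature_selection import SelectKBest, chi2

# keep the k words with the strongest chi-squared association with the label
selector = SelectKBest(chi2, k=1000)
x_reduced = selector.fit_transform(x, y)

print('reduced feature matrix shape:', x_reduced.shape)
print('a few selected words:', words[selector.get_support()][:10])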
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
<div class="span5 alert alert-info"> <h3>Exercise Set VII: Predicting the Freshness for a New Review</h3> <br/> <div> <b>Exercise:</b> <ul> <li> Using your best trained classifier, predict the freshness of the following sentence: *'This movie is not remarkable, touching, or superb in any way'* <li> Is the result what you'd expect? Why (not)? <b/> The predicted result is "Fresh" which is not I expect. The word 'Not' is not taken into account thus the analysis mistakenly predicted it as "Fresh" based on the words remarkable, touching and superb which have a high probability of being a good review. The solution is to take the analysis into a bi-gram level which will take pair each words together and come up with an analysis based on consecutive pair of words. This will in effect see that the review is rotten since "not remarkable" will be taken as a negative review. </ul> </div> </div>
#your turn
# Predicting the Freshness for a New Review
docs_new = ['This movie is not remarkable, touching, or superb in any way']
X_new = vectorizer.transform(docs_new)
X_new = X_new.tocsc()
# avoid shadowing the built-in `str`; take the first (and only) prediction
label = "Fresh" if clf.predict(X_new)[0] == 1 else "Rotten"
print('"', docs_new[0], '" ==> ', label)
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
<div class="span5 alert alert-info"> <h3>Exercise Set VIII: Enrichment</h3> <p> There are several additional things we could try. Try some of these as exercises: <ol> <li> Build a Naive Bayes model where the features are n-grams instead of words. N-grams are phrases containing n words next to each other: a bigram contains 2 words, a trigram contains 3 words, and 6-gram contains 6 words. This is useful because "not good" and "so good" mean very different things. On the other hand, as n increases, the model does not scale well since the feature set becomes more sparse. <li> Try a model besides Naive Bayes, one that would allow for interactions between words -- for example, a Random Forest classifier. <li> Try adding supplemental features -- information about genre, director, cast, etc. <li> Use word2vec or [Latent Dirichlet Allocation](https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation) to group words into topics and use those topics for prediction. <li> Use TF-IDF weighting instead of word counts. </ol> </p> <b>Exercise:</b> Try a few of these ideas to improve the model (or any other ideas of your own). Implement here and report on the result. </div> BIGRAM USING NAIVE BAYES
def print_top_words(model, feature_names, n_top_words): for topic_idx, topic in enumerate(model.components_): print("Topic #%d:" % topic_idx) print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])) print() # Your turn def make_xy_bigram(critics, bigram_vectorizer=None): #Your code here if bigram_vectorizer is None: bigram_vectorizer = CountVectorizer(ngram_range=(1, 2),token_pattern=r'\b\w+\b', min_df=1) X = bigram_vectorizer.fit_transform(critics.quote) X = X.tocsc() # some versions of sklearn return COO format y = (critics.fresh == 'fresh').values.astype(np.int) return X, y vectorizer = CountVectorizer(ngram_range=(1, 2), token_pattern=r'\b\w+\b', min_df=1, stop_words='english') X, y = make_xy_bigram(critics, vectorizer) xtrain=X[mask] ytrain=y[mask] xtest=X[~mask] ytest=y[~mask] clf = MultinomialNB(alpha=best_alpha).fit(xtrain, ytrain) #your turn. Print the accuracy on the test and training dataset training_accuracy = clf.score(xtrain, ytrain) test_accuracy = clf.score(xtest, ytest) print("Accuracy on training data: {:2f}".format(training_accuracy)) print("Accuracy on test data: {:2f}".format(test_accuracy))
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
Using bigram from nltk package
import itertools import pandas as pd from nltk.collocations import BigramCollocationFinder from nltk.metrics import BigramAssocMeasures def bigram_word_feats(words, score_fn=BigramAssocMeasures.chi_sq, n=200): bigram_finder = BigramCollocationFinder.from_words(words) bigrams = bigram_finder.nbest(score_fn, n) return dict([(ngram, True) for ngram in itertools.chain(words, bigrams)]) import collections import nltk.classify.util, nltk.metrics from nltk import precision, recall from nltk.classify import NaiveBayesClassifier from nltk.corpus import movie_reviews pos_review = critics[critics['fresh']=='fresh'] neg_review = critics[critics['fresh']=='rotten'] negfeats = [(bigram_word_feats(row['quote'].split()),'neg') for index, row in neg_review.iterrows()] posfeats = [(bigram_word_feats(row['quote'].split()),'pos') for index, row in pos_review.iterrows()] negcutoff = int(len(negfeats)*.7) poscutoff = int(len(posfeats)*.7) trainfeats = negfeats[:negcutoff] + posfeats[:poscutoff] testfeats = negfeats[negcutoff:] + posfeats[poscutoff:] classifier = NaiveBayesClassifier.train(trainfeats) refsets = collections.defaultdict(set) testsets = collections.defaultdict(set) for i, (feats, label) in enumerate(testfeats): refsets[label].add(i) observed = classifier.classify(feats) testsets[observed].add(i) classifier.show_most_informative_features()
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
Using RANDOM FOREST classifier instead of Naive Bayes
from sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0) scores = cross_val_score(clf, X, y) scores.mean()
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
Try adding supplemental features -- information about genre, director, cast, etc.
# Create a random forest classifier. By convention, clf means 'classifier' #clf = RandomForestClassifier(n_jobs=2) # Train the classifier to take the training features and learn how they relate # to the training y (the species) #clf.fit(train[features], y) critics.head()
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
Use word2vec or Latent Dirichlet Allocation to group words into topics and use those topics for prediction.
from sklearn.decomposition import NMF, LatentDirichletAllocation vectorizer = CountVectorizer(min_df=best_min_df) X, y = make_xy(critics, vectorizer) xtrain=X[mask] ytrain=y[mask] xtest=X[~mask] ytest=y[~mask] lda = LatentDirichletAllocation(n_topics=10, max_iter=5, learning_method='online', learning_offset=50., random_state=0) lda.fit(X) print("\nTopics in LDA model:") feature_names = vectorizer.get_feature_names() print_top_words(lda, feature_names, n_top_words=20)
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
Use TF-IDF weighting instead of word counts.
# http://scikit-learn.org/dev/modules/feature_extraction.html#text-feature-extraction # http://scikit-learn.org/dev/modules/classes.html#text-feature-extraction-ref from sklearn.feature_extraction.text import TfidfVectorizer tfidfvectorizer = TfidfVectorizer(min_df=1, stop_words='english') Xtfidf=tfidfvectorizer.fit_transform(critics.quote) X = Xtfidf.tocsc() # some versions of sklearn return COO format y = (critics.fresh == 'fresh').values.astype(np.int) xtrain=X[mask] ytrain=y[mask] xtest=X[~mask] ytest=y[~mask] clf = MultinomialNB(alpha=best_alpha).fit(xtrain, ytrain) #your turn. Print the accuracy on the test and training dataset training_accuracy = clf.score(xtrain, ytrain) test_accuracy = clf.score(xtest, ytest) print("Accuracy on training data: {:2f}".format(training_accuracy)) print("Accuracy on test data: {:2f}".format(test_accuracy))
Mini_Project_Naive_Bayes.ipynb
anonyXmous/CapstoneProject
unlicense
Set parameters
data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' # Setup for reading the raw data raw = io.read_raw_fif(raw_fname) events = mne.read_events(event_fname) # Add a bad channel raw.info['bads'] += ['MEG 2443'] # Pick MEG gradiometers picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True, exclude='bads') # Create epochs for the visual condition event_id, tmin, tmax = 3, -0.2, 1.5 # need a long enough epoch for 5 cycles epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6)) # Compute connectivity for band containing the evoked response. # We exclude the baseline period fmin, fmax = 3., 9. sfreq = raw.info['sfreq'] # the sampling frequency tmin = 0.0 # exclude the baseline period con, freqs, times, n_epochs, n_tapers = spectral_connectivity( epochs, method='pli', mode='multitaper', sfreq=sfreq, fmin=fmin, fmax=fmax, faverage=True, tmin=tmin, mt_adaptive=False, n_jobs=1) # the epochs contain an EOG channel, which we remove now ch_names = epochs.ch_names idx = [ch_names.index(name) for name in ch_names if name.startswith('MEG')] con = con[idx][:, idx] # con is a 3D array where the last dimension is size one since we averaged # over frequencies in a single band. Here we make it 2D con = con[:, :, 0] # Now, visualize the connectivity in 3D from mayavi import mlab # noqa mlab.figure(size=(600, 600), bgcolor=(0.5, 0.5, 0.5)) # Plot the sensor locations sens_loc = [raw.info['chs'][picks[i]]['loc'][:3] for i in idx] sens_loc = np.array(sens_loc) pts = mlab.points3d(sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2], color=(1, 1, 1), opacity=1, scale_factor=0.005) # Get the strongest connections n_con = 20 # show up to 20 connections min_dist = 0.05 # exclude sensors that are less than 5cm apart threshold = np.sort(con, axis=None)[-n_con] ii, jj = np.where(con >= threshold) # Remove close connections con_nodes = list() con_val = list() for i, j in zip(ii, jj): if linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist: con_nodes.append((i, j)) con_val.append(con[i, j]) con_val = np.array(con_val) # Show the connections as tubes between sensors vmax = np.max(con_val) vmin = np.min(con_val) for val, nodes in zip(con_val, con_nodes): x1, y1, z1 = sens_loc[nodes[0]] x2, y2, z2 = sens_loc[nodes[1]] points = mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val], vmin=vmin, vmax=vmax, tube_radius=0.001, colormap='RdBu') points.module_manager.scalar_lut_manager.reverse_lut = True mlab.scalarbar(title='Phase Lag Index (PLI)', nb_labels=4) # Add the sensor names for the connections shown nodes_shown = list(set([n[0] for n in con_nodes] + [n[1] for n in con_nodes])) for node in nodes_shown: x, y, z = sens_loc[node] mlab.text3d(x, y, z, raw.ch_names[picks[node]], scale=0.005, color=(0, 0, 0)) view = (-88.7, 40.8, 0.76, np.array([-3.9e-4, -8.5e-3, -1e-2])) mlab.view(*view)
0.16/_downloads/plot_sensor_connectivity.ipynb
mne-tools/mne-tools.github.io
bsd-3-clause
Imports
import tensorflow_hub as hub import joblib import gzip import kipoiseq from kipoiseq import Interval import pyfaidx import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns %matplotlib inline %config InlineBackend.figure_format = 'retina' transform_path = 'gs://dm-enformer/models/enformer.finetuned.SAD.robustscaler-PCA500-robustscaler.transform.pkl' model_path = 'https://tfhub.dev/deepmind/enformer/1' fasta_file = '/root/data/genome.fa' clinvar_vcf = '/root/data/clinvar.vcf.gz' # Download targets from Basenji2 dataset # Cite: Kelley et al Cross-species regulatory sequence activity prediction. PLoS Comput. Biol. 16, e1008050 (2020). targets_txt = 'https://raw.githubusercontent.com/calico/basenji/master/manuscripts/cross2020/targets_human.txt' df_targets = pd.read_csv(targets_txt, sep='\t') df_targets.head(3)
enformer/enformer-usage.ipynb
deepmind/deepmind-research
apache-2.0
Download files Download and index the reference genome fasta file Credit to Genome Reference Consortium: https://www.ncbi.nlm.nih.gov/grc Schneider et al 2017 http://dx.doi.org/10.1101/gr.213611.116: Evaluation of GRCh38 and de novo haploid genome assemblies demonstrates the enduring quality of the reference assembly
!mkdir -p /root/data !wget -O - http://hgdownload.cse.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz | gunzip -c > {fasta_file} pyfaidx.Faidx(fasta_file) !ls /root/data
enformer/enformer-usage.ipynb
deepmind/deepmind-research
apache-2.0
Download the clinvar file. Reference: Landrum MJ, Lee JM, Benson M, Brown GR, Chao C, Chitipiralla S, Gu B, Hart J, Hoffman D, Jang W, Karapetyan K, Katz K, Liu C, Maddipatla Z, Malheiro A, McDaniel K, Ovetsky M, Riley G, Zhou G, Holmes JB, Kattman BL, Maglott DR. ClinVar: improving access to variant interpretations and supporting evidence. Nucleic Acids Res . 2018 Jan 4. PubMed PMID: 29165669 .
!wget https://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf_GRCh38/clinvar.vcf.gz -O /root/data/clinvar.vcf.gz
enformer/enformer-usage.ipynb
deepmind/deepmind-research
apache-2.0
Code (double click on the title to show the code)
# @title `Enformer`, `EnformerScoreVariantsNormalized`, `EnformerScoreVariantsPCANormalized`, SEQUENCE_LENGTH = 393216 class Enformer: def __init__(self, tfhub_url): self._model = hub.load(tfhub_url).model def predict_on_batch(self, inputs): predictions = self._model.predict_on_batch(inputs) return {k: v.numpy() for k, v in predictions.items()} @tf.function def contribution_input_grad(self, input_sequence, target_mask, output_head='human'): input_sequence = input_sequence[tf.newaxis] target_mask_mass = tf.reduce_sum(target_mask) with tf.GradientTape() as tape: tape.watch(input_sequence) prediction = tf.reduce_sum( target_mask[tf.newaxis] * self._model.predict_on_batch(input_sequence)[output_head]) / target_mask_mass input_grad = tape.gradient(prediction, input_sequence) * input_sequence input_grad = tf.squeeze(input_grad, axis=0) return tf.reduce_sum(input_grad, axis=-1) class EnformerScoreVariantsRaw: def __init__(self, tfhub_url, organism='human'): self._model = Enformer(tfhub_url) self._organism = organism def predict_on_batch(self, inputs): ref_prediction = self._model.predict_on_batch(inputs['ref'])[self._organism] alt_prediction = self._model.predict_on_batch(inputs['alt'])[self._organism] return alt_prediction.mean(axis=1) - ref_prediction.mean(axis=1) class EnformerScoreVariantsNormalized: def __init__(self, tfhub_url, transform_pkl_path, organism='human'): assert organism == 'human', 'Transforms only compatible with organism=human' self._model = EnformerScoreVariantsRaw(tfhub_url, organism) with tf.io.gfile.GFile(transform_pkl_path, 'rb') as f: transform_pipeline = joblib.load(f) self._transform = transform_pipeline.steps[0][1] # StandardScaler. def predict_on_batch(self, inputs): scores = self._model.predict_on_batch(inputs) return self._transform.transform(scores) class EnformerScoreVariantsPCANormalized: def __init__(self, tfhub_url, transform_pkl_path, organism='human', num_top_features=500): self._model = EnformerScoreVariantsRaw(tfhub_url, organism) with tf.io.gfile.GFile(transform_pkl_path, 'rb') as f: self._transform = joblib.load(f) self._num_top_features = num_top_features def predict_on_batch(self, inputs): scores = self._model.predict_on_batch(inputs) return self._transform.transform(scores)[:, :self._num_top_features] # TODO(avsec): Add feature description: Either PCX, or full names. # @title `variant_centered_sequences` class FastaStringExtractor: def __init__(self, fasta_file): self.fasta = pyfaidx.Fasta(fasta_file) self._chromosome_sizes = {k: len(v) for k, v in self.fasta.items()} def extract(self, interval: Interval, **kwargs) -> str: # Truncate interval if it extends beyond the chromosome lengths. chromosome_length = self._chromosome_sizes[interval.chrom] trimmed_interval = Interval(interval.chrom, max(interval.start, 0), min(interval.end, chromosome_length), ) # pyfaidx wants a 1-based interval sequence = str(self.fasta.get_seq(trimmed_interval.chrom, trimmed_interval.start + 1, trimmed_interval.stop).seq).upper() # Fill truncated values with N's. 
pad_upstream = 'N' * max(-interval.start, 0) pad_downstream = 'N' * max(interval.end - chromosome_length, 0) return pad_upstream + sequence + pad_downstream def close(self): return self.fasta.close() def variant_generator(vcf_file, gzipped=False): """Yields a kipoiseq.dataclasses.Variant for each row in VCF file.""" def _open(file): return gzip.open(vcf_file, 'rt') if gzipped else open(vcf_file) with _open(vcf_file) as f: for line in f: if line.startswith('#'): continue chrom, pos, id, ref, alt_list = line.split('\t')[:5] # Split ALT alleles and return individual variants as output. for alt in alt_list.split(','): yield kipoiseq.dataclasses.Variant(chrom=chrom, pos=pos, ref=ref, alt=alt, id=id) def one_hot_encode(sequence): return kipoiseq.transforms.functional.one_hot_dna(sequence).astype(np.float32) def variant_centered_sequences(vcf_file, sequence_length, gzipped=False, chr_prefix=''): seq_extractor = kipoiseq.extractors.VariantSeqExtractor( reference_sequence=FastaStringExtractor(fasta_file)) for variant in variant_generator(vcf_file, gzipped=gzipped): interval = Interval(chr_prefix + variant.chrom, variant.pos, variant.pos) interval = interval.resize(sequence_length) center = interval.center() - interval.start reference = seq_extractor.extract(interval, [], anchor=center) alternate = seq_extractor.extract(interval, [variant], anchor=center) yield {'inputs': {'ref': one_hot_encode(reference), 'alt': one_hot_encode(alternate)}, 'metadata': {'chrom': chr_prefix + variant.chrom, 'pos': variant.pos, 'id': variant.id, 'ref': variant.ref, 'alt': variant.alt}} # @title `plot_tracks` def plot_tracks(tracks, interval, height=1.5): fig, axes = plt.subplots(len(tracks), 1, figsize=(20, height * len(tracks)), sharex=True) for ax, (title, y) in zip(axes, tracks.items()): ax.fill_between(np.linspace(interval.start, interval.end, num=len(y)), y) ax.set_title(title) sns.despine(top=True, right=True, bottom=True) ax.set_xlabel(str(interval)) plt.tight_layout()
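As a quick illustration of how these helpers fit together (a sketch only; the genomic coordinates and the track column indices below are illustrative choices, not prescribed by this excerpt), one could predict on a single reference interval and plot a couple of tracks:

# Illustrative usage sketch of the classes and helpers defined above.
model = Enformer(model_path)
fasta_extractor = FastaStringExtractor(fasta_file)

# pick an example locus and resize it to the model's input length
target_interval = kipoiseq.Interval('chr11', 35_082_742, 35_197_430)  # coordinates are illustrative
sequence_one_hot = one_hot_encode(
    fasta_extractor.extract(target_interval.resize(SEQUENCE_LENGTH)))

# batch dimension of 1; the 'human' head holds the human genomic tracks
predictions = model.predict_on_batch(sequence_one_hot[np.newaxis])['human'][0]

# plot two example tracks over the (unresized) target interval
tracks = {'predicted track 41': predictions[:, 41],
          'predicted track 4799 (log scale)': np.log10(1 + predictions[:, 4799])}
plot_tracks(tracks, target_interval)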
enformer/enformer-usage.ipynb
deepmind/deepmind-research
apache-2.0