markdown (stringlengths 0 to 1.02M) | code (stringlengths 0 to 832k) | output (stringlengths 0 to 1.02M) | license (stringlengths 3 to 36) | path (stringlengths 6 to 265) | repo_name (stringlengths 6 to 127) |
---|---|---|---|---|---|
Experiment parameters: total experiment length, in years (decimals are supported). | w["total_experiment_length_years"] = widgets.BoundedFloatText(
value=7,
min=0,
max=15,
step=0.1,
description='Years:',
disabled=False
)
display(w["total_experiment_length_years"])
w["observing_efficiency"] = widgets.BoundedFloatText(
value=0.2,
min=0,
max=1,
step=0.01,
description='Efficiency:',
disabled=False
) | _____no_output_____ | Apache-2.0 | 06_ui.ipynb | CMB-S4/s4_design_sim_tool |
Observing efficiency: typically 20%; use decimal notation (e.g. 0.2). | display(w["observing_efficiency"])
w["number_of_splits"] = widgets.BoundedIntText(
value=1,
min=1,
max=7,
step=1,
description='Splits:',
disabled=False
) | _____no_output_____ | Apache-2.0 | 06_ui.ipynb | CMB-S4/s4_design_sim_tool |
Number of splits: 1 generates only the full-mission map; 2-7 generates the full-mission map and then the requested number of splits, scaled accordingly (e.g. 7 generates the full-mission map plus 7 equal, yearly maps). | display(w["number_of_splits"]) | _____no_output_____ | Apache-2.0 | 06_ui.ipynb | CMB-S4/s4_design_sim_tool |
Telescope configuration: currently we constrain the instrument to a total of 6 SATs and 3 LATs; each SAT has a maximum of 3 tubes, each LAT a maximum of 19. The checkbox next to each telescope indicates whether its total number of tubes is correct. | import toml
config = toml.load("s4_design.toml")
def define_check_sum(telescope_widgets, max_tubes):
def check_sum(_):
total_tubes = sum([w.value for w in telescope_widgets[1:1+4]])
telescope_widgets[0].value = total_tubes == max_tubes
return check_sum
telescopes = {"SAT":{}, "LAT":{}}
for telescope, tubes in config["telescopes"]["SAT"].items():
telescopes["SAT"][telescope] = [widgets.Valid(
value=True, description=telescope, layout=widgets.Layout(width='120px')
)]
telescope_check = define_check_sum(telescopes["SAT"][telescope], 3)
for k,v in tubes.items():
if k == "site":
wid = widgets.Dropdown(
options=['Pole', 'Chile'],
value=v,
description=k,
disabled=False, layout=widgets.Layout(width='150px')
)
elif k == "years":
wid = widgets.BoundedFloatText(
value=v,
min=0,
max=20,
step=0.1,
description='years',
disabled=False, layout=widgets.Layout(width='130px')
)
else:
wid = widgets.BoundedIntText(
value=v,
min=0,
max=3,
step=1,
description=k,
disabled=False, layout=widgets.Layout(width='130px')
)
wid.observe(telescope_check)
telescopes["SAT"][telescope].append(wid)
for k, v in telescopes["SAT"].items():
display(widgets.HBox(v))
for telescope, tubes in config["telescopes"]["LAT"].items():
telescopes["LAT"][telescope] = [widgets.Valid(
value=True, description=telescope, layout=widgets.Layout(width='120px')
)]
telescope_check = define_check_sum(telescopes["LAT"][telescope], 19)
for k,v in tubes.items():
if k == "site":
wid = widgets.Dropdown(
options=['Pole', 'Chile'],
value=v,
description=k,
disabled=False, layout=widgets.Layout(width='150px')
)
elif k == "years":
wid = widgets.BoundedFloatText(
value=v,
min=0,
max=20,
step=0.1,
description='years',
disabled=False, layout=widgets.Layout(width='130px')
)
else:
wid = widgets.BoundedIntText(
value=v,
min=0,
max=19,
step=1,
description=k,
disabled=False, layout=widgets.Layout(width='130px')
)
wid.observe(telescope_check)
telescopes["LAT"][telescope].append(wid)
for k, v in telescopes["LAT"].items():
display(widgets.HBox(v))
import toml
toml_decoder = toml.decoder.TomlDecoder()
toml_encoder = toml.TomlPreserveInlineDictEncoder()
def generate_toml():
output_config = {}
for section in ["sky_emission", "experiment"]:
output_config[section] = {}
for k in config[section]:
output_config[section][k] = w[k].value
output_config["telescopes"] = {"SAT":{}, "LAT":{}}
for t in ["SAT", "LAT"]:
for telescope, tubes in telescopes[t].items():
output_config["telescopes"][t][telescope] = toml_decoder.get_empty_inline_table()
for tube_type in tubes[1:]:
output_config["telescopes"][t][telescope][tube_type.description] = tube_type.value
if tube_type.description == "years":
output_config["telescopes"][t][telescope][tube_type.description] = int(tube_type.value)
return toml.dumps(output_config, encoder=toml_encoder)
from s4_design_sim_tool.cli import md5sum_string, S4RefSimTool
from pathlib import Path | _____no_output_____ | Apache-2.0 | 06_ui.ipynb | CMB-S4/s4_design_sim_tool |
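As a self-contained illustration of the validation mechanism used above (an assumed example, not part of the tool): a `Valid` checkbox that tracks whether two tube-count widgets sum to a required total, updated through `observe` callbacks.
import ipywidgets as widgets
from IPython.display import display

required_total = 3
status = widgets.Valid(value=True, description="tubes")
tube_a = widgets.BoundedIntText(value=2, min=0, max=3, description="tube A")
tube_b = widgets.BoundedIntText(value=1, min=0, max=3, description="tube B")

def check_total(_):
    # observe() fires on any trait change; recompute the sum and flip the Valid widget
    status.value = (tube_a.value + tube_b.value) == required_total

tube_a.observe(check_total)
tube_b.observe(check_total)
display(widgets.HBox([status, tube_a, tube_b]))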
Generate a TOML configuration file: click on the button to generate the TOML file and display it. | import os
output_location = os.environ.get("S4REFSIMTOOL_OUTPUT_URL", "")
button = widgets.Button(
description='Generate TOML',
disabled=False,
button_style='info', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Click me',
icon='check'
)
output_label = widgets.HTML(value="")
output = widgets.Output(layout={'border': '1px solid black'})
display(button, output_label, output)
def on_button_clicked(b):
output.clear_output()
toml_string = generate_toml()
md5sum = md5sum_string(toml_string)
output_path = Path("output") / md5sum
output_label.value = ""
if output_path.exists():
output_label.value = "This exact CMB-S4 configuration has <b>already been executed</b><br />" + \
f"<a href='{output_location}/output/{md5sum}' target='blank'><button class='p-Widget jupyter-widgets jupyter-button widget-button mod-success'>Access the maps </button></a>"
output_label.value += "<p>TOML file preview:</p>"
with output:
print(toml_string)
button.on_click(on_button_clicked)
import ipywidgets as widgets
import logging
class OutputWidgetHandler(logging.Handler):
""" Custom logging handler sending logs to an output widget """
def __init__(self, *args, **kwargs):
super(OutputWidgetHandler, self).__init__(*args, **kwargs)
layout = {
'width': '100%',
'height': '500px',
'border': '1px solid black'
}
self.out = widgets.Output(layout=layout)
def emit(self, record):
""" Overload of logging.Handler method """
formatted_record = self.format(record)
new_output = {
'name': 'stdout',
'output_type': 'stream',
'text': formatted_record+'\n'
}
self.out.outputs = (new_output, ) + self.out.outputs
def show_logs(self):
""" Show the logs """
display(self.out)
def clear_logs(self):
""" Clear the current logs """
self.out.clear_output()
logger = logging.root
handler = OutputWidgetHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.INFO) | _____no_output_____ | Apache-2.0 | 06_ui.ipynb | CMB-S4/s4_design_sim_tool |
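The button above names the output folder after the hash of the TOML string, so an identical configuration always maps to the same folder and can be detected as already executed. A minimal illustration of that idea, assuming `md5sum_string` behaves like a plain MD5 digest of the text:
import hashlib

def toml_digest(toml_string):
    # Hex MD5 digest of a configuration string, used here as a folder name
    return hashlib.md5(toml_string.encode("utf-8")).hexdigest()

print(toml_digest('[experiment]\ntotal_experiment_length_years = 7.0\n'))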
Run the simulation: generate the output maps. | #export
def create_wget_script(folder, output_location):
with open(folder / "download_all.sh", "w") as f:
f.write("#!/bin/bash\n")
for o in folder.iterdir():
if not str(o).endswith("sh"):
f.write(f"wget {output_location}/{o}\n")
def run_simulation(toml_filename, md5sum):
output_path = toml_filename.parents[0]
sim = S4RefSimTool(toml_filename, output_folder=output_path)
sim.run(channels="all", sites=["Pole", "Chile"])
logger.info("Create the wget script")
create_wget_script(output_path, output_location)
run_button = widgets.Button(
description='Create the maps',
disabled=False,
button_style='danger', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Click me',
icon='check'
)
run_output_label = widgets.HTML(value="")
handler.clear_logs()
display(run_button, run_output_label, handler.out)
def on_run_button_clicked(_):
run_button.disabled = True
toml_string = generate_toml()
md5sum = md5sum_string(toml_string)
output_path = Path("output") / md5sum
if output_path.exists():
logger.error("This configuration has already been executed")
run_button.disabled = False
return
output_path.mkdir(parents=True, exist_ok=True)
toml_filename = output_path / "config.toml"
with open(toml_filename, "w") as f:
f.write(toml_string)
run_output_label.value = "<p> The simulation has been launched, see the logs below, access the TOML configuration file and the maps as they are created clicking on the button </p>" + \
f"<a href='{output_location}/output/{md5sum}' target='blank'><button class='p-Widget jupyter-widgets jupyter-button widget-button mod-success'>Access the maps </button></a>"
run_simulation(toml_filename, md5sum)
run_button.disabled = False
run_button.on_click(on_run_button_clicked) | _____no_output_____ | Apache-2.0 | 06_ui.ipynb | CMB-S4/s4_design_sim_tool |
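A quick way to see what `create_wget_script` (defined above) produces, using a throwaway folder; the file name and URL below are placeholders, not real simulation outputs.
from pathlib import Path
import tempfile

tmp = Path(tempfile.mkdtemp())
(tmp / "example_map.fits").touch()                 # placeholder output file
create_wget_script(tmp, "https://example.org/s4")  # placeholder base URL
print((tmp / "download_all.sh").read_text())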
Amazon SageMaker with XGBoost and Hyperparameter Tuning for Direct Marketing predictions
_**Supervised Learning with Gradient Boosted Trees: A Binary Prediction Problem With Unbalanced Classes**_
---
Contents
1. [Objective](#Objective)
1. [Background](#Background)
1. [Environment Preparation](#Environment-preparation)
1. [Data Downloading](#Data-downloading-and-exploration)
1. [Data Transformation](#Data-Transformation)
1. [SageMaker: Training](#Training)
1. [SageMaker: Deploying and evaluating model](#Deploying-and-evaluating-model)
1. [SageMaker: Hyperparameter Optimization (HPO)](#Hyperparameter-Optimization-(HPO))
1. [Conclusions](#Conclusions)
1. [Releasing cloud resources](#Releasing-cloud-resources)
---
Objective
The goal of this workshop is to serve as a **Minimum Viable Example about SageMaker**, teaching you how to do **basic ML training** and **Hyper-Parameter Optimization (HPO)** in AWS. Teaching an in-depth data science approach is out of the scope of this workshop. We hope that you can use it as a starting point and modify it according to your future projects.
---
Background (problem description and approach)
- **Direct marketing**: contacting potential new customers via mail, email, phone call etc.
- **Challenge**: A) too many potential customers; B) limited resources of the approacher (time, money etc.).
- **Problem**: **Which are the potential customers with the highest chance of becoming actual customers?** (so as to focus the effort only on them).
- **Our setting**: a bank that wants to predict *whether a customer will enroll for a term deposit, after one or more phone calls*.
- **Our approach**: build an ML model to do this prediction from readily available information, e.g. demographics, past interactions etc. (features).
- **Our tools**: we will be using the **XGBoost** algorithm in AWS **SageMaker**, followed by **Hyperparameter Optimization (HPO)** to produce the best model.
---
Environment preparation
SageMaker requires some minimal setup at the beginning. This setup is standard and you can use it for any of your future projects. Things to specify:
- The **S3 bucket** and **prefix** that you want to use for training and model data. **This should be within the same region as SageMaker training**!
- The **IAM role** used to give training access to your data. See SageMaker documentation for how to create these. | import numpy as np # For matrix operations and numerical processing
import pandas as pd # For munging tabular data
import time
import os
from util.ml_reporting_tools import generate_classification_report # helper function for classification reports
# setting up SageMaker parameters
import sagemaker
import boto3
sgmk_region = boto3.Session().region_name
sgmk_client = boto3.Session().client("sagemaker")
sgmk_role = sagemaker.get_execution_role()
sgmk_bucket = sagemaker.Session().default_bucket() # a default bucket has been created for this session
sgmk_prefix = "sagemaker/xgboost-hpo"
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
--- Data downloading and exploration
Let's start by downloading the [direct marketing dataset](https://archive.ics.uci.edu/ml/datasets/bank+marketing) from UCI's ML Repository. We can run shell commands from Jupyter using the following code: | # (Running shell commands from Jupyter)
!wget -P data/ -N https://archive.ics.uci.edu/ml/machine-learning-databases/00222/bank-additional.zip
!unzip -o data/bank-additional.zip -d data/
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
Now let's read this into a pandas DataFrame and take a look. | df_data = pd.read_csv("./data/bank-additional/bank-additional-full.csv", sep=";")
df_data.head() # show part of the dataframe
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
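Since the problem is described as having unbalanced classes, a quick check of the target distribution can be helpful before modelling (a minimal sketch using the DataFrame loaded above):
# Fraction of 'yes' vs 'no' in the enrollment outcome column 'y'
print(df_data["y"].value_counts(normalize=True))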
_**Specifics on each of the features:**_
*Demographics:*
* `age`: Customer's age (numeric)
* `job`: Type of job (categorical: 'admin.', 'services', ...)
* `marital`: Marital status (categorical: 'married', 'single', ...)
* `education`: Level of education (categorical: 'basic.4y', 'high.school', ...)
*Past customer events:*
* `default`: Has credit in default? (categorical: 'no', 'unknown', ...)
* `housing`: Has housing loan? (categorical: 'no', 'yes', ...)
* `loan`: Has personal loan? (categorical: 'no', 'yes', ...)
*Past direct marketing contacts:*
* `contact`: Contact communication type (categorical: 'cellular', 'telephone', ...)
* `month`: Last contact month of year (categorical: 'may', 'nov', ...)
* `day_of_week`: Last contact day of the week (categorical: 'mon', 'fri', ...)
* `duration`: Last contact duration, in seconds (numeric). Important note: if duration = 0 then `y` = 'no'.
*Campaign information:*
* `campaign`: Number of contacts performed during this campaign and for this client (numeric, includes last contact)
* `pdays`: Number of days that passed by after the client was last contacted from a previous campaign (numeric)
* `previous`: Number of contacts performed before this campaign and for this client (numeric)
* `poutcome`: Outcome of the previous marketing campaign (categorical: 'nonexistent', 'success', ...)
*External environment factors:*
* `emp.var.rate`: Employment variation rate - quarterly indicator (numeric)
* `cons.price.idx`: Consumer price index - monthly indicator (numeric)
* `cons.conf.idx`: Consumer confidence index - monthly indicator (numeric)
* `euribor3m`: Euribor 3 month rate - daily indicator (numeric)
* `nr.employed`: Number of employees - quarterly indicator (numeric)
*Target variable* **(the one we want to eventually predict):**
* `y`: Has the client subscribed to a term deposit? (binary: 'yes', 'no')
--- Data Transformation
Cleaning up data is part of nearly every ML project. Several common steps include:
* **Handling missing values**: In our case there are no missing values.
* **Handling weird/outlier values**: There are some values in the dataset that may require manipulation.
* **Converting categorical to numeric**: There are a lot of categorical variables in our dataset. We need to address this.
* **Oddly distributed data**: We will be using XGBoost, which is a non-linear method, and is minimally affected by the data distribution.
* **Remove unnecessary data**: There are lots of columns representing general economic features that may not be available during inference time.
To summarise, we need to A) address some weird values, B) convert the categorical variables to numeric, and C) remove unnecessary data:
1. Many records have the value of "999" for `pdays`. It is very likely a 'magic' number used to represent that *no contact was made before*. Considering that, we will create a new column called "no_previous_contact", and give it a value of "1" when pdays is 999 and "0" otherwise.
2. In the `job` column, there is more than one category for people who don't work, e.g. "student", "retired", and "unemployed". It is very likely that the decision to enroll in a term deposit or not depends a lot on whether the customer is working or not. As such, we generate a new column to show whether the customer is working, based on the `job` column.
3. We will remove the economic features and `duration` from our data, as they would need to be forecasted with high precision to be used as features during inference time.
4. We convert categorical variables to numeric using *one hot encoding*.
| # Indicator variable to capture when pdays takes a value of 999
df_data["no_previous_contact"] = np.where(df_data["pdays"] == 999, 1, 0)
# Indicator for individuals not actively employed
df_data["not_working"] = np.where(np.in1d(df_data["job"], ["student", "retired", "unemployed"]), 1, 0)
# remove unnecessary data
df_model_data = df_data.drop(
["duration",
"emp.var.rate",
"cons.price.idx",
"cons.conf.idx",
"euribor3m",
"nr.employed"],
axis=1,
)
df_model_data = pd.get_dummies(df_model_data) # Convert categorical variables to sets of indicators
df_model_data.head() # Show part of the new transformed dataframe (which will be used for training)
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
--- Training
Before initializing training, there are some things that need to be done:
1. Shuffle and split the dataset.
2. Convert the dataset to the format the SageMaker algorithm expects (e.g. CSV).
3. Copy the dataset to S3 so that it can be accessed by SageMaker during training.
4. Create s3_inputs that our training function can use as pointers to the files in S3.
5. Specify the ECR container location for SageMaker's implementation of XGBoost.
We will shuffle and split the dataset into **Training (70%)**, **Validation (20%)**, and **Test (10%)**. We will use the Training and Validation splits during the training phase, while the 'holdout' Test split will be used to evaluate the model performance after it is deployed to production.
Amazon SageMaker's XGBoost algorithm expects data in the **libSVM** or **CSV** formats. For the CSV format, the following specifications should be met:
- The first column must be the target variable.
- No headers should be included. | # shuffle and split the dataset
train_data, validation_data, test_data = np.split(
df_model_data.sample(frac=1, random_state=1729),
[int(0.7 * len(df_model_data)), int(0.9*len(df_model_data))],
)
# create CSV files for Train / Validation / Test
# XGBoost expects a CSV file with no headers, with the 1st row being the ground truth
# We are preparing such a CSV file in the following lines
pd.concat([train_data["y_yes"], train_data.drop(["y_no", "y_yes"], axis=1)], axis=1).to_csv("data/train.csv", index=False, header=False)
pd.concat([validation_data["y_yes"], validation_data.drop(["y_no", "y_yes"], axis=1)], axis=1).to_csv("data/validation.csv", index=False, header=False)
pd.concat([test_data["y_yes"], test_data.drop(["y_no", "y_yes"], axis=1)], axis=1).to_csv("data/test.csv", index=False, header=False)
# copy CSV files to S3 for SageMaker training (training files should reside in S3)
boto3.Session().resource("s3").Bucket(sgmk_bucket).Object(os.path.join(sgmk_prefix, "train.csv")).upload_file("data/train.csv")
boto3.Session().resource("s3").Bucket(sgmk_bucket).Object(os.path.join(sgmk_prefix, "validation.csv")).upload_file("data/validation.csv")
# create s3_inputs channels (objects pointing to the S3 locations)
s3_input_train = sagemaker.s3_input(s3_data="s3://{}/{}/train".format(sgmk_bucket, sgmk_prefix), content_type="csv")
s3_input_validation = sagemaker.s3_input(s3_data="s3://{}/{}/validation".format(sgmk_bucket, sgmk_prefix), content_type="csv")
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
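A quick sanity check of the 70% / 20% / 10% split proportions (assumes the cells above have been run):
for name, split in [("train", train_data), ("validation", validation_data), ("test", test_data)]:
    print(name, split.shape, round(len(split) / len(df_model_data), 2))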
Specify algorithm container image | # specify object of the xgboost container image
from sagemaker.amazon.amazon_estimator import get_image_uri
xgb_container_image = get_image_uri(sgmk_region, "xgboost", repo_version="latest")
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
A small competition: try to predict the best values for 4 hyper-parameters! SageMaker's XGBoost includes 38 parameters; you can find more information about them [here](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html). For simplicity, we choose to experiment with only 6 of them. **Please select values for the 4 hyperparameters (by replacing the "?") based on the provided ranges.** Later we will see which model performed best and compare it with the one from the Hyperparameter Optimization step. | sess = sagemaker.Session() # initiate a SageMaker session
# instantiate an XGBoost estimator object
xgb_estimator = sagemaker.estimator.Estimator(
image_name=xgb_container_image, # XGBoost algorithm container
role=sgmk_role, # IAM role to be used
train_instance_type="ml.m4.xlarge", # type of training instance
train_instance_count=1, # number of instances to be used
output_path="s3://{}/{}/output".format(sgmk_bucket, sgmk_prefix),
sagemaker_session=sess,
train_use_spot_instances=True, # Use spot instances to reduce cost
train_max_run=20*60, # Maximum allowed active runtime
train_max_wait=30*60, # Maximum clock time (including spot delays)
)
# scale_pos_weight is a parameter that controls the relative weights of the classes.
# Because the data set is so highly skewed, we set this parameter according to the ratio (y_no/y_yes)
scale_pos_weight = np.count_nonzero(train_data["y_yes"].values==0) / np.count_nonzero(train_data["y_yes"].values)
# define its hyperparameters
xgb_estimator.set_hyperparameters(
num_round=?, # int: [1,300]
max_depth=?, # int: [1,10]
alpha=?, # float: [0,5]
eta=?, # float: [0,1]
silent=0,
objective="binary:logistic",
scale_pos_weight=scale_pos_weight,
)
xgb_estimator.fit({"train": s3_input_train, "validation": s3_input_validation}, wait=True) # start a training (fitting) job
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
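For reference only, one possible way to fill in the "?" placeholders above; these are illustrative guesses within the stated ranges, not claimed to be the best values:
xgb_estimator.set_hyperparameters(
    num_round=100,                 # int in [1, 300]
    max_depth=5,                   # int in [1, 10]
    alpha=1.0,                     # float in [0, 5]
    eta=0.3,                       # float in [0, 1]
    silent=0,
    objective="binary:logistic",
    scale_pos_weight=scale_pos_weight,
)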
--- Deploying and evaluating model
Deployment
Now that we've trained the XGBoost algorithm on our data, deploying the model (hosting it behind a real-time endpoint) is just one line of code!
*Attention! This may take up to 10 minutes, depending on the AWS instance you select.* | xgb_predictor = xgb_estimator.deploy(initial_instance_count=1, instance_type="ml.m5.large")
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
Evaluation
First we'll need to determine how we pass data into and receive data from our endpoint. Our data is currently stored as a NumPy array in the memory of our notebook instance. To send it in an HTTP POST request, we will serialize it as a CSV string and then decode the resulting CSV. Note: for inference with the CSV format, SageMaker XGBoost requires that the data **does NOT include the target variable.** | # Converting strings for HTTP POST requests on inference
from sagemaker.predictor import csv_serializer
def predict_prob(predictor, data):
# predictor settings
predictor.content_type = "text/csv"
predictor.serializer = csv_serializer
return np.fromstring(predictor.predict(data).decode("utf-8"), sep=",") # convert back to numpy
# getting the predicted probabilities
predictions = predict_prob(xgb_predictor, test_data.drop(["y_no", "y_yes"], axis=1).values)
print(predictions)
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
These numbers are the **predicted probabilities** (in the interval [0,1]) of a potential customer enrolling for a term deposit:
- 0: the person WILL NOT enroll;
- 1: the person WILL enroll (which makes him/her a good candidate for direct marketing).
Now we will generate a **comprehensive model report**, using the following function. | generate_classification_report(
y_actual=test_data["y_yes"].values,
y_predict_proba=predictions,
decision_threshold=0.5,
class_names_list=["Did not enroll","Enrolled"],
model_info="XGBoost SageMaker inbuilt"
)
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
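As a small, optional aside (not part of the original workshop), the predicted probabilities can be turned into hard yes/no labels at the same 0.5 decision threshold used by the report above:
hard_labels = (predictions > 0.5).astype(int)  # 1 = predicted to enroll
print("predicted enrollments:", hard_labels.sum(), "out of", len(hard_labels))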
--- Hyperparameter Optimization (HPO)
*Note: with the default settings below, the hyperparameter tuning job can take up to 30 minutes to complete.*
We will use SageMaker HyperParameter Optimization (HPO) to automate the searching process effectively. Specifically, we **specify a range**, or a list of possible values in the case of categorical hyperparameters, for each of the hyperparameters that we plan to tune. We will tune 4 hyperparameters in this example:
* **eta**: Step size shrinkage used in updates to prevent overfitting. After each boosting step, you can directly get the weights of new features. The eta parameter actually shrinks the feature weights to make the boosting process more conservative.
* **alpha**: L1 regularization term on weights. Increasing this value makes models more conservative.
* **min_child_weight**: Minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with a sum of instance weight less than min_child_weight, the building process gives up further partitioning. In linear regression models, this simply corresponds to a minimum number of instances needed in each node. The larger this value, the more conservative the algorithm will be.
* **max_depth**: Maximum depth of a tree. Increasing this value makes the model more complex and likely to be overfitted.
SageMaker hyperparameter tuning will automatically launch **multiple training jobs** with different hyperparameter settings, evaluate the results of those training jobs based on a predefined "objective metric", and select the hyperparameter settings for future attempts based on previous results. For each hyperparameter tuning job, we will specify the maximum number of HPO tries (`max_jobs`) and how many of these can happen in parallel (`max_parallel_jobs`).
Tip: `max_parallel_jobs` creates a **trade-off between performance and speed** (better hyperparameter values vs. how long it takes to find these values). If `max_parallel_jobs` is large, then HPO is faster, but the discovered values may not be optimal. A smaller `max_parallel_jobs` will increase the chance of finding optimal values, but HPO will take more time to finish.
Next we'll specify the objective metric that we'd like to tune and its definition, which includes the regular expression (regex) needed to extract that metric from the CloudWatch logs of the training job. Since we are using the built-in XGBoost algorithm here, it emits two predefined metrics, **validation:auc** and **train:auc**, and we elected to monitor *validation:auc*, as you can see below. In this case, we only need to specify the metric name and do not need to provide a regex. If you bring your own algorithm, your algorithm emits metrics by itself. In that case, you'll need to add a MetricDefinition object here to define the format of those metrics through a regex, so that SageMaker knows how to extract them from your CloudWatch logs.
For more information on SageMaker HPO, please refer to the documentation [here](https://sagemaker.readthedocs.io/en/stable/tuner.html). | # import required HPO objects
from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner
# set up hyperparameter ranges
ranges = {
"num_round": IntegerParameter(1, 300),
"max_depth": IntegerParameter(1, 10),
"alpha": ContinuousParameter(0, 5),
"eta": ContinuousParameter(0, 1)
}
# set up the objective metric
objective = "validation:auc"
# instantiate a HPO object
tuner = HyperparameterTuner(
estimator=xgb_estimator, # the SageMaker estimator object
objective_metric_name=objective, # the objective metric to be used for HPO
hyperparameter_ranges=ranges, # the range of hyperparameters
max_jobs=20, # total number of HPO jobs
max_parallel_jobs=4, # how many HPO jobs can run in parallel
strategy="Bayesian", # the internal optimization strategy of HPO
objective_type="Maximize" # maximize or minimize the objective metric
)
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
Launch HPO
Now we can launch a hyperparameter tuning job by calling the *fit()* function. After the hyperparameter tuning job is created, we can go to the SageMaker console to track its progress until it is completed. | # start HPO
tuner.fit({"train": s3_input_train, "validation": s3_input_validation}, include_cls_metadata=False)
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
**Important notice**: HPO jobs are expected to take quite a long time to finish and, as such, **they do not wait by default** (the cell will look 'done' while the job is still running in the cloud). Consequently, all subsequent cells relying on the HPO output cannot run unless the job is finished. In order to check whether the HPO has finished (so we can proceed with executing the subsequent code), we can run the following polling script: | # wait until HPO is finished
hpo_state = "InProgress"
while hpo_state == "InProgress":
hpo_state = sgmk_client.describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)["HyperParameterTuningJobStatus"]
print("-", end="")
time.sleep(60) # poll once every 1 min
print("\nHPO state:", hpo_state)
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
Deploy and test the optimized model
Deploying the best model is simply one line of code: | # deploy the best model from HPO
best_model_predictor = tuner.deploy(initial_instance_count=1, instance_type="ml.m5.large")
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
Once deployed, we can now evaluate the performance of the best model. | # getting the predicted probabilities of the best model
predictions = predict_prob(best_model_predictor, test_data.drop(["y_no", "y_yes"], axis=1).values)
print(predictions)
# generate report for the best model
generate_classification_report(
y_actual=test_data["y_yes"].values,
y_predict_proba=predictions,
decision_threshold=0.5,
class_names_list=["Did not enroll","Enrolled"],
model_info="XGBoost SageMaker inbuilt + HPO"
) | _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
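As an optional aside, it can be useful to inspect which hyperparameter values the tuning job actually explored; a minimal sketch using the SageMaker SDK's tuner analytics (assuming the SDK version in use exposes `analytics()` and a `FinalObjectiveValue` column):
# Summarize the HPO trials; column names may vary slightly between SDK versions.
hpo_results = tuner.analytics().dataframe()
print(hpo_results.sort_values("FinalObjectiveValue", ascending=False).head())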
--- Conclusions
The optimized HPO model exhibits approximately AUC=0.773. Depending on the number of tries, HPO can give a better-performing model compared to simply trying different hyperparameters by trial and error. You can learn more in-depth details about HPO [here](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html).
--- Releasing cloud resources
It is generally good practice to deactivate all endpoints which are not in use. Please uncomment the following lines and run the cell in order to deactivate the 2 endpoints that were created before. | # xgb_predictor.delete_endpoint(delete_endpoint_config=True)
# best_model_predictor.delete_endpoint(delete_endpoint_config=True)
| _____no_output_____ | MIT | sagemaker_xgboost_hpo.ipynb | bbonik/sagemaker-xgboost-with-hpo |
Naoki Atkins, Project 3 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
np.set_printoptions(suppress=True) | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
***Question 1*** | data = np.load('./boston.npz') | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
***Question 2*** | features = data['features']
target = data['target']
X = features
y = target[:,None]
X = np.concatenate((np.ones((len(X),1)),X),axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=(2021-3-11)) | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
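A small aside on the seed above: `random_state=(2021-3-11)` is plain integer arithmetic, not a date, so the seed actually used is 2007 (still a perfectly valid fixed seed, just easy to misread):
print(2021 - 3 - 11)  # -> 2007, the effective random_state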
***Question 3*** | plt.plot(X_train[:,13], y_train, 'ro') | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
The relationship looks more like a negative quadratic than a straight line. ***Question 4*** | LSTAT = X_train[:,13][:,None]
MEDV = y_train
reg = LinearRegression().fit(LSTAT, MEDV)
reg.coef_
reg.intercept_ | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
MEDV = 34.991133021969475 + (-0.98093888)(LSTAT) ***Question 5*** | abline = np.array([reg.intercept_, reg.coef_], dtype=object)
testx = np.linspace(0,40,100)[:,None]
testX = np.hstack((np.ones_like(testx),testx))
testt = np.dot(testX,abline)
plt.figure()
plt.plot(LSTAT,MEDV,'ro')
plt.plot(testx,testt,'b') | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
The model fits decently well along the center of the mass of data. Around the extremes, the line is a little bit off. ***Question 6*** | pred = reg.predict(LSTAT)
mean_squared_error(y_train, pred) | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
Average Loss = 38.47893344802523 ***Question 7*** | pred_test = reg.predict(X_test[:,13][:,None])
mean_squared_error(y_test, pred_test) | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
Test MSE is slightly higher, which means that there is a slight overfit ***Question 8*** | LSTAT_sqr = np.hstack((np.ones_like(LSTAT), LSTAT, LSTAT**2))
reg = LinearRegression().fit(LSTAT_sqr, MEDV)
pred_train_LSTAT_sqr = reg.predict(LSTAT_sqr)
MSE_train_sqr = mean_squared_error(y_train, pred_train_LSTAT_sqr)
MSE_train_sqr
LSTAT_sqr_test = np.hstack((np.ones_like(X_test[:,13][:,None]), X_test[:,13][:,None], X_test[:,13][:,None]**2))
pred_test_LSTAT_sqr = reg.predict(LSTAT_sqr_test)
MSE_test_sqr = mean_squared_error(y_test, pred_test_LSTAT_sqr)
MSE_test_sqr | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
The test set has a lower MSE than the training set, which means the model is fitting well. ***Question 9*** | reg.coef_
reg.intercept_
squared_line = [reg.intercept_, reg.coef_[0][1], reg.coef_[0][2]]
testx = np.linspace(0,40,100)[:,None]
testX = np.hstack((np.ones_like(testx),testx, testx**2))
testt = np.dot(testX,squared_line)
plt.figure()
plt.plot(LSTAT,MEDV,'ro')
plt.plot(testx,testt,'b') | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
The quadratic model fits pretty well, better than the straight line. ***Question 10*** | reg = LinearRegression().fit(X_train, y_train)
reg.coef_
reg.intercept_
pred = reg.predict(X_train)
mean_squared_error(y_train, pred) | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
The above mean squared error is for the training set. | pred_test = reg.predict(X_test)
mean_squared_error(y_test, pred_test) | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
This model with polynomial features fits better as compared to the linear model with just a single feature. Making the model more complex allows it to fit the data more flexibly. This causes the MSE to go lower. ***Question 11*** | train_square_matrix = np.hstack((X_train, X_train**2))
model = LinearRegression().fit(train_square_matrix, MEDV)
pred_train_sqr = model.predict(train_square_matrix)
MSE_train_sqr = mean_squared_error(y_train, pred_train_sqr)
MSE_train_sqr
test_square_matrix = np.hstack((X_test, X_test**2))
pred = model.predict(test_square_matrix)
mean_squared_error(y_test, pred) | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
The MSEs for the matrix that includes the squares of all 13 input features are better than for the matrix of the raw features alone. However, the test set shows that the model is overfitting a little. ***Question 12*** | poly = PolynomialFeatures(degree = 2)
X_train_poly = poly.fit_transform(X_train)
X_test_poly = poly.fit_transform(X_test)
model = LinearRegression().fit(X_train_poly, y_train)
pred = model.predict(X_train_poly)
mean_squared_error(y_train, pred)
pred = model.predict(X_test_poly)
mean_squared_error(y_test, pred) | _____no_output_____ | MIT | machineLearning/scikitLearnAndPolyRegression.ipynb | naokishami/Classwork |
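For intuition about why the degree-2 model is so much more flexible, a quick check of how many columns PolynomialFeatures generates (assumes the objects defined above are available):
print("original feature columns:", X_train.shape[1])
print("degree-2 feature columns:", X_train_poly.shape[1])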
Project 3: Implement SLAM
--- Project Overview
In this project, you'll implement SLAM for a robot that moves and senses in a 2-dimensional grid world! SLAM gives us a way to both localize a robot and build up a map of its environment as the robot moves and senses in real time. This is an active area of research in the fields of robotics and autonomous systems. Since this localization and map-building relies on the visual sensing of landmarks, this is a computer vision problem.
Using what you've learned about robot motion, representations of uncertainty in motion and sensing, and localization techniques, you will be tasked with defining a function, `slam`, which takes in six parameters as input and returns the vector `mu`.
> `mu` contains the (x,y) coordinate locations of the robot as it moves, and the positions of landmarks that it senses in the world
You can implement helper functions as you see fit, but your function must return `mu`. The vector, `mu`, should have (x, y) coordinates interlaced; for example, if there were 2 poses and 2 landmarks, `mu` will look like the following, where `P` is the robot position and `L` the landmark position:
```mu = matrix([[Px0], [Py0], [Px1], [Py1], [Lx0], [Ly0], [Lx1], [Ly1]])```
You can see that `mu` holds the poses first `(x0, y0), (x1, y1), ...,` then the landmark locations at the end of the matrix; we consider a `nx1` matrix to be a vector.
Generating an environment
In a real SLAM problem, you may be given a map that contains information about landmark locations, and in this example, we will make our own data using the `make_data` function, which generates a world grid with landmarks in it and then generates data by placing a robot in that world and moving and sensing over some number of time steps. The `make_data` function relies on a correct implementation of robot move/sense functions, which, at this point, should be complete and in the `robot_class.py` file. The data is collected as an instantiated robot moves and senses in a world. Your SLAM function will take in this data as input. So, let's first create this data and explore how it represents the movement and sensor measurements that our robot takes.
--- Create the world
Use the code below to generate a world of a specified size with randomly generated landmark locations. You can change these parameters and see how your implementation of SLAM responds! `data` holds the sensor measurements and motion of your robot over time. It stores the measurements as `data[i][0]` and the motion as `data[i][1]`.
Helper functions
You will be working with the `robot` class that may look familiar from the first notebook. In fact, in the `helpers.py` file, you can read the details of how data is made with the `make_data` function. It should look very similar to the robot move/sense cycle you've seen in the first notebook. | import numpy as np
from helpers import make_data
# your implementation of slam should work with the following inputs
# feel free to change these input values and see how it responds!
# world parameters
num_landmarks = 5 # number of landmarks
N = 20 # time steps
world_size = 100.0 # size of world (square)
# robot parameters
measurement_range = 50.0 # range at which we can sense landmarks
motion_noise = 2.0 # noise in robot motion
measurement_noise = 2.0 # noise in the measurements
distance = 20.0 # distance by which the robot (intends to) move at each iteration
# make_data instantiates a robot, AND generates random landmarks for a given world size and number of landmarks
data = make_data(N, num_landmarks, world_size, measurement_range, motion_noise, measurement_noise, distance) |
Landmarks: [[21, 13], [16, 21], [70, 38], [38, 75], [87, 50]]
Robot: [x=21.10550 y=65.76182]
| MIT | 3. Landmark Detection and Tracking.ipynb | takam5f2/CVN_SLAM |
A note on `make_data`
The function above, `make_data`, takes in so many world and robot motion/sensor parameters because it is responsible for:
1. Instantiating a robot (using the robot class)
2. Creating a grid world with landmarks in it
**This function also prints out the true location of landmarks and the *final* robot location, which you should refer back to when you test your implementation of SLAM.**
The `data` this returns is an array that holds information about **robot sensor measurements** and **robot motion** `(dx, dy)` that is collected over a number of time steps, `N`. You will have to use *only* these readings about motion and measurements to track the robot over time and determine the locations of the landmarks using SLAM. We only print out the true landmark locations for comparison, later.
In `data`, the measurement and motion data can be accessed from the first and second index in the columns of the data array. See the following code for an example, where `i` is the time step:
```
measurement = data[i][0]
motion = data[i][1]
``` | # print out some stats about the data
time_step = 0
print('Example measurements: \n', data[time_step][0])
print('\n')
print('Example motion: \n', data[time_step][1]) | Example measurements:
[[0, -27.336665046464944, -35.10866490452625], [1, -33.31752573050853, -30.61726091048749], [2, 19.910918480109437, -10.91254402509894], [3, -10.810346809363109, 24.042261189064593], [4, 35.51407538801196, -0.736122885336894]]
Example motion:
[-19.11495180327814, 5.883758795052183]
| MIT | 3. Landmark Detection and Tracking.ipynb | takam5f2/CVN_SLAM |
Try changing the value of `time_step`; you should see that the list of measurements varies based on what the robot sees in the world after it moves. As you know from the first notebook, the robot can only sense so far, and with a certain amount of accuracy in the measured distance between its location and the location of landmarks. The motion of the robot is always a vector with two values: one for x and one for y displacement. This structure will be useful to keep in mind as you traverse this data in your implementation of slam.
Initialize Constraints
One of the most challenging tasks here will be to create and modify the constraint matrix and vector: omega and xi. In the second notebook, you saw an example of how omega and xi could hold all the values that define the relationships between robot poses `xi` and landmark positions `Li` in a 1D world, as seen below, where omega is the blue matrix and xi is the pink vector.
In *this* project, you are tasked with implementing constraints for a 2D world. We are referring to robot poses as `Px, Py` and landmark positions as `Lx, Ly`, and one way to approach this challenge is to add *both* x and y locations in the constraint matrices.
You may also choose to create two of each omega and xi (one for x and one for y positions).
TODO: Write a function that initializes omega and xi
Complete the function `initialize_constraints` so that it returns `omega` and `xi` constraints for the starting position of the robot. Any values that we do not yet know should be initialized with the value `0`. You may assume that our robot starts out in exactly the middle of the world with 100% confidence (no motion or measurement noise at this point). The inputs `N` time steps, `num_landmarks`, and `world_size` should give you all the information you need to construct initial constraints of the correct size and starting values.
*Depending on your approach, you may choose to return one omega and one xi that hold all (x,y) positions *or* two of each (one for x values and one for y); choose whichever makes most sense to you!* | def initialize_constraints(N, num_landmarks, world_size):
''' This function takes in a number of time steps N, number of landmarks, and a world_size,
and returns initialized constraint matrices, omega and xi.'''
## Recommended: Define and store the size (rows/cols) of the constraint matrix in a variable
mat_size = 2 * (N + num_landmarks) # multiply 2 because of 2-dimension expression
## TODO: Define the constraint matrix, Omega, with two initial "strength" values
## for the initial x, y location of our robot
omega = np.zeros((mat_size, mat_size))
omega[0][0] = 1 # x in initial position
omega[1][1] = 1 # y in initial position
## TODO: Define the constraint *vector*, xi
## you can assume that the robot starts out in the middle of the world with 100% confidence
xi = np.zeros((mat_size, 1))
xi[0] = world_size/2
xi[1] = world_size/2
return omega, xi
| _____no_output_____ | MIT | 3. Landmark Detection and Tracking.ipynb | takam5f2/CVN_SLAM |
Test as you go
It's good practice to test out your code as you go. Since `slam` relies on creating and updating the constraint matrices `omega` and `xi` to account for robot sensor measurements and motion, let's check that they initialize as expected for any given parameters.
Below, you'll find some test code that allows you to visualize the results of your function `initialize_constraints`. We are using the [seaborn](https://seaborn.pydata.org/) library for visualization.
**Please change the test values of N, landmarks, and world_size and see the results**. Be careful not to use these values as input into your final slam function.
This code assumes that you have created one of each constraint: `omega` and `xi`, but you can change and add to this code accordingly. The constraints should vary in size with the number of time steps and landmarks, as these values affect the number of poses a robot will take `(Px0,Py0,...Pxn,Pyn)` and landmark locations `(Lx0,Ly0,...Lxn,Lyn)` whose relationships should be tracked in the constraint matrices. Recall that `omega` holds the weights of each variable and `xi` holds the value of the sum of these variables, as seen in Notebook 2. You'll need the `world_size` to determine the starting pose of the robot in the world and fill in the initial values for `xi`. | # import data viz resources
import matplotlib.pyplot as plt
from pandas import DataFrame
import seaborn as sns
%matplotlib inline
# define a small N and world_size (small for ease of visualization)
N_test = 5
num_landmarks_test = 2
small_world = 10
# initialize the constraints
initial_omega, initial_xi = initialize_constraints(N_test, num_landmarks_test, small_world)
# define figure size
plt.rcParams["figure.figsize"] = (10,7)
# display omega
sns.heatmap(DataFrame(initial_omega), cmap='Blues', annot=True, linewidths=.5)
# define figure size
plt.rcParams["figure.figsize"] = (1,7)
# display xi
sns.heatmap(DataFrame(initial_xi), cmap='Oranges', annot=True, linewidths=.5) | _____no_output_____ | MIT | 3. Landmark Detection and Tracking.ipynb | takam5f2/CVN_SLAM |
--- SLAM inputs In addition to `data`, your slam function will also take in:* N - The number of time steps that a robot will be moving and sensing* num_landmarks - The number of landmarks in the world* world_size - The size (w/h) of your world* motion_noise - The noise associated with motion; the update confidence for motion should be `1.0/motion_noise`* measurement_noise - The noise associated with measurement/sensing; the update weight for measurement should be `1.0/measurement_noise` A note on noiseRecall that `omega` holds the relative "strengths" or weights for each position variable, and you can update these weights by accessing the correct index in omega `omega[row][col]` and *adding/subtracting* `1.0/noise` where `noise` is measurement or motion noise. `Xi` holds actual position values, and so to update `xi` you'll do a similar addition process only using the actual value of a motion or measurement. So for a vector index `xi[row][0]` you will end up adding/subtracting one measurement or motion divided by their respective `noise`. TODO: Implement Graph SLAMFollow the TODO's below to help you complete this slam implementation (these TODO's are in the recommended order), then test out your implementation! Updating with motion and measurementsWith a 2D omega and xi structure as shown above (in earlier cells), you'll have to be mindful about how you update the values in these constraint matrices to account for motion and measurement constraints in the x and y directions. Recall that the solution to these matrices (which holds all values for robot poses `P` and landmark locations `L`) is the vector, `mu`, which can be computed at the end of the construction of omega and xi as the inverse of omega times xi: $\mu = \Omega^{-1}\xi$**You may also choose to return the values of `omega` and `xi` if you want to visualize their final state!** | ## TODO: Complete the code to implement SLAM
## slam takes in 6 arguments and returns mu,
## mu is the entire path traversed by a robot (all x,y poses) *and* all landmarks locations
def slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise):
## TODO: Use your initilization to create constraint matrices, omega and xi
omega, xi = initialize_constraints(N, num_landmarks, world_size)
## TODO: Iterate through each time step in the data
## get all the motion and measurement data as you iterate
for i in range(0, N-1): # loop for steps
pos_index = 2 * i # index for 2-dimension
measurement = data[i][0]
## TODO: update the constraint matrix/vector to account for all *measurements*
## this should be a series of additions that take into account the measurement noise
for j in range(len(measurement)): # observed landmarks loop
land_index = 2 * ( N + measurement[j][0] )
for k in range(2): # 2-dimension loop
# update omega matrix
assert pos_index < 2*N, "pos_index: {}".format(pos_index)
assert land_index < 2*(N+num_landmarks), "land_index: {}".format(land_index)
omega[pos_index+k][pos_index+k] += (1 / measurement_noise)
omega[pos_index+k][land_index+k] -= (1 / measurement_noise)
omega[land_index+k][pos_index+k] -= (1 / measurement_noise)
omega[land_index+k][land_index+k] += (1 / measurement_noise)
# update xi vector
xi[pos_index+k] -= (measurement[j][k+1] / measurement_noise)
xi[land_index+k] += (measurement[j][k+1] / measurement_noise)
## TODO: update the constraint matrix/vector to account for all *motion* and motion noise
for i in range(0, N-1): # loop for steps
cpos_index = 2 * i # index for 2-dimension current position.
npos_index = 2 * (i + 1) # index for 2-dimension next position.
motion = data[i][1]
for j in range(2): # loop for 2-dimension
omega[cpos_index+j][cpos_index+j] += (1 / motion_noise)
omega[cpos_index+j][npos_index+j] -= (1 / motion_noise)
omega[npos_index+j][cpos_index+j] -= (1 / motion_noise)
omega[npos_index+j][npos_index+j] += (1 / motion_noise)
xi[cpos_index+j] -= (motion[j] / motion_noise)
xi[npos_index+j] += (motion[j] / motion_noise)
## TODO: After iterating through all the data
## Compute the best estimate of poses and landmark positions
## using the formula, omega_inverse * Xi
mu = np.linalg.inv(np.matrix(omega)) * xi
return mu # return `mu`
| _____no_output_____ | MIT | 3. Landmark Detection and Tracking.ipynb | takam5f2/CVN_SLAM |
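As a side illustration of the update rule described above (not part of the project code), here is a tiny 1-D sketch of how a single motion constraint x1 - x0 = dx with noise s is folded into omega and xi, mirroring the 2-D updates inside `slam`:
import numpy as np

dx, s = 5.0, 2.0
omega_1d = np.zeros((2, 2))
xi_1d = np.zeros((2, 1))
omega_1d += np.array([[1, -1], [-1, 1]]) / s   # add 1/noise on the diagonal, -1/noise off-diagonal
xi_1d[0] -= dx / s                             # subtract motion/noise for the earlier pose
xi_1d[1] += dx / s                             # add motion/noise for the later pose
print(omega_1d)
print(xi_1d)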
Helper functions
To check that your implementation of SLAM works for various inputs, we have provided two helper functions that will help display the estimated pose and landmark locations that your function has produced. First, given a result `mu` and a number of time steps, `N`, we define a function that extracts the poses and landmark locations and returns those as their own, separate lists. Then, we define a function that nicely prints out these lists; we will call both of these in the next step. | # a helper function that creates a list of poses and of landmarks for ease of printing
# this only works for the suggested constraint architecture of interlaced x,y poses
def get_poses_landmarks(mu, N):
# create a list of poses
poses = []
for i in range(N):
poses.append((mu[2*i].item(), mu[2*i+1].item()))
# create a list of landmarks
landmarks = []
for i in range(num_landmarks):
landmarks.append((mu[2*(N+i)].item(), mu[2*(N+i)+1].item()))
# return completed lists
return poses, landmarks
def print_all(poses, landmarks):
print('\n')
print('Estimated Poses:')
for i in range(len(poses)):
print('['+', '.join('%.3f'%p for p in poses[i])+']')
print('\n')
print('Estimated Landmarks:')
for i in range(len(landmarks)):
print('['+', '.join('%.3f'%l for l in landmarks[i])+']')
| _____no_output_____ | MIT | 3. Landmark Detection and Tracking.ipynb | takam5f2/CVN_SLAM |
Run SLAM
Once you've completed your implementation of `slam`, see what `mu` it returns for different world sizes and different landmarks!
What to Expect
The `data` that is generated is random, but you did specify the number, `N`, of time steps that the robot was expected to move and the `num_landmarks` in the world (which your implementation of `slam` should see and estimate a position for). Your robot should also start with an estimated pose in the very center of your square world, whose size is defined by `world_size`.
With these values in mind, you should expect to see a result that displays two lists:
1. **Estimated poses**, a list of (x, y) pairs that is exactly `N` in length since this is how many motions your robot has taken. The very first pose should be the center of your world, i.e. `[50.000, 50.000]` for a world that is 100.0 in square size.
2. **Estimated landmarks**, a list of landmark positions (x, y) that is exactly `num_landmarks` in length.
Landmark Locations
If you refer back to the printout of *exact* landmark locations when this data was created, you should see values that are very similar to those coordinates, but not quite (since `slam` must account for noise in motion and measurement). | # call your implementation of slam, passing in the necessary parameters
mu = slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise)
# print out the resulting landmarks and poses
if(mu is not None):
# get the lists of poses and landmarks
# and print them out
poses, landmarks = get_poses_landmarks(mu, N)
print_all(poses, landmarks) |
Estimated Poses:
[50.000, 50.000]
[30.932, 55.524]
[10.581, 60.210]
[1.931, 44.539]
[13.970, 30.030]
[26.721, 15.510]
[38.852, 1.865]
[23.640, 16.597]
[7.988, 30.490]
[16.829, 49.508]
[26.632, 68.416]
[34.588, 85.257]
[15.954, 87.795]
[14.410, 68.543]
[11.222, 48.268]
[7.365, 29.064]
[4.241, 8.590]
[10.207, 28.434]
[14.889, 47.138]
[20.159, 66.431]
Estimated Landmarks:
[21.992, 13.723]
[16.718, 21.137]
[69.826, 37.990]
[38.311, 75.271]
[87.067, 48.906]
| MIT | 3. Landmark Detection and Tracking.ipynb | takam5f2/CVN_SLAM |
Visualize the constructed world
Finally, using the `display_world` code from the `helpers.py` file (which was also used in the first notebook), we can actually visualize what you have coded with `slam`: the final position of the robot and the position of landmarks, created from only motion and measurement data!
**Note that these should be very similar to the printed *true* landmark locations and final pose from our call to `make_data` early in this notebook.** | # import the helper function
from helpers import display_world
# Display the final world!
# define figure size
plt.rcParams["figure.figsize"] = (20,20)
# check if poses has been created
if 'poses' in locals():
# print out the last pose
print('Last pose: ', poses[-1])
# display the last position of the robot *and* the landmark positions
display_world(int(world_size), poses[-1], landmarks) | Last pose: (20.15873442355911, 66.43079160194176)
| MIT | 3. Landmark Detection and Tracking.ipynb | takam5f2/CVN_SLAM |
Question: How far away is your final pose (as estimated by `slam`) compared to the *true* final pose? Why do you think these poses are different?
You can find the true value of the final pose in one of the first cells where `make_data` was called. You may also want to look at the true landmark locations and compare them to those that were estimated by `slam`. Ask yourself: what do you think would happen if we moved and sensed more (increased N)? Or if we had lower/higher noise parameters?
**Answer**: The ground truth was displayed as follows.
`Landmarks: [[21, 13], [16, 21], [70, 38], [38, 75], [87, 50]]`
`Robot: [x=21.10550 y=65.76182]`
The estimates were obtained as below.
`Estimated Poses: [50.000, 50.000] ... (after 20 iterations) [20.159, 66.431]`
`Estimated Landmarks: [21.992, 13.723] [16.718, 21.137] [69.826, 37.990] [38.311, 75.271] [87.067, 48.906]`
The estimated poses are close to the ground-truth poses, and the estimated landmarks are likewise close to the true landmark positions. The RMSE between ground truth and estimation is 0.6315 (calculated in the following cell of this notebook). This RMSE is very small, so SLAM estimated the robot position and the landmarks well.
Although the robot moved for only 20 steps in this case, more motion steps would contribute to a more accurate estimate: a larger number of measurement samples reduces the influence of measurement and motion noise, because the noise tends to average out. Conversely, the smaller the noise deviation, the higher the estimation accuracy of SLAM; noise deviation is what prevents the robot from estimating its own position and the landmarks' positions with high accuracy. | # calculate RMSE
import math
def getRMSE(ground_truth, estimation):
sum_rmse = 0
for i, element_est in enumerate(estimation):
diff = ground_truth[i] - element_est
diff_square = diff * diff
sum_rmse += diff_square
rmse = math.sqrt(sum_rmse / len(ground_truth))
return rmse
flatten = lambda x: [z for y in x for z in (flatten(y) if hasattr(y, '__iter__') else (y,))]
ground_truth = [[21.10550, 65.76182], [21, 13], [16, 21], [70, 38], [38, 75], [87, 50]]
estimation = [[20.159, 66.431], [21.992, 13.723], [16.718, 21.137], [69.826, 37.990], [38.311, 75.271], [87.067, 48.906]]
ground_truth = flatten(ground_truth)
estimation = flatten(estimation)
rmse = getRMSE(ground_truth, estimation)
print(rmse) | 0.6315729782587813
| MIT | 3. Landmark Detection and Tracking.ipynb | takam5f2/CVN_SLAM |
TestingTo confirm that your slam code works before submitting your project, it is suggested that you run it on some test data and cases. A few such cases have been provided for you, in the cells below. When you are ready, uncomment the test cases in the next cells (there are two test cases, total); your output should be **close-to or exactly** identical to the given results. If there are minor discrepancies it could be a matter of floating point accuracy or in the calculation of the inverse matrix. Submit your projectIf you pass these tests, it is a good indication that your project will pass all the specifications in the project rubric. Follow the submission instructions to officially submit! | # Here is the data and estimated outputs for test case 1
test_data1 = [[[[1, 19.457599255548065, 23.8387362100849], [2, -13.195807561967236, 11.708840328458608], [3, -30.0954905279171, 15.387879242505843]], [-12.2607279422326, -15.801093326936487]], [[[2, -0.4659930049620491, 28.088559771215664], [4, -17.866382374890936, -16.384904503932]], [-12.2607279422326, -15.801093326936487]], [[[4, -6.202512900833806, -1.823403210274639]], [-12.2607279422326, -15.801093326936487]], [[[4, 7.412136480918645, 15.388585962142429]], [14.008259661173426, 14.274756084260822]], [[[4, -7.526138813444998, -0.4563942429717849]], [14.008259661173426, 14.274756084260822]], [[[2, -6.299793150150058, 29.047830407717623], [4, -21.93551130411791, -13.21956810989039]], [14.008259661173426, 14.274756084260822]], [[[1, 15.796300959032276, 30.65769689694247], [2, -18.64370821983482, 17.380022987031367]], [14.008259661173426, 14.274756084260822]], [[[1, 0.40311325410337906, 14.169429532679855], [2, -35.069349468466235, 2.4945558982439957]], [14.008259661173426, 14.274756084260822]], [[[1, -16.71340983241936, -2.777000269543834]], [-11.006096015782283, 16.699276945166858]], [[[1, -3.611096830835776, -17.954019226763958]], [-19.693482634035977, 3.488085684573048]], [[[1, 18.398273354362416, -22.705102332550947]], [-19.693482634035977, 3.488085684573048]], [[[2, 2.789312482883833, -39.73720193121324]], [12.849049222879723, -15.326510824972983]], [[[1, 21.26897046581808, -10.121029799040915], [2, -11.917698965880655, -23.17711662602097], [3, -31.81167947898398, -16.7985673023331]], [12.849049222879723, -15.326510824972983]], [[[1, 10.48157743234859, 5.692957082575485], [2, -22.31488473554935, -5.389184118551409], [3, -40.81803984305378, -2.4703329790238118]], [12.849049222879723, -15.326510824972983]], [[[0, 10.591050242096598, -39.2051798967113], [1, -3.5675572049297553, 22.849456408289125], [2, -38.39251065320351, 7.288990306029511]], [12.849049222879723, -15.326510824972983]], [[[0, -3.6225556479370766, -25.58006865235512]], [-7.8874682868419965, -18.379005523261092]], [[[0, 1.9784503557879374, -6.5025974151499]], [-7.8874682868419965, -18.379005523261092]], [[[0, 10.050665232782423, 11.026385307998742]], [-17.82919359778298, 9.062000642947142]], [[[0, 26.526838150174818, -0.22563393232425621], [4, -33.70303936886652, 2.880339841013677]], [-17.82919359778298, 9.062000642947142]]]
## Test Case 1
##
# Estimated Pose(s):
# [50.000, 50.000]
# [37.858, 33.921]
# [25.905, 18.268]
# [13.524, 2.224]
# [27.912, 16.886]
# [42.250, 30.994]
# [55.992, 44.886]
# [70.749, 59.867]
# [85.371, 75.230]
# [73.831, 92.354]
# [53.406, 96.465]
# [34.370, 100.134]
# [48.346, 83.952]
# [60.494, 68.338]
# [73.648, 53.082]
# [86.733, 38.197]
# [79.983, 20.324]
# [72.515, 2.837]
# [54.993, 13.221]
# [37.164, 22.283]
# Estimated Landmarks:
# [82.679, 13.435]
# [70.417, 74.203]
# [36.688, 61.431]
# [18.705, 66.136]
# [20.437, 16.983]
### Uncomment the following three lines for test case 1 and compare the output to the values above ###
mu_1 = slam(test_data1, 20, 5, 100.0, 2.0, 2.0)
poses, landmarks = get_poses_landmarks(mu_1, 20)
print_all(poses, landmarks)
# Here is the data and estimated outputs for test case 2
test_data2 = [[[[0, 26.543274387283322, -6.262538160312672], [3, 9.937396825799755, -9.128540360867689]], [18.92765331253674, -6.460955043986683]], [[[0, 7.706544739722961, -3.758467215445748], [1, 17.03954411948937, 31.705489938553438], [3, -11.61731288777497, -6.64964096716416]], [18.92765331253674, -6.460955043986683]], [[[0, -12.35130507136378, 2.585119104239249], [1, -2.563534536165313, 38.22159657838369], [3, -26.961236804740935, -0.4802312626141525]], [-11.167066095509824, 16.592065417497455]], [[[0, 1.4138633151721272, -13.912454837810632], [1, 8.087721200818589, 20.51845934354381], [3, -17.091723454402302, -16.521500551709707], [4, -7.414211721400232, 38.09191602674439]], [-11.167066095509824, 16.592065417497455]], [[[0, 12.886743222179561, -28.703968411636318], [1, 21.660953298391387, 3.4912891084614914], [3, -6.401401414569506, -32.321583037341625], [4, 5.034079343639034, 23.102207946092893]], [-11.167066095509824, 16.592065417497455]], [[[1, 31.126317672358578, -10.036784369535214], [2, -38.70878528420893, 7.4987265861424595], [4, 17.977218575473767, 6.150889254289742]], [-6.595520680493778, -18.88118393939265]], [[[1, 41.82460922922086, 7.847527392202475], [3, 15.711709540417502, -30.34633659912818]], [-6.595520680493778, -18.88118393939265]], [[[0, 40.18454208294434, -6.710999804403755], [3, 23.019508919299156, -10.12110867290604]], [-6.595520680493778, -18.88118393939265]], [[[3, 27.18579315312821, 8.067219022708391]], [-6.595520680493778, -18.88118393939265]], [[], [11.492663265706092, 16.36822198838621]], [[[3, 24.57154567653098, 13.461499960708197]], [11.492663265706092, 16.36822198838621]], [[[0, 31.61945290413707, 0.4272295085799329], [3, 16.97392299158991, -5.274596836133088]], [11.492663265706092, 16.36822198838621]], [[[0, 22.407381798735177, -18.03500068379259], [1, 29.642444125196995, 17.3794951934614], [3, 4.7969752441371645, -21.07505361639969], [4, 14.726069092569372, 32.75999422300078]], [11.492663265706092, 16.36822198838621]], [[[0, 10.705527984670137, -34.589764174299596], [1, 18.58772336795603, -0.20109708164787765], [3, -4.839806195049413, -39.92208742305105], [4, 4.18824810165454, 14.146847823548889]], [11.492663265706092, 16.36822198838621]], [[[1, 5.878492140223764, -19.955352450942357], [4, -7.059505455306587, -0.9740849280550585]], [19.628527845173146, 3.83678180657467]], [[[1, -11.150789592446378, -22.736641053247872], [4, -28.832815721158255, -3.9462962046291388]], [-19.841703647091965, 2.5113335861604362]], [[[1, 8.64427397916182, -20.286336970889053], [4, -5.036917727942285, -6.311739993868336]], [-5.946642674882207, -19.09548221169787]], [[[0, 7.151866679283043, -39.56103232616369], [1, 16.01535401373368, -3.780995345194027], [4, -3.04801331832137, 13.697362774960865]], [-5.946642674882207, -19.09548221169787]], [[[0, 12.872879480504395, -19.707592098123207], [1, 22.236710716903136, 16.331770792606406], [3, -4.841206109583004, -21.24604435851242], [4, 4.27111163223552, 32.25309748614184]], [-5.946642674882207, -19.09548221169787]]]
## Test Case 2
##
# Estimated Pose(s):
# [50.000, 50.000]
# [69.035, 45.061]
# [87.655, 38.971]
# [76.084, 55.541]
# [64.283, 71.684]
# [52.396, 87.887]
# [44.674, 68.948]
# [37.532, 49.680]
# [31.392, 30.893]
# [24.796, 12.012]
# [33.641, 26.440]
# [43.858, 43.560]
# [54.735, 60.659]
# [65.884, 77.791]
# [77.413, 94.554]
# [96.740, 98.020]
# [76.149, 99.586]
# [70.211, 80.580]
# [64.130, 61.270]
# [58.183, 42.175]
# Estimated Landmarks:
# [76.777, 42.415]
# [85.109, 76.850]
# [13.687, 95.386]
# [59.488, 39.149]
# [69.283, 93.654]
### Uncomment the following three lines for test case 2 and compare to the values above ###
mu_2 = slam(test_data2, 20, 5, 100.0, 2.0, 2.0)
poses, landmarks = get_poses_landmarks(mu_2, 20)
print_all(poses, landmarks)
|
Estimated Poses:
[50.000, 50.000]
[69.181, 45.665]
[87.743, 39.703]
[76.270, 56.311]
[64.317, 72.176]
[52.257, 88.154]
[44.059, 69.401]
[37.002, 49.918]
[30.924, 30.955]
[23.508, 11.419]
[34.180, 27.133]
[44.155, 43.846]
[54.806, 60.920]
[65.698, 78.546]
[77.468, 95.626]
[96.802, 98.821]
[75.957, 99.971]
[70.200, 81.181]
[64.054, 61.723]
[58.107, 42.628]
Estimated Landmarks:
[76.779, 42.887]
[85.065, 77.438]
[13.548, 95.652]
[59.449, 39.595]
[69.263, 94.240]
| MIT | 3. Landmark Detection and Tracking.ipynb | takam5f2/CVN_SLAM |
Random Forests Import Libraries | import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn import datasets,metrics
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline | _____no_output_____ | MIT | ml/Random Forests/RandomForests.ipynb | Siddhant-K-code/AlgoBook |
Load the [iris_data](https://archive.ics.uci.edu/ml/datasets/iris) | iris_data = datasets.load_iris()
print(iris_data.target_names)
print(iris_data.feature_names) | ['setosa' 'versicolor' 'virginica']
['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']
| MIT | ml/Random Forests/RandomForests.ipynb | Siddhant-K-code/AlgoBook |
Preprocess the data | df = pd.DataFrame(
{
'sepal_length':iris_data.data[:,0],
'sepal_width':iris_data.data[:,1],
'petal_length':iris_data.data[:,2],
'petal_width':iris_data.data[:,3],
'species':iris_data.target
})
df.head()
#Number of instances per class
df.groupby('species').size()
# species -> target column
features = df.iloc[:,:4].values
targets = df['species'] | _____no_output_____ | MIT | ml/Random Forests/RandomForests.ipynb | Siddhant-K-code/AlgoBook |
Visualization | #pair_plot
#To explore the relationship between the features
plt.figure()
sns.pairplot(df,hue = "species", height=3, markers=["o", "s", "D"])
plt.show() | _____no_output_____ | MIT | ml/Random Forests/RandomForests.ipynb | Siddhant-K-code/AlgoBook |
Fitting the model | X_train, X_test, Y_train, Y_test = train_test_split(features,targets,test_size = 0.3,random_state = 1)
model_1 = RandomForestClassifier(n_estimators = 100,random_state = 1)
model_1.fit(X_train, Y_train)
Y_pred = model_1.predict(X_test)
metrics.accuracy_score(Y_test,Y_pred) | _____no_output_____ | MIT | ml/Random Forests/RandomForests.ipynb | Siddhant-K-code/AlgoBook |
Accuracy is around 95.6% Improving the model Hyperparameter selection | #using Exhaustive Grid Search
n_estimators = [2, 10, 100,500]
max_depth = [2, 10, 15,20]
min_samples_split = [2, 5, 10]  # must be an int >= 2 (or a float in (0, 1])
min_samples_leaf = [1, 2, 10,20]
hyper_param = dict(n_estimators = n_estimators, max_depth = max_depth,
min_samples_split = min_samples_split,
min_samples_leaf = min_samples_leaf)
gridF = GridSearchCV(RandomForestClassifier(random_state = 1), hyper_param, cv = 3, verbose = 1,
n_jobs = -1)
bestF = gridF.fit(X_train, Y_train)
bestF.best_params_
#using these parameters
model_2 = RandomForestClassifier(n_estimators = 2,max_depth = 15, min_samples_leaf = 2, min_samples_split = 2)
model_2.fit(X_train,Y_train)
Y_pred_2 = model_2.predict(X_test)
metrics.accuracy_score(Y_test,Y_pred_2)
#Other such Hyperparameter tuning methods can also be used. | _____no_output_____ | MIT | ml/Random Forests/RandomForests.ipynb | Siddhant-K-code/AlgoBook |
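As an illustration of one such alternative, a randomized search samples a fixed number of parameter combinations instead of exhaustively evaluating the full grid. The sketch below reuses the `hyper_param` grid defined above; the number of iterations is illustrative, not tuned.
from sklearn.model_selection import RandomizedSearchCV

# sample 20 of the possible combinations instead of evaluating every one
random_search = RandomizedSearchCV(RandomForestClassifier(random_state=1),
                                   param_distributions=hyper_param,
                                   n_iter=20, cv=3, random_state=1, n_jobs=-1)
random_search.fit(X_train, Y_train)
print(random_search.best_params_)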
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.  ResNet50 Image Classification using ONNX and AzureMLThis example shows how to deploy the ResNet50 ONNX model as a web service using Azure Machine Learning services and the ONNX Runtime. What is ONNXONNX is an open format for representing machine learning and deep learning models. ONNX enables open and interoperable AI by enabling data scientists and developers to use the tools of their choice without worrying about lock-in, with the flexibility to deploy to a variety of platforms. ONNX is developed and supported by a community of partners including Microsoft, Facebook, and Amazon. For more information, explore the [ONNX website](http://onnx.ai). ResNet50 DetailsResNet classifies the major object in an input image into a set of 1000 pre-defined classes. More information about the ResNet50 model and how it was created can be found on the [ONNX Model Zoo github](https://github.com/onnx/models/tree/master/vision/classification/resnet). PrerequisitesTo make the best use of your time, make sure you have done the following:* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to: * install the AML SDK * create a workspace and its configuration file (config.json) | # Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION) | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
Download pre-trained ONNX model from ONNX Model Zoo.Download the [ResNet50v2 model and test data](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v2/resnet50v2.tar.gz) and extract it in the same folder as this tutorial notebook. | import urllib.request
onnx_model_url = "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v2/resnet50v2.tar.gz"
urllib.request.urlretrieve(onnx_model_url, filename="resnet50v2.tar.gz")
!tar xvzf resnet50v2.tar.gz | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
Deploying as a web service with Azure ML Load your Azure ML workspaceWe begin by instantiating a workspace object from the existing workspace created earlier in the configuration notebook. | from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, sep = '\n') | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
Register your model with Azure MLNow we upload the model and register it in the workspace. | from azureml.core.model import Model
model = Model.register(model_path = "resnet50v2/resnet50v2.onnx",
model_name = "resnet50v2",
tags = {"onnx": "demo"},
description = "ResNet50v2 from ONNX Model Zoo",
workspace = ws) | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
Displaying your registered modelsYou can optionally list out all the models that you have registered in this workspace. | models = ws.models
for name, m in models.items():
print("Name:", name,"\tVersion:", m.version, "\tDescription:", m.description, m.tags) | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
Write scoring fileWe are now going to deploy our ONNX model on Azure ML using the ONNX Runtime. We begin by writing a score.py file that will be invoked by the web service call. The `init()` function is called once when the container is started so we load the model using the ONNX Runtime into a global session object. | %%writefile score.py
import json
import time
import sys
import os
import numpy as np # we're going to use numpy to process input and output data
import onnxruntime # to inference ONNX models, we use the ONNX Runtime
def softmax(x):
x = x.reshape(-1)
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def init():
global session
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'resnet50v2.onnx')
session = onnxruntime.InferenceSession(model, None)
def preprocess(input_data_json):
# convert the JSON data into the tensor input
img_data = np.array(json.loads(input_data_json)['data']).astype('float32')
#normalize
mean_vec = np.array([0.485, 0.456, 0.406])
stddev_vec = np.array([0.229, 0.224, 0.225])
norm_img_data = np.zeros(img_data.shape).astype('float32')
for i in range(img_data.shape[0]):
norm_img_data[i,:,:] = (img_data[i,:,:]/255 - mean_vec[i]) / stddev_vec[i]
return norm_img_data
def postprocess(result):
return softmax(np.array(result)).tolist()
def run(input_data_json):
try:
start = time.time()
# load in our data which is expected as NCHW 224x224 image
input_data = preprocess(input_data_json)
input_name = session.get_inputs()[0].name # get the id of the first input of the model
result = session.run([], {input_name: input_data})
end = time.time() # stop timer
return {"result": postprocess(result),
"time": end - start}
except Exception as e:
result = str(e)
return {"error": result} | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
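Before deploying, the model can optionally be sanity-checked locally with the ONNX Runtime. This is a hedged sketch that mirrors what score.py does inside the container; the batch dimension and the random dummy input below are assumptions added for illustration, not part of the original notebook.
import numpy as np
import onnxruntime

local_session = onnxruntime.InferenceSession("resnet50v2/resnet50v2.onnx", None)
input_name = local_session.get_inputs()[0].name
dummy_image = np.random.rand(1, 3, 224, 224).astype('float32')  # random stand-in image
outputs = local_session.run([], {input_name: dummy_image})
print(outputs[0].shape)  # expect scores over the 1000 ImageNet classes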
Create inference configuration First we create a YAML file that specifies which dependencies we would like to see in our container. | from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(pip_packages=["numpy","onnxruntime","azureml-core"])
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string()) | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
Create the inference configuration object | from azureml.core.model import InferenceConfig
inference_config = InferenceConfig(runtime= "python",
entry_script="score.py",
conda_file="myenv.yml",
extra_docker_file_steps = "Dockerfile") | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
Deploy the model | from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'demo': 'onnx'},
description = 'web service for ResNet50 ONNX model') | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
The following cell will likely take a few minutes to run as well. | from random import randint
aci_service_name = 'onnx-demo-resnet50'+str(randint(0,100))
print("Service", aci_service_name)
aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)
aci_service.wait_for_deployment(True)
print(aci_service.state) | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
In case the deployment fails, you can check the logs. Make sure to delete your aci_service before trying again. | if aci_service.state != 'Healthy':
# run this command for debugging.
print(aci_service.get_logs())
aci_service.delete() | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
Success!If you've made it this far, you've deployed a working web service that does image classification using an ONNX model. You can get the URL for the webservice with the code below. | print(aci_service.scoring_uri) | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
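As a quick smoke test, the service can also be invoked through the SDK. The sketch below assumes the 3x224x224 `data` payload format that score.py above expects and uses random pixel values in place of a real image; it is illustrative rather than part of the original tutorial.
import json
import numpy as np

dummy_image = np.random.randint(0, 256, size=(3, 224, 224)).tolist()
payload = json.dumps({'data': dummy_image})

response = aci_service.run(payload)
scores = np.array(response['result']).flatten()
print('inference time:', response['time'], 'seconds')
print('top class index:', int(scores.argmax()))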
When you are eventually done using the web service, remember to delete it. | #aci_service.delete() | _____no_output_____ | MIT | how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb | MustAl-Du/MachineLearningNotebooks |
Stable Baselines, a Fork of OpenAI Baselines - Training, Saving and LoadingGithub Repo: [https://github.com/hill-a/stable-baselines](https://github.com/hill-a/stable-baselines)Medium article: [https://medium.com/@araffin/stable-baselines-a-fork-of-openai-baselines-df87c4b2fc82](https://medium.com/@araffin/stable-baselines-a-fork-of-openai-baselines-df87c4b2fc82) Install Dependencies and Stable Baselines Using PipList of full dependencies can be found in the [README](https://github.com/hill-a/stable-baselines).```sudo apt-get update && sudo apt-get install cmake libopenmpi-dev zlib1g-dev``````pip install stable-baselines``` | !apt install swig cmake libopenmpi-dev zlib1g-dev
!pip install stable-baselines==2.5.1 box2d box2d-kengz | _____no_output_____ | MIT | my_colabs/stbl_team/saving_loading_a2c.ipynb | guyk1971/stable-baselines |
Import policy, RL agent, ... | import gym
import numpy as np
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import A2C | _____no_output_____ | MIT | my_colabs/stbl_team/saving_loading_a2c.ipynb | guyk1971/stable-baselines |
Create the Gym env and instantiate the agentFor this example, we will use the Lunar Lander environment."Landing outside landing pad is possible. Fuel is infinite, so an agent can learn to fly and then land on its first attempt. Four discrete actions available: do nothing, fire left orientation engine, fire main engine, fire right orientation engine. "Lunar Lander environment: [https://gym.openai.com/envs/LunarLander-v2/](https://gym.openai.com/envs/LunarLander-v2/)Note: vectorized environments make it easy to multiprocess training. In this example, we are using only one process, hence the DummyVecEnv.We chose the MlpPolicy because the input of LunarLander is a feature vector, not images.The type of action to use (discrete/continuous) will be automatically deduced from the environment action space | env = gym.make('LunarLander-v2')
# vectorized environments allow to easily multiprocess training
# we demonstrate its usefulness in the next examples
env = DummyVecEnv([lambda: env]) # The algorithms require a vectorized environment to run
model = A2C(MlpPolicy, env, ent_coef=0.1, verbose=0)
| [33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
| MIT | my_colabs/stbl_team/saving_loading_a2c.ipynb | guyk1971/stable-baselines |
We create a helper function to evaluate the agent: | def evaluate(model, num_steps=1000):
"""
Evaluate a RL agent
:param model: (BaseRLModel object) the RL Agent
:param num_steps: (int) number of timesteps to evaluate it
:return: (float) Mean reward for the last 100 episodes
"""
episode_rewards = [0.0]
obs = env.reset()
for i in range(num_steps):
# _states are only useful when using LSTM policies
action, _states = model.predict(obs)
# here, action, rewards and dones are arrays
# because we are using vectorized env
obs, rewards, dones, info = env.step(action)
# Stats
episode_rewards[-1] += rewards[0]
if dones[0]:
obs = env.reset()
episode_rewards.append(0.0)
# Compute mean reward for the last 100 episodes
mean_100ep_reward = round(np.mean(episode_rewards[-100:]), 1)
print("Mean reward:", mean_100ep_reward, "Num episodes:", len(episode_rewards))
return mean_100ep_reward | _____no_output_____ | MIT | my_colabs/stbl_team/saving_loading_a2c.ipynb | guyk1971/stable-baselines |
Let's evaluate the un-trained agent, this should be a random agent. | # Random Agent, before training
mean_reward_before_train = evaluate(model, num_steps=10000) | Mean reward: -210.3 Num episodes: 107
| MIT | my_colabs/stbl_team/saving_loading_a2c.ipynb | guyk1971/stable-baselines |
Train the agent and save itWarning: this may take a while | # Train the agent
model.learn(total_timesteps=10000)
# Save the agent
model.save("a2c_lunar")
del model # delete trained model to demonstrate loading | _____no_output_____ | MIT | my_colabs/stbl_team/saving_loading_a2c.ipynb | guyk1971/stable-baselines |
Load the trained agent | model = A2C.load("a2c_lunar")
# Evaluate the trained agent
mean_reward = evaluate(model, num_steps=10000) | Mean reward: -310.2 Num episodes: 68
| MIT | my_colabs/stbl_team/saving_loading_a2c.ipynb | guyk1971/stable-baselines |
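A loaded model keeps its weights but is not attached to an environment. If you want to continue training the saved agent rather than only evaluate it, a hedged sketch using the stable-baselines 2.x API could look like this.
# resume training from the saved checkpoint
model = A2C.load("a2c_lunar")
model.set_env(env)  # re-attach the vectorized environment
model.learn(total_timesteps=5000)
model.save("a2c_lunar_continued")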
A/B test 3 - loved journeys, control vs node2vecThis related links B/C test (ab3) was conducted from 15-20th Mar 2019. The data used in this report cover 15-19th Mar 2019 because the test ended on 20th Mar. The test compared the existing related links (where available) to links generated using the node2vec algorithm. Import | %load_ext autoreload
%autoreload 2
import os
import pandas as pd
import numpy as np
import ast
import re
# z test
from statsmodels.stats.proportion import proportions_ztest
# bayesian bootstrap and vis
import matplotlib.pyplot as plt
import seaborn as sns
import bayesian_bootstrap.bootstrap as bb
from astropy.utils import NumpyRNGContext
# progress bar
from tqdm import tqdm, tqdm_notebook
from scipy import stats
from collections import Counter
import sys
sys.path.insert(0, '../../src' )
import analysis as analysis
# set up the style for our plots
sns.set(style='white', palette='colorblind', font_scale=1.3,
rc={'figure.figsize':(12,9),
"axes.facecolor": (0, 0, 0, 0)})
# instantiate progress bar goodness
tqdm.pandas(tqdm_notebook)
pd.set_option('max_colwidth',500)
# the number of bootstrap means used to generate a distribution
boot_reps = 10000
# alpha - false positive rate
alpha = 0.05
# number of tests
m = 4
# Correct alpha for multiple comparisons
alpha = alpha / m
# The Bonferroni correction can be used to adjust confidence intervals also.
# If one establishes m confidence intervals, and wishes to have an overall confidence level of 1-alpha,
# each individual confidence interval can be adjusted to the level of 1-(alpha/m).
# reproducible
seed = 1337 | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
File/dir locations Processed journey data | DATA_DIR = os.getenv("DATA_DIR")
filename = "full_sample_loved_947858.csv.gz"
filepath = os.path.join(
DATA_DIR, "sampled_journey", "20190315_20190319",
filename)
filepath
VARIANT_DICT = {
'CONTROL_GROUP':'B',
'INTERVENTION_GROUP':'C'
}
# read in processed sampled journey with just the cols we need for related links
df = pd.read_csv(filepath, sep ="\t", compression="gzip")
# convert from str to list
df['Event_cat_act_agg']= df['Event_cat_act_agg'].progress_apply(ast.literal_eval)
df['Page_Event_List'] = df['Page_Event_List'].progress_apply(ast.literal_eval)
df['Page_List'] = df['Page_List'].progress_apply(ast.literal_eval)
# drop dodgy rows, where the page variant is not in the control or intervention group
CONTROL_GROUP = VARIANT_DICT['CONTROL_GROUP']
INTERVENTION_GROUP = VARIANT_DICT['INTERVENTION_GROUP']
df = df.query('ABVariant in [@CONTROL_GROUP, @INTERVENTION_GROUP]')
df[['Occurrences', 'ABVariant']].groupby('ABVariant').sum()
df['Page_List_Length'] = df['Page_List'].progress_apply(len)
| 100%|██████████| 740885/740885 [00:00<00:00, 766377.92it/s]
| MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
Nav type of page lookup - is it a finding page? if not it's a thing page | filename = "document_types.csv.gz"
# created a metadata dir in the DATA_DIR to hold this data
filepath = os.path.join(
DATA_DIR, "metadata",
filename)
print(filepath)
df_finding_thing = pd.read_csv(filepath, sep="\t", compression="gzip")
df_finding_thing.head()
thing_page_paths = df_finding_thing[
df_finding_thing['is_finding']==0]['pagePath'].tolist()
finding_page_paths = df_finding_thing[
df_finding_thing['is_finding']==1]['pagePath'].tolist() | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
OutliersSome rows should be removed before analysis. For example rows with journey lengths of 500 or very high related link click rates. This process might have to happen once features have been created. Derive variables journey_click_rateThere is no difference in the proportion of journeys using at least one related link (journey_click_rate) between page variant A and page variant B. \begin{equation*}\frac{\text{total number of journeys including at least one click on a related link}}{\text{total number of journeys}}\end{equation*} | # get the number of related links clicks per Sequence
df['Related Links Clicks per seq'] = df['Event_cat_act_agg'].map(analysis.sum_related_click_events)
# map across the Sequence variable, which includes pages and Events
# we want to pass all the list elements to a function one-by-one and then collect the output.
df["Has_Related"] = df["Related Links Clicks per seq"].map(analysis.is_related)
df['Related Links Clicks row total'] = df['Related Links Clicks per seq'] * df['Occurrences']
df.head(3) | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
count of clicks on navigation elementsThere is no statistically significant difference in the count of clicks on navigation elements per journey between page variant A and page variant B.\begin{equation*}{\text{total number of navigation element click events from content pages}}\end{equation*} Related link counts | # get the total number of related links clicks for that row (clicks per sequence multiplied by occurrences)
df['Related Links Clicks row total'] = df['Related Links Clicks per seq'] * df['Occurrences'] | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
Navigation events | def count_nav_events(page_event_list):
"""Counts the number of nav events from a content page in a Page Event List."""
content_page_nav_events = 0
for pair in page_event_list:
if analysis.is_nav_event(pair[1]):
if pair[0] in thing_page_paths:
content_page_nav_events += 1
return content_page_nav_events
# needs finding_thing_df read in from document_types.csv.gz
df['Content_Page_Nav_Event_Count'] = df['Page_Event_List'].progress_map(count_nav_events)
def count_search_from_content(page_list):
search_from_content = 0
for i, page in enumerate(page_list):
if i > 0:
if '/search?q=' in page:
if page_list[i-1] in thing_page_paths:
search_from_content += 1
return search_from_content
df['Content_Search_Event_Count'] = df['Page_List'].progress_map(count_search_from_content)
# count of nav or search clicks
df['Content_Nav_or_Search_Count'] = df['Content_Page_Nav_Event_Count'] + df['Content_Search_Event_Count']
# occurrences is accounted for by the group by bit in our bayesian boot analysis function
df['Content_Nav_Search_Event_Sum_row_total'] = df['Content_Nav_or_Search_Count'] * df['Occurrences']
# required for journeys with no nav later
df['Has_No_Nav_Or_Search'] = df['Content_Nav_Search_Event_Sum_row_total'] == 0 | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
Temporary df file in case of crash Save | df.to_csv(os.path.join(
DATA_DIR,
"ab3_loved_temp.csv.gz"), sep="\t", compression="gzip", index=False)
df = pd.read_csv(os.path.join(
DATA_DIR,
"ab3_loved_temp.csv.gz"), sep="\t", compression="gzip") | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
Frequentist statistics Statistical significance | # help(proportions_ztest)
has_rel = analysis.z_prop(df, 'Has_Related', VARIANT_DICT)
has_rel
has_rel['p-value'] < alpha | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
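`analysis.z_prop` comes from this repo's local `src/analysis.py`, which is not reproduced here. Purely as a hedged sketch of the underlying two-sample proportion test (the column handling below is an assumption, not the actual implementation), it could look roughly like this using the `proportions_ztest` imported above.
def z_prop_sketch(journeys, success_col, variant_dict):
    # occurrence-weighted successes and totals for each variant
    a = journeys[journeys.ABVariant == variant_dict['CONTROL_GROUP']]
    b = journeys[journeys.ABVariant == variant_dict['INTERVENTION_GROUP']]
    x_a = (a[success_col] * a.Occurrences).sum()
    x_b = (b[success_col] * b.Occurrences).sum()
    n_a = a.Occurrences.sum()
    n_b = b.Occurrences.sum()
    z, p = proportions_ztest([x_a, x_b], [n_a, n_b])
    return {'z': z, 'p-value': p,
            'x_a': x_a, 'n_a': n_a, 'p_a': x_a / n_a,
            'x_b': x_b, 'n_b': n_b, 'p_b': x_b / n_b}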
Practical significance - uplift | # Due to multiple testing we used the Bonferroni correction for alpha
ci_low,ci_upp = analysis.zconf_interval_two_samples(has_rel['x_a'], has_rel['n_a'],
has_rel['x_b'], has_rel['n_b'], alpha = alpha)
print(' difference in proportions = {0:.2f}%'.format(100*(has_rel['p_b']-has_rel['p_a'])))
print(' % relative change in proportions = {0:.2f}%'.format(100*((has_rel['p_b']-has_rel['p_a'])/has_rel['p_a'])))
print(' 95% Confidence Interval = ( {0:.2f}% , {1:.2f}% )'
.format(100*ci_low, 100*ci_upp)) | difference in proportions = 1.53%
% relative change in proportions = 44.16%
95% Confidence Interval = ( 1.46% , 1.61% )
| MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
Bayesian statistics Based on [this](https://medium.com/@thibalbo/coding-bayesian-ab-tests-in-python-e89356b3f4bd) blog To be developed, a Bayesian approach can provide a simpler interpretation. Bayesian bootstrap | analysis.compare_total_searches(df, VARIANT_DICT)
fig, ax = plt.subplots()
plot_df_B = df[df.ABVariant == VARIANT_DICT['INTERVENTION_GROUP']].groupby(
'Content_Nav_or_Search_Count').sum().iloc[:, 0]
plot_df_A = df[df.ABVariant == VARIANT_DICT['CONTROL_GROUP']].groupby(
'Content_Nav_or_Search_Count').sum().iloc[:, 0]
ax.set_yscale('log')
width =0.4
ax = plot_df_B.plot.bar(label='B', position=1, width=width)
ax = plot_df_A.plot.bar(label='A', color='salmon', position=0, width=width)
plt.title("loved journeys")
plt.ylabel("Log(number of journeys)")
plt.xlabel("Number of uses of search/nav elements in journey")
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.savefig('nav_counts_loved_bar.png', dpi = 900, bbox_inches = 'tight')
a_bootstrap, b_bootstrap = analysis.bayesian_bootstrap_analysis(df, col_name='Content_Nav_or_Search_Count', boot_reps=boot_reps, seed = seed, variant_dict=VARIANT_DICT)
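# analysis.bayesian_bootstrap_analysis lives in the repo's src/analysis.py and is not
# shown in this notebook; the function below is only a hedged, illustrative sketch of
# the idea (expand rows by Occurrences, then draw Bayesian-bootstrap means per variant),
# not the actual implementation.
def bayesian_bootstrap_sketch(journeys, col_name, boot_reps, seed, variant_dict):
    draws = []
    for variant in (variant_dict['CONTROL_GROUP'], variant_dict['INTERVENTION_GROUP']):
        sub = journeys[journeys.ABVariant == variant]
        values = np.repeat(sub[col_name].values, sub.Occurrences.values.astype(int))
        with NumpyRNGContext(seed):
            draws.append(bb.mean(values, n_replications=boot_reps))
    return draws  # [control_draws, intervention_draws]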
np.array(a_bootstrap).mean()
np.array(a_bootstrap).mean() - (0.05 * np.array(a_bootstrap).mean())
np.array(b_bootstrap).mean()
print("A relative change of {0:.2f}% from control to intervention".format((np.array(b_bootstrap).mean()-np.array(a_bootstrap).mean())/np.array(a_bootstrap).mean()*100))
# ratio is vestigial but we keep it here for convenience
# it's actually a count but considers occurrences
ratio_stats = analysis.bb_hdi(a_bootstrap, b_bootstrap, alpha=alpha)
ratio_stats
ax = sns.distplot(b_bootstrap, label='B')
ax.errorbar(x=[ratio_stats['b_ci_low'], ratio_stats['b_ci_hi']], y=[2, 2], linewidth=5, c='teal', marker='o',
label='95% HDI B')
ax = sns.distplot(a_bootstrap, label='A', ax=ax, color='salmon')
ax.errorbar(x=[ratio_stats['a_ci_low'], ratio_stats['a_ci_hi']], y=[5, 5], linewidth=5, c='salmon', marker='o',
label='95% HDI A')
ax.set(xlabel='mean search/nav count per journey', ylabel='Density')
sns.despine()
legend = plt.legend(frameon=True, bbox_to_anchor=(0.75, 1), loc='best')
frame = legend.get_frame()
frame.set_facecolor('white')
plt.title("loved journeys")
plt.savefig('nav_counts_loved.png', dpi = 900, bbox_inches = 'tight')
# calculate the posterior for the difference between A's and B's ratio
# ypa prefix is vestigial from blog post
ypa_diff = np.array(b_bootstrap) - np.array(a_bootstrap)
# get the hdi
ypa_diff_ci_low, ypa_diff_ci_hi = bb.highest_density_interval(ypa_diff)
# the mean of the posterior
print('mean:', ypa_diff.mean())
print('low ci:', ypa_diff_ci_low, '\nhigh ci:', ypa_diff_ci_hi)
ax = sns.distplot(ypa_diff)
ax.plot([ypa_diff_ci_low, ypa_diff_ci_hi], [0, 0], linewidth=10, c='k', marker='o',
label='95% HDI')
ax.set(xlabel='Content_Nav_or_Search_Count', ylabel='Density',
title='The difference between B\'s and A\'s mean counts times occurrences')
sns.despine()
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.show();
# We count the number of values greater than 0 and divide by the total number
# of observations
# which returns us the proportion of values in the distribution that are
# greater than 0, could act a bit like a p-value
(ypa_diff > 0).sum() / ypa_diff.shape[0]
# We count the number of values less than 0 and divide by the total number
# of observations
# which returns us the proportion of values in the distribution that are
# less than 0, could act a bit like a p-value
(ypa_diff < 0).sum() / ypa_diff.shape[0]
(ypa_diff>0).sum()
(ypa_diff<0).sum() | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
proportion of journeys with a page sequence including content and related links onlyThere is no statistically significant difference in the proportion of journeys with a page sequence including content and related links only (including loops) between page variant A and page variant B \begin{equation*}\frac{\text{total number of journeys that only contain content pages and related links (i.e. no nav pages)}}{\text{total number of journeys}}\end{equation*} Overall | # if (Content_Nav_Search_Event_Sum == 0) that's our success
# Has_No_Nav_Or_Search == 1 is a success
# the problem is symmetrical so doesn't matter too much
sum(df.Has_No_Nav_Or_Search * df.Occurrences) / df.Occurrences.sum()
sns.distplot(df.Content_Nav_or_Search_Count.values); | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
Frequentist statistics Statistical significance | nav = analysis.z_prop(df, 'Has_No_Nav_Or_Search', VARIANT_DICT)
nav | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
Practical significance - uplift | # Due to multiple testing we used the Bonferroni correction for alpha
ci_low,ci_upp = analysis.zconf_interval_two_samples(nav['x_a'], nav['n_a'],
nav['x_b'], nav['n_b'], alpha = alpha)
diff = 100*(nav['x_b']/nav['n_b']-nav['x_a']/nav['n_a'])
print(' difference in proportions = {0:.2f}%'.format(diff))
print(' 95% Confidence Interval = ( {0:.2f}% , {1:.2f}% )'
.format(100*ci_low, 100*ci_upp))
print("There was a {0: .2f}% relative change in the proportion of journeys not using search/nav elements".format(100 * ((nav['p_b']-nav['p_a'])/nav['p_a']))) | There was a 0.18% relative change in the proportion of journeys not using search/nav elements
| MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
Average Journey Length (number of page views)There is no statistically significant difference in the average page list length of journeys (including loops) between page variant A and page variant B. | length_B = df[df.ABVariant == VARIANT_DICT['INTERVENTION_GROUP']].groupby(
'Page_List_Length').sum().iloc[:, 0]
lengthB_2 = length_B.reindex(np.arange(1, 501, 1), fill_value=0)
length_A = df[df.ABVariant == VARIANT_DICT['CONTROL_GROUP']].groupby(
'Page_List_Length').sum().iloc[:, 0]
lengthA_2 = length_A.reindex(np.arange(1, 501, 1), fill_value=0)
fig, ax = plt.subplots(figsize=(100, 30))
ax.set_yscale('log')
width = 0.4
ax = lengthB_2.plot.bar(label='B', position=1, width=width)
ax = lengthA_2.plot.bar(label='A', color='salmon', position=0, width=width)
plt.xlabel('length', fontsize=1)
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.show(); | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
Bayesian bootstrap for non-parametric hypotheses | # http://savvastjortjoglou.com/nfl-bayesian-bootstrap.html
# let's use mean journey length (could probably model parametrically but we use it for demonstration here)
# some journeys have length 500 and should probably be removed as they are likely bots or other weirdness
# exclude journeys longer than 500 as these could be automated traffic
df_short = df[df['Page_List_Length'] < 500]
print("The mean number of pages in an loved journey is {0:.3f}".format(sum(df.Page_List_Length*df.Occurrences)/df.Occurrences.sum()))
# for reproducibility, set the seed within this context
a_bootstrap, b_bootstrap = analysis.bayesian_bootstrap_analysis(df, col_name='Page_List_Length', boot_reps=boot_reps, seed = seed, variant_dict=VARIANT_DICT)
a_bootstrap_short, b_bootstrap_short = analysis.bayesian_bootstrap_analysis(df_short, col_name='Page_List_Length', boot_reps=boot_reps, seed = seed, variant_dict=VARIANT_DICT)
np.array(a_bootstrap).mean()
np.array(b_bootstrap).mean()
print("There's a relative change in page length of {0:.2f}% from A to B".format((np.array(b_bootstrap).mean()-np.array(a_bootstrap).mean())/np.array(a_bootstrap).mean()*100))
print(np.array(a_bootstrap_short).mean())
print(np.array(b_bootstrap_short).mean())
# Calculate a 95% HDI
a_ci_low, a_ci_hi = bb.highest_density_interval(a_bootstrap)
print('low ci:', a_ci_low, '\nhigh ci:', a_ci_hi)
ax = sns.distplot(a_bootstrap, color='salmon')
ax.plot([a_ci_low, a_ci_hi], [0, 0], linewidth=10, c='k', marker='o',
label='95% HDI')
ax.set(xlabel='Journey Length', ylabel='Density', title='Page Variant A Mean Journey Length')
sns.despine()
plt.legend();
# Calculate a 95% HDI
b_ci_low, b_ci_hi = bb.highest_density_interval(b_bootstrap)
print('low ci:', b_ci_low, '\nhigh ci:', b_ci_hi)
ax = sns.distplot(b_bootstrap)
ax.plot([b_ci_low, b_ci_hi], [0, 0], linewidth=10, c='k', marker='o',
label='95% HDI')
ax.set(xlabel='Journey Length', ylabel='Density', title='Page Variant B Mean Journey Length')
sns.despine()
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.show();
ax = sns.distplot(b_bootstrap, label='B')
ax = sns.distplot(a_bootstrap, label='A', ax=ax, color='salmon')
ax.set(xlabel='Journey Length', ylabel='Density')
sns.despine()
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.title("loved journeys")
plt.savefig('journey_length_loved.png', dpi = 900, bbox_inches = 'tight')
ax = sns.distplot(b_bootstrap_short, label='B')
ax = sns.distplot(a_bootstrap_short, label='A', ax=ax, color='salmon')
ax.set(xlabel='Journey Length', ylabel='Density')
sns.despine()
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.show(); | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
We can also measure the uncertainty in the difference between the Page Variants' Journey Length by subtracting their posteriors. | # calculate the posterior for the difference between A's and B's YPA
ypa_diff = np.array(b_bootstrap) - np.array(a_bootstrap)
# get the hdi
ypa_diff_ci_low, ypa_diff_ci_hi = bb.highest_density_interval(ypa_diff)
# the mean of the posterior
ypa_diff.mean()
print('low ci:', ypa_diff_ci_low, '\nhigh ci:', ypa_diff_ci_hi)
ax = sns.distplot(ypa_diff)
ax.plot([ypa_diff_ci_low, ypa_diff_ci_hi], [0, 0], linewidth=10, c='k', marker='o',
label='95% HDI')
ax.set(xlabel='Journey Length', ylabel='Density',
title='The difference between B\'s and A\'s mean Journey Length')
sns.despine()
legend = plt.legend(frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
plt.show(); | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
We can actually calculate the probability that B's mean Journey Length was greater than A's mean Journey Length by measuring the proportion of values greater than 0 in the above distribution. | # We count the number of values greater than 0 and divide by the total number
# of observations
# which returns us the proportion of values in the distribution that are
# greater than 0, could act a bit like a p-value
(ypa_diff > 0).sum() / ypa_diff.shape[0]
# We count the number of values less than 0 and divide by the total number
# of observations
# which returns us the proportion of values in the distribution that are
# less than 0, could act a bit like a p-value
(ypa_diff < 0).sum() / ypa_diff.shape[0] | _____no_output_____ | MIT | notebooks/analyses_reports/2019-03-15_to_03-19_ab3_node2vec_i_loved.ipynb | alphagov/govuk_ab_analysis |
Design Your Own Neural Net | import numpy as np
import matplotlib.pyplot as plt
logistic = lambda u: 1/(1+np.exp(-u))
def get_challenge1():
np.random.seed(0)
X = np.random.randn(100, 2)
d = np.sqrt(np.sum(X**2, axis=1))
y = np.array(d < 1, dtype=float)
return X, y
def get_challenge2():
X, y = get_challenge1()
X = np.concatenate((X+np.array([[-2, 0]]), X+np.array([[2, 0]])), axis=0)
y = np.concatenate((y, y))
return X, y
X, y = get_challenge1()
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
plt.axis("equal")
def plot_net(X, y, mynet, res=100):
rg = [np.min(X[:, 0]), np.max(X[:, 0])]
dr = rg[1] - rg[0]
pixx = np.linspace(rg[0], rg[1], res)
rg = [np.min(X[:, 1]), np.max(X[:, 1])]
dr = rg[1]- rg[0]
pixy = np.linspace(rg[0], rg[1], res)
xx, yy = np.meshgrid(pixx, pixy)
I = mynet(xx, yy)
plt.figure(figsize=(12, 6))
plt.subplot(121)
plt.imshow(I, cmap='gray', extent=(pixx[0], pixx[-1], pixy[-1], pixy[0]))
plt.colorbar()
plt.scatter(X[y == 0, 0], X[y == 0, 1], c='C0')
plt.scatter(X[y == 1, 0], X[y == 1, 1], c='C1')
plt.gca().invert_yaxis()
plt.subplot(122)
plt.imshow(I > 0.5, cmap='gray', extent=(pixx[0], pixx[-1], pixy[-1], pixy[0]))
plt.colorbar()
pred = mynet(X[:, 0], X[:, 1]) > 0.5
plt.scatter(X[(y == 0)*(pred == 0), 0], X[(y == 0)*(pred == 0), 1], c='C0')
plt.scatter(X[(y == 0)*(pred == 1), 0], X[(y == 0)*(pred == 1), 1], c='C0', marker='x')
plt.scatter(X[(y == 1)*(pred == 0), 0], X[(y == 1)*(pred == 0), 1], c='C1', marker='x')
plt.scatter(X[(y == 1)*(pred == 1), 0], X[(y == 1)*(pred == 1), 1], c='C1')
plt.gca().invert_yaxis()
num_correct = np.sum((y==0)*(pred==0)) + np.sum((y==1)*(pred==1))
perc = 100*num_correct/X.shape[0]
plt.title("{} Correct ({}%)".format(num_correct, perc))
def fn1(x, y):
return logistic(-2*x+2)
def fn2(x, y):
return logistic(2*x+2)
def myfn(x, y):
return logistic(fn1(x, y) + fn2(x, y) - 1.5)
plt.figure()
plot_net(X, y, fn1)
plt.figure()
plot_net(X, y, fn2)
plt.figure()
plot_net(X, y, myfn) | _____no_output_____ | Apache-2.0 | DesignYourNeuralNet.ipynb | Ursinus-CS477-F2021/Week11_Convexity_NNIntro |
Gaussian Process Regression Gaussian Process* Random process where any point $\large x \in \mathbb{R}^d$ is assigned a random variable $\large \mathbb{f}(x)$* Joint distribution of such finite number of variables is given by:$$\large p(\mathbb{f} \mid X)=\mathcal{N}(\mathbb{f}|\mu,K)$$ where$$ \mathbb{f} = (\mathbb{f}(x_1), \ldots, \mathbb{f}(x_n)) $$$$ \mu = (m(x_1),\ldots, m(x_n)) $$$$ K_{ij} = \kappa(x_i, x_j) $$ where $\kappa$ is a PSD kernel function Gaussian Process Regression* Joint distribution of observed values $\large \mathbb{f} $ and predictions $\large \mathbb{f}_* $ is Gaussian with$$\begin{pmatrix} \large \mathbb{f} \\ \large \mathbb{f}_* \end{pmatrix} \sim N\Bigg( \large 0, \begin{pmatrix} K & K_* \\ K_*^T & K_{**} \end{pmatrix} \Bigg)$$where $K = \kappa(X, X)$, $K_* = \kappa(X, X_*)$ and $K_{**}=\kappa(X_*, X_*)$* Posterior/predictive distribution for $\large y=f+\epsilon$ with $\large \epsilon \sim N(0, \sigma_y^2 \mathbb{I})$ is given by$$ \large p(\mathbb{f}_* \mid X_*, X, y) = N(\mu_*, \Sigma_* )$$where $$\large \mu_*=K_*(K+\sigma_y^2 I)^{-1} y$$$$\large \Sigma_*=K_{**}-K_* (K+\sigma_y^2 \mathbb{I})^{-1} K_*^T$$* Regression line is the mean of the posterior distribution $\large\mu_*$* Diagonal entries of the covariance matrix $\large \Sigma_*$ can be used for confidence intervals surrounding the regression line Gaussian Process Regression DashboardThe dashboard below helps us better understand GP regression* Ground truth (or the function GPR is trying to learn) is shown as a white dotted line* The regression line in magenta is the zero line (mean of the prior distribution) to start with* `Display 5 Priors?` checkbox shows/hides 5 realizations from the prior distribution* Training samples can be added by clicking anywhere on the figure or can be updated by dragging the existing points* `Display 5 Posteriors?` checkbox shows/hides 5 realizations from the posterior distribution* `Display Std Bands?` checkbox shows/hides 2 std bands from the posterior mean (aka regression line)* $\sigma_{noise}$ slider controls noise around the training samples* Add a few points close to the white line at different places to see the regression line and the confidence intervals update in real time!* Impact of RBF kernel hyper-params ($\sigma$ and $l$) can be seen by updating their values below the figure | import inspect
import numpy as np
import ipywidgets as w
import bqplot.pyplot as plt
import bqplot as bq
# kernels
def rbf(x1, x2, sigma=1., l=1.):
z = (x1 - x2[:, np.newaxis]) / l
return sigma**2 * np.exp(-.5 * z ** 2)
def gp_regression(X_train, y_train, X_test,
kernel=rbf,
sigma_noise=.1,
kernel_params=dict(sigma=1., l=1.)):
# compute the kernel matrices for train, train_test, test combinations
K = kernel(X_train, X_train, **kernel_params)
K_s = kernel(X_train, X_test, **kernel_params)
K_ss = kernel(X_test, X_test, **kernel_params)
n, p = len(X_train), len(X_test)
# compute the posterior mean and cov
mu_s = np.dot(K_s, np.linalg.solve(K + sigma_noise**2 * np.eye(n), y_train))
cov_s = K_ss - np.dot(K_s, np.linalg.solve(K + sigma_noise**2 * np.eye(n), K_s.T))
# prior and posterior moments
mu_prior, cov_prior = np.zeros(p), K_ss
mu_post, cov_post = mu_s, cov_s + sigma_noise**2
return dict(prior=(mu_prior, cov_prior),
posterior=(mu_post, cov_post))
xmin, xmax = -1, 2
kernel = rbf
params = dict(sigma=1., l=1.)
X_test = np.arange(xmin, xmax, .05)
p = len(X_test)
K_ss = kernel(X_test, X_test, **params)
mu_prior, cov_prior = np.zeros(p), K_ss
N = 5
f_priors = np.random.multivariate_normal(mu_prior, cov_prior, N)
# kernel controls
kernel_label = w.HTML(description='RBF Kernel')
equation_label = w.Label("$\kappa(x_1, x_2) = \sigma^2 exp(-\\frac{(x_1 - x_2)^2}{2l^2})$")
sigma_slider = w.FloatText(description="$\sigma$", min=0, value=1, step=1)
l_slider = w.FloatText(description="$l$", min=0, value=1, step=1)
kernel_controls = w.HBox([kernel_label, equation_label, sigma_slider, l_slider])
fig_margin=dict(top=60, bottom=40, left=50, right=0)
fig = plt.figure(title='Gaussian Process Regression',
layout=w.Layout(width='1200px', height='700px'),
animation_duration=750,
fig_margin=fig_margin)
plt.scales(scales={'x': bq.LinearScale(min=xmin, max=xmax),
'y': bq.LinearScale(min=-2, max=2)})
# ground truth line
y = -np.sin(3 * X_test) - X_test ** 2 + .3 * X_test + .5
f_line = plt.plot(X_test, y, colors=['white'], line_style='dash_dotted')
std_bands = plt.plot(X_test, [],
fill='between',
fill_colors=['yellow'],
apply_clip=False,
fill_opacities=[.2], stroke_width=0)
train_scat = plt.scatter([], [], colors=['magenta'],
enable_move=True,
interactions={'click': 'add'},
marker_size=1, marker='square')
prior_lines = plt.plot(X_test, f_priors, stroke_width=1,
colors=['#ccc'], apply_clip=False)
posterior_lines = plt.plot(X_test, [], stroke_width=1, apply_clip=False)
mean_line = plt.plot(X_test, [], 'm')
plt.xlabel('X')
plt.ylabel('Y')
# reset btn
reset_button = w.Button(description='Reset Points', button_style='success')
reset_button.layout.margin = '20px 0px 0px 70px'
data_noise_slider = w.FloatSlider(description='$\sigma_{noise}$', value=0, step=.01, max=1)
# controls for the plot
f_priors_cb = w.Checkbox(description='Display 5 Priors?')
f_posteriors_cb = w.Checkbox(description='Display 5 Posteriors?')
std_bands_cb = w.Checkbox(description='Display Std Bands?')
check_boxes = [f_priors_cb, f_posteriors_cb, std_bands_cb]
label = w.Label('*Click on the figure to add training samples')
controls = w.VBox(check_boxes + [reset_button, label, data_noise_slider])
# link widgets
_ = w.jslink((f_priors_cb, 'value'), (prior_lines, 'visible'))
_ = w.jslink((f_posteriors_cb, 'value'), (posterior_lines, 'visible'))
_ = w.jslink((std_bands_cb, 'value'), (std_bands, 'visible'))
def update_plot(change):
X_train = train_scat.x
y_train = train_scat.y
gp_res = gp_regression(X_train, y_train, X_test,
sigma_noise=data_noise_slider.value,
kernel=rbf,
kernel_params=dict(sigma=sigma_slider.value, l=l_slider.value))
mu_post, cov_post = gp_res['posterior']
# simulate N samples from the posterior distribution
posterior_lines.y = np.random.multivariate_normal(mu_post, cov_post, N)
sig_post = np.sqrt(np.diag(cov_post))
# update the regression line to the mean of the posterior distribution
mean_line.y = mu_post
# update the std bands to +/- 2 sigmas from the posterior mean
std_bands.y = [mu_post - 2 * sig_post, mu_post + 2 * sig_post]
train_scat.observe(update_plot, names=['x', 'y'])
# redraw plot whenever controls are updated
for widget in [sigma_slider, l_slider, data_noise_slider]:
widget.observe(update_plot)
def reset_points(*args):
with train_scat.hold_trait_notifications():
train_scat.x = []
train_scat.y = []
reset_button.on_click(lambda btn: reset_points())
fig.on_displayed(update_plot)
w.HBox([w.VBox([fig, kernel_controls]), controls]) | _____no_output_____ | MIT | ml/visualizations/Gaussian Process Regression.ipynb | kingreatwill/penter |
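As a usage note, `gp_regression` can also be called directly without the widget dashboard; a minimal sketch using the toy ground-truth function defined above (training points chosen arbitrarily for illustration):
X_tr = np.array([-0.5, 0.2, 0.9, 1.6])
y_tr = -np.sin(3 * X_tr) - X_tr ** 2 + .3 * X_tr + .5
res = gp_regression(X_tr, y_tr, X_test,
                    kernel=rbf, sigma_noise=.1,
                    kernel_params=dict(sigma=1., l=1.))
mu_post, cov_post = res['posterior']
print(mu_post.shape, cov_post.shape)  # posterior mean and covariance over X_test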
Transformation helper Functions Getter function for axes and origin of a sensor's coordinate system`Note: view is a field extracted from the config of sensors.`For example, `view = config['cameras']['front_left']['view']` | def get_axes_of_a_view(view):
"""
Extract the normalized axes of a sensor in the vehicle coordinate system
view: 'view object'
is a dictionary of the x-axis, y-axis and origin of a sensor
"""
x_axis = view['x-axis']
y_axis = view['y-axis']
x_axis_norm = la.norm(x_axis)
y_axis_norm = la.norm(y_axis)
if (x_axis_norm < EPSILON or y_axis_norm < EPSILON):
raise ValueError("Norm of input vector(s) too small.")
# normalize the axes
x_axis = x_axis / x_axis_norm
y_axis = y_axis / y_axis_norm
# make a new y-axis which lies in the original x-y plane, but is orthogonal to x-axis
y_axis = y_axis - x_axis * np.dot(y_axis, x_axis)
# create orthogonal z-axis
z_axis = np.cross(x_axis, y_axis)
# calculate and check y-axis and z-axis norms
y_axis_norm = la.norm(y_axis)
z_axis_norm = la.norm(z_axis)
if (y_axis_norm < EPSILON) or (z_axis_norm < EPSILON):
raise ValueError("Norm of view axis vector(s) too small.")
# make x/y/z-axes orthonormal
y_axis = y_axis / y_axis_norm
z_axis = z_axis / z_axis_norm
return x_axis, y_axis, z_axis
def get_origin_of_a_view(view):
"""
Extract the origin of a sensor configuration in the vehicle coordinate system
view: 'view object'
is a dictionary of the x-axis, y-axis and origin of a sensor
"""
return view['origin'] | _____no_output_____ | MIT | data_processing/lidar_data_processing.ipynb | abhitoronto/KITTI_ROAD_SEGMENTATION |
Getter functions for Coordinate transformation matrix: $$\begin{bmatrix} R & T \\ 0 & 1\end{bmatrix}$$ | def get_transform_to_global(view):
"""
    Get the transformation matrix to convert sensor coordinates to global coordinates
from the view object of a sensor
view: 'view object'
is a dictionary of the x-axis, y-axis and origin of a sensor
"""
# get axes
x_axis, y_axis, z_axis = get_axes_of_a_view(view)
# get origin
origin = get_origin_of_a_view(view)
transform_to_global = np.eye(4)
# rotation
transform_to_global[0:3, 0] = x_axis
transform_to_global[0:3, 1] = y_axis
transform_to_global[0:3, 2] = z_axis
# origin
transform_to_global[0:3, 3] = origin
return transform_to_global
def get_transform_from_global(view):
"""
    Get the transformation matrix to convert global coordinates to sensor coordinates
from the view object of a sensor
view: 'view object'
is a dictionary of the x-axis, y-axis and origin of a sensor
"""
# get transform to global
transform_to_global = get_transform_to_global(view)
trans = np.eye(4)
rot = np.transpose(transform_to_global[0:3, 0:3])
trans[0:3, 0:3] = rot
trans[0:3, 3] = np.dot(rot, -transform_to_global[0:3, 3])
return trans
def transform_from_to(src, target):
"""
Get the Tranformation matrix to convert from source sensor view to target sensor view
src: 'view object'
is a dictionary of the x-axis, y-axis and origin of a sensor
target: 'view object'
is a dictionary of the x-axis, y-axis and origin of a sensor
"""
transform = np.dot(get_transform_from_global(target), \
get_transform_to_global(src))
return transform | _____no_output_____ | MIT | data_processing/lidar_data_processing.ipynb | abhitoronto/KITTI_ROAD_SEGMENTATION |
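A hedged usage sketch (the `lidars` key layout and the placeholder point array below are assumptions about the sensor config, not taken from this notebook): build the matrix that maps lidar points into the front-left camera frame and apply it to homogeneous points.
src_view = config['lidars']['front_left']['view']      # assumed config layout
target_view = config['cameras']['front_left']['view']
lidar_to_cam = transform_from_to(src_view, target_view)

points = np.zeros((10, 3))                              # placeholder Nx3 lidar points
points_hom = np.hstack([points, np.ones((points.shape[0], 1))])
points_cam = (lidar_to_cam @ points_hom.T).T[:, :3]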
Getter Functions for Rotation Matrix $$R_{3x3}$$ | def get_rot_from_global(view):
"""
    Get only the rotation matrix to rotate global coordinates to sensor coordinates
from the view object of a sensor
view: 'view object'
is a dictionary of the x-axis, y-axis and origin of a sensor
"""
# get transform to global
transform_to_global = get_transform_to_global(view)
# get rotation
rot = np.transpose(transform_to_global[0:3, 0:3])
return rot
def get_rot_to_global(view):
"""
    Get only the rotation matrix to rotate sensor coordinates to global coordinates
from the view object of a sensor
view: 'view object'
is a dictionary of the x-axis, y-axis and origin of a sensor
"""
# get transform to global
transform_to_global = get_transform_to_global(view)
# get rotation
rot = transform_to_global[0:3, 0:3]
return rot
def rot_from_to(src, target):
"""
Get only the rotation matrix to rotate from source sensor view to target sensor view
src: 'view object'
is a dictionary of the x-axis, y-axis and origin of a sensor
target: 'view object'
is a dictionary of the x-axis, y-axis and origin of a sensor
"""
rot = np.dot(get_rot_from_global(target), get_rot_to_global(src))
return rot | _____no_output_____ | MIT | data_processing/lidar_data_processing.ipynb | abhitoronto/KITTI_ROAD_SEGMENTATION |
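A short sketch for the rotation-only variant, which is the right tool for direction vectors where translation must not be applied (views again taken from `config`):

```
# Rotate a viewing direction from the front_left frame into the
# front_center frame; the resulting R is orthonormal.
R = rot_from_to(config['cameras']['front_left']['view'],
                config['cameras']['front_center']['view'])
d_src = np.array([0.0, 0.0, 1.0])               # a direction in the source frame
d_tgt = np.dot(R, d_src)
print(np.allclose(np.dot(R.T, R), np.eye(3)))   # True
```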
Helper Functions for (image/Lidar/label) file names | def extract_sensor_file_name(file_name, root_path, sensor_name, ext):
file_name_split = file_name.split('/')
seq_name = file_name_split[-4]
data_viewpoint = file_name_split[-2]
file_name_sensor = file_name_split[-1].split('.')[0]
file_name_sensor = file_name_sensor.split('_')
file_name_sensor = file_name_sensor[0] + '_' + \
sensor_name + '_' + \
file_name_sensor[2] + '_' + \
file_name_sensor[3] + '.' + ext
file_path_sensor = join(root_path, seq_name, sensor_name, data_viewpoint, file_name_sensor)
return file_path_sensor
def extract_image_file_name_from_any_file_name(file_name, root_path):
return extract_sensor_file_name(file_name, root_path, 'camera', 'png')
def extract_semantic_file_name_from_any_file_name(file_name, root_path):
return extract_sensor_file_name(file_name, root_path, 'label', 'png')
def get_prev_directory(file_name):
file_name_split = file_name.split('/')
it = -1
if not file_name_split[it]:
it = it - 1
return file_name.replace(file_name_split[it], '')
def create_unique_dir(dir_name):
if dir_name[-1] == '/':
try:
os.mkdir(dir_name)
if DEBUG:
print(f'New directory created: {dir_name}')
except FileExistsError :
if DEBUG:
print(f'{dir_name} Already Exists. Directory creation skipped')
else:
if DEBUG:
print(f'ERROR: {dir_name} is not a Valid Directory')
def get_cam_name_from_file_name(file_name):
file_name_array = file_name.split('/')
view_point = file_name_array[-2]
view_point_array = view_point.split('_')
cam_name = view_point_array[-2] + '_' + view_point_array[-1]
return cam_name | _____no_output_____ | MIT | data_processing/lidar_data_processing.ipynb | abhitoronto/KITTI_ROAD_SEGMENTATION |
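A sketch of the directory layout these helpers assume, `root/<sequence>/<sensor>/<cam_dir>/<timestamp>_<sensor>_<cam>_<frame>.<ext>`; the concrete file name below is hypothetical and only illustrates the string manipulation (it assumes `root_path` is set as in the data-loading cell further below):

```
# Hypothetical lidar file path used only to exercise the helpers.
lidar_name = root_path + '20180807_145028/lidar/cam_front_center/' \
             '20180807145028_lidar_frontcenter_000009806.npz'
print(extract_image_file_name_from_any_file_name(lidar_name, root_path))     # .../camera/... .png
print(extract_semantic_file_name_from_any_file_name(lidar_name, root_path))  # .../label/... .png
print(get_cam_name_from_file_name(lidar_name))                               # 'front_center'
```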
Helper Functions for Images | def get_cv2_image(file_name_image, color_transform):
# Create Image object and correct image color
image = cv2.imread(file_name_image)
image = cv2.cvtColor(image, color_transform)
return image
def get_undistorted_cv2_image(file_name_image, config, color_transform):
# Create Image object and correct image color
image = get_cv2_image(file_name_image, color_transform)
# Extract cam_name
cam_name = get_cam_name_from_file_name(file_name_image)
if cam_name in ['front_left', 'front_center', \
'front_right', 'side_left', \
'side_right', 'rear_center']:
# get parameters from config file
intr_mat_undist = \
np.asarray(config['cameras'][cam_name]['CamMatrix'])
intr_mat_dist = \
np.asarray(config['cameras'][cam_name]['CamMatrixOriginal'])
dist_parms = \
np.asarray(config['cameras'][cam_name]['Distortion'])
lens = config['cameras'][cam_name]['Lens']
if (lens == 'Fisheye'):
return cv2.fisheye.undistortImage(image, intr_mat_dist,\
D=dist_parms, Knew=intr_mat_undist)
elif (lens == 'Telecam'):
return cv2.undistort(image, intr_mat_dist, \
distCoeffs=dist_parms, newCameraMatrix=intr_mat_undist)
else:
return image
else:
print("Invalid camera name. Returning original image")
return image
def hsv_to_rgb(h, s, v):
"""
Colour format conversion from Hue Saturation Value to RGB.
"""
if s == 0.0:
return v, v, v
i = int(h * 6.0)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
i = i % 6
if i == 0:
return v, t, p
if i == 1:
return q, v, p
if i == 2:
return p, v, t
if i == 3:
return p, q, v
if i == 4:
return t, p, v
if i == 5:
return v, p, q
def normalize_vector(vector, lb, ub):
minimum = np.min(vector)
maximum = np.max(vector)
return lb + (ub - lb)*(vector - minimum)/(maximum-minimum) | _____no_output_____ | MIT | data_processing/lidar_data_processing.ipynb | abhitoronto/KITTI_ROAD_SEGMENTATION |
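A small sketch of how these two helpers combine when colour-coding scalar values such as lidar distances (the numbers are made up; this mirrors the colouring used by `get_lidar_on_image` below):

```
# Map arbitrary distances to [0, 1], then to RGB triplets.
distances = np.array([2.0, 10.0, 50.0])
norm = normalize_vector(distances, 0.0, 1.0)
colours = np.asarray([hsv_to_rgb(0.75 * c, 1.0, 1.0) for c in norm])
print(colours)   # one RGB triplet in [0, 1] per input distance
```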
LIDAR Helper Function Using LIDAR data
- LiDAR data is provided in a camera reference frame.
- `np.load(file_name_lidar)` loads the LIDAR points dictionary
- LIDAR info
  - azimuth:
  - row: y axis image location of the lidar point
  - lidar_id: id of the LIDAR that the point belongs to
  - depth: Point Depth
  - reflectance:
  - col: x axis image location of the lidar point
  - points:
  - timestamp:
  - distance:

LIDAR dictionary loading example:
```
root_path = './camera_lidar_semantic/'
# get the list of files in the lidar directory
file_names = sorted(glob.glob(join(root_path, '*/lidar/*/*.npz')))
# read the lidar data
lidar_front_center = np.load(file_names[0])
``` | def get_lidar_on_image(file_name_lidar, config, root_path, pixel_size=3, pixel_opacity=1):
file_name_image = extract_image_file_name_from_any_file_name(file_name_lidar, root_path)
image = get_undistorted_cv2_image(file_name_image, config, cv2.COLOR_BGR2RGB)
lidar = np.load(file_name_lidar)
# get rows and cols
rows = (lidar['row'] + 0.5).astype(int)
cols = (lidar['col'] + 0.5).astype(int)
# lowest distance values to be accounted for in colour code
MIN_DISTANCE = np.min(lidar['distance'])
# largest distance values to be accounted for in colour code
MAX_DISTANCE = np.max(lidar['distance'])
# get distances
distances = lidar['distance']
# determine point colours from distance
colours = (distances - MIN_DISTANCE) / (MAX_DISTANCE - MIN_DISTANCE)
colours = np.asarray([np.asarray(hsv_to_rgb(0.75 * c, \
np.sqrt(pixel_opacity), 1.0)) for c in colours])
pixel_rowoffs = np.indices([pixel_size, pixel_size])[0] - pixel_size // 2
pixel_coloffs = np.indices([pixel_size, pixel_size])[1] - pixel_size // 2
canvas_rows = image.shape[0]
canvas_cols = image.shape[1]
for i in range(len(rows)):
pixel_rows = np.clip(rows[i] + pixel_rowoffs, 0, canvas_rows - 1)
pixel_cols = np.clip(cols[i] + pixel_coloffs, 0, canvas_cols - 1)
image[pixel_rows, pixel_cols, :] = \
(1. - pixel_opacity) * \
np.multiply(image[pixel_rows, pixel_cols, :], \
colours[i]) + pixel_opacity * 255 * colours[i]
return image.astype(np.uint8), lidar | _____no_output_____ | MIT | data_processing/lidar_data_processing.ipynb | abhitoronto/KITTI_ROAD_SEGMENTATION |
MAIN | # # Pick a random LIDAR file from the custom data set
# np.random.seed()
# idx = np.random.randint(0, len(custom_lidar_files)-1)
# file_name_lidar = custom_lidar_files[idx]
# # Visualize LIDAR on image
# lidar_on_image, lidar = get_lidar_on_image(file_name_lidar, config, root_path)
# pt.fig = pt.figure(figsize=(15, 15))
# pt.title('number of points are '+ str(len(lidar['row'])) )
# pt.imshow(lidar_on_image)
# pt.axis('off')
# # Visualize Semantic Image
# label_image = get_undistorted_cv2_image(extract_semantic_file_name_from_any_file_name(file_name_lidar, root_path) ,\
# config, cv2.COLOR_BGR2RGB)
# pt.fig = pt.figure(figsize=(15, 15))
# pt.imshow(label_image)
# pt.axis('off') | _____no_output_____ | MIT | data_processing/lidar_data_processing.ipynb | abhitoronto/KITTI_ROAD_SEGMENTATION |
LIDAR data loading | # Open Config File
with open ('cams_lidars.json', 'r') as f:
config = json.load(f)
# pprint.pprint(config)
# Create Root Path
root_path = '/hdd/a2d2-data/camera_lidar_semantic/'
# Count Number of LIDAR points in each file
def get_num_lidar_pts_list(file_names_lidar):
num_lidar_points = []
start = time.time()
for file_lidar in file_names_lidar:
n_points = len(np.load(file_lidar)['points'])
num_lidar_points.append(n_points)
end = time.time() - start
return num_lidar_points
# Create a histogram
def create_hist_pts(points, xlabel='number of points', ylabel='freq', title='Histogram of points'):
fig = pt.hist(points, 1000)
pt.xlabel(xlabel)
pt.ylabel(ylabel)
pt.title(title)
pt.show()
return fig
# Save this list in a file
def save_list_to_pfile(list_, file_name='file.pkl'):
with open(file_name, 'wb') as filehandle:
pickle.dump(list_, filehandle)
# Load Lidar data
N = 10000
lidar_file_list = root_path + f'../dataset/lidar_files_{N}.pkl'
if Path(lidar_file_list).is_file():
with open(lidar_file_list, 'rb') as handle:
file_names_lidar = pickle.load(handle)
else:
# Get the list of files in lidar directory
lidar_dirs = '*/lidar/*/*.npz' # ALL LIDAR
# lidar_dirs = '20180925_124435/lidar/*/*.npz' # 1 - Front and Sides
# lidar_dirs = '*/lidar/cam_front_center/*.npz' # ALL front center
file_names_lidar = sorted(glob.glob(join(root_path, lidar_dirs)))
# Extract Lidar files with minimum N points
num_lidar_points_list = get_num_lidar_pts_list(file_names_lidar)
# Create Histogram
create_hist_pts(num_lidar_points_list, title='Histogram of Lidar data-points')
file_names_lidar = [file_names_lidar[_] for _ in range(len(num_lidar_points_list))\
if num_lidar_points_list[_] >= N ]
print(f'There are {len(file_names_lidar)} files with at least {N} lidar points')
# Save list to file
save_list_to_pfile(file_names_lidar, lidar_file_list) | _____no_output_____ | MIT | data_processing/lidar_data_processing.ipynb | abhitoronto/KITTI_ROAD_SEGMENTATION |
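Before the heavy processing below, a quick sketch for inspecting one of the selected lidar records; the key names follow the list documented earlier in this notebook:

```
# Peek at the first selected lidar file.
sample = np.load(file_names_lidar[0])
print(sorted(sample.files))      # e.g. ['azimuth', 'col', 'depth', 'distance', ...]
print(sample['points'].shape)    # (N, 3) x/y/z points, N >= 10000 after the filtering above
print(sample['distance'].min(), sample['distance'].max())
```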
LIDAR DATA PROCESSING | def get_image_files(lidar_file, method_type):
# Create Lidar_x Lidar_y Lidar_z directory
lx_file = extract_sensor_file_name(lidar_file, root_path, f'lidar-x-{method_type}', 'png')
ly_file = extract_sensor_file_name(lidar_file, root_path, f'lidar-y-{method_type}', 'png')
lz_file = extract_sensor_file_name(lidar_file, root_path, f'lidar-z-{method_type}', 'png')
l_color_file = extract_sensor_file_name(lidar_file, root_path, 'lidar-image', 'png')
img_file = extract_image_file_name_from_any_file_name(lidar_file, root_path)
return img_file, lx_file, ly_file, lz_file, l_color_file
# Create Upsampled LIDAR image
# Iterate over Lidar Files
# for lidar_file in file_names_lidar:
def create_dense_lidar_images_upsample(lidar_file, project_lidar=False):
if project_lidar:
lidar_on_image, lidar_data = get_lidar_on_image(lidar_file, config, root_path)
else:
lidar_data = np.load(lidar_file)
## CONSTANTS
NEIGHBOUR_RADIUS = 40 #Pixels
INVERSE_COFF = 0.5
DEPTH_COFF = 0
CUTOFF_THRESH = 0.4
PIXEL_THRESH = 1/(1+INVERSE_COFF*NEIGHBOUR_RADIUS)
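# Each lidar hit spreads its x/y/z values over the pixels within NEIGHBOUR_RADIUS,
# using the inverse-distance / depth weight
#   w(r, d) = (1 - DEPTH_COFF * d / max_distance) / (1 + INVERSE_COFF * r),
# where r is the pixel distance to the projected hit and d is its lidar depth.
# Per-point weights below PIXEL_THRESH are zeroed; after accumulation the
# numerators are divided by the accumulated weights, and accumulated weights
# below CUTOFF_THRESH are dropped so sparsely supported pixels stay empty.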
# Create Lidar_x Lidar_y Lidar_z directory
img_file, lx_file, ly_file, lz_file, l_color_file = get_image_files(lidar_file, 'upsample')
# TODO: Check if files already exist
lx_cam_dir = get_prev_directory(lx_file)
ly_cam_dir = get_prev_directory(ly_file)
lz_cam_dir = get_prev_directory(lz_file)
l_color_cam_dir = get_prev_directory(l_color_file)
lx_dir = get_prev_directory(lx_cam_dir)
ly_dir = get_prev_directory(ly_cam_dir)
lz_dir = get_prev_directory(lz_cam_dir)
l_color_dir = get_prev_directory(l_color_cam_dir)
create_unique_dir(lx_dir)
create_unique_dir(ly_dir)
create_unique_dir(lz_dir)
create_unique_dir(l_color_dir)
create_unique_dir(lx_cam_dir)
create_unique_dir(ly_cam_dir)
create_unique_dir(lz_cam_dir)
create_unique_dir(l_color_cam_dir)
# Load Lidar Data and find max distance
rows = (lidar_data['row'] + 0.5).astype(int)
cols = (lidar_data['col'] + 0.5).astype(int)
rows_float = np.array(lidar_data['row'])
cols_float = np.array(lidar_data['col'])
lidar_points = np.array(lidar_data['points'])
lidar_depth = np.array(lidar_data['distance'])
max_distance = np.max(lidar_depth)
if DEBUG:
print(f'max distance: {max_distance}')
if DEBUG:
print(f'Processing {lx_file}')
# create X,Y,Z images
img_file = extract_image_file_name_from_any_file_name(lidar_file, root_path)
img_x = get_cv2_image(img_file ,cv2.COLOR_BGR2GRAY) # Grayscale image only has one channel
img_dim = np.shape(img_x)
img_x_num = np.zeros(img_dim)
img_y_num = img_x_num.copy()
img_z_num = img_x_num.copy()
img_den = np.zeros(img_dim)
x_or = np.zeros(img_dim)
# Iterate Over LIDAR points
if DEBUG:
print(f'total Lidar Points: {len(rows)}')
for lid_idx in range(len(rows)):
idx_a = np.arange(np.maximum(rows[lid_idx] - NEIGHBOUR_RADIUS, 0),\
np.minimum(rows[lid_idx] + NEIGHBOUR_RADIUS + 1, img_dim[0]))
idx_b = np.arange(np.maximum(cols[lid_idx] - NEIGHBOUR_RADIUS, 0),\
np.minimum(cols[lid_idx] + NEIGHBOUR_RADIUS + 1, img_dim[1]))
dist_row = (rows_float[lid_idx] - idx_a)
dist_col = (cols_float[lid_idx] - idx_b)
if len(idx_a) != len(dist_row) or len(idx_b) != len(dist_col):
print(str(rows_float[lid_idx]) + ", " + str(cols_float[lid_idx]))
print(f'{len(idx_a)}, {len(idx_b)}')
print(f'{len(dist_row)}, {len(dist_col)}')
break
dist_row_mat = np.array([dist_row]).T * np.ones(len(dist_col))
dist_col_mat = np.ones((len(dist_row), 1)) * np.array([dist_col])
temp_mat = ( 1 - DEPTH_COFF*lidar_depth[lid_idx]/max_distance)/\
( 1 + INVERSE_COFF*np.sqrt( np.square(dist_row_mat) + np.square(dist_col_mat)))
# Cap the lowest value of denominator
temp_mat[temp_mat < PIXEL_THRESH ] = 0.0
img_den[np.ix_(idx_a,idx_b)] += temp_mat
# img_den[np.ix_(idx_a,idx_b)] += ( 1 - DEPTH_COFF*lidar_data['distance'][lid_idx]/max_distance)/\
# ( 1 + INVERSE_COFF*np.sqrt( np.square(dist_row_mat) + np.square(dist_col_mat)))
img_x_num[np.ix_(idx_a,idx_b)] += img_den[idx_a][:,idx_b] * lidar_points[lid_idx,0]
img_y_num[np.ix_(idx_a,idx_b)] += img_den[idx_a][:,idx_b] * lidar_points[lid_idx,1]
img_z_num[np.ix_(idx_a,idx_b)] += img_den[idx_a][:,idx_b] * lidar_points[lid_idx,2]
print(f'Creating Image: {lx_file}\n')
# Cap the lowest value of denominator
img_den[img_den < CUTOFF_THRESH] = 0.0
img_x_num = np.divide(img_x_num, img_den, out=np.zeros_like(img_x_num), where=img_den!=0) # Divide by 0 is a 0
img_y_num = np.divide(img_y_num, img_den, out=np.zeros_like(img_y_num), where=img_den!=0) # Divide by 0 is a 0
img_z_num = np.divide(img_z_num, img_den, out=np.zeros_like(img_z_num), where=img_den!=0) # Divide by 0 is a 0
img_x_num = normalize_vector(img_x_num, 0.0, 2**16).astype(np.uint16)
img_y_num = normalize_vector(img_y_num, 0.0, 2**16).astype(np.uint16)
img_z_num = normalize_vector(img_z_num, 0.0, 2**16).astype(np.uint16)
# img_x_num[np.argwhere(img_x_num == 0.0)] = 255.0
cv2.imwrite(lx_file, img_x_num)
cv2.imwrite(ly_file, img_y_num)
cv2.imwrite(lz_file, img_z_num)
if project_lidar:
cv2.imwrite(l_color_file, lidar_on_image)
if DEBUG:
print(f'Saving {lx_file}')
print(f'Saving {ly_file}')
print(f'Saving {lz_file}')
return img_file, lx_file, ly_file, lz_file, l_color_file
# Using Waslander code here
from ip_basic import depth_map_utils
def create_dense_lidar_images_ip_basic(lidar_file, project_lidar=False):
if project_lidar:
lidar_on_image, lidar_data = get_lidar_on_image(lidar_file, config, root_path)
else:
lidar_data = np.load(lidar_file)
# Create Lidar_x Lidar_y Lidar_z directory
img_file, lx_file, ly_file, lz_file, l_color_file = get_image_files(lidar_file, 'ip')
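# NOTE: the leading `False and` below disables the skip-if-already-generated
# shortcut, so existing lidar images are always regenerated; drop `False and`
# to re-enable the early return.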
if False and Path(lx_file).is_file() and Path(ly_file).is_file() and Path(lz_file).is_file():
return img_file, lx_file, ly_file, lz_file, l_color_file
# TODO: Check if files already exist
lx_cam_dir = get_prev_directory(lx_file)
ly_cam_dir = get_prev_directory(ly_file)
lz_cam_dir = get_prev_directory(lz_file)
l_color_cam_dir = get_prev_directory(l_color_file)
lx_dir = get_prev_directory(lx_cam_dir)
ly_dir = get_prev_directory(ly_cam_dir)
lz_dir = get_prev_directory(lz_cam_dir)
l_color_dir = get_prev_directory(l_color_cam_dir)
create_unique_dir(lx_dir)
create_unique_dir(ly_dir)
create_unique_dir(lz_dir)
create_unique_dir(l_color_dir)
create_unique_dir(lx_cam_dir)
create_unique_dir(ly_cam_dir)
create_unique_dir(lz_cam_dir)
create_unique_dir(l_color_cam_dir)
# Load Lidar Data and find max distance
rows = (lidar_data['row'] + 0.5).astype(int)
cols = (lidar_data['col'] + 0.5).astype(int)
lidar_points = np.array(lidar_data['points'])
lidar_points_x = normalize_vector(lidar_points[:,0], 2**8 * 0.1 + 1, 2**16 - 1)
lidar_points_y = normalize_vector(lidar_points[:,1], 2**8 * 0.1 + 1, 2**16 - 1)
lidar_points_z = normalize_vector(lidar_points[:,2], 2**8 * 0.1 + 1, 2**16 - 1)
# create X,Y,Z images
img_x = get_cv2_image(img_file ,cv2.COLOR_BGR2GRAY) # Grayscale image only has one channel
img_dim = np.shape(img_x)
img_x_num = np.zeros(img_dim, dtype=np.uint16)
img_y_num = img_x_num.copy()
img_z_num = img_x_num.copy()
# Iterate Over LIDAR points
if DEBUG:
print(f'total Lidar Points: {len(rows)}')
if DEBUG:
print(f'Processing {lx_file}')
for lid_idx in range(len(rows)):
idx_a = np.clip(rows[lid_idx], 0, img_dim[0]-1)
idx_b = np.clip(cols[lid_idx], 0, img_dim[1]-1)
img_x_num[idx_a,idx_b] = lidar_points_x[lid_idx]
img_y_num[idx_a,idx_b] = lidar_points_y[lid_idx]
img_z_num[idx_a,idx_b] = lidar_points_z[lid_idx]
projected_x = np.float32(img_x_num/256.0)
projected_y = np.float32(img_y_num/256.0)
projected_z = np.float32(img_z_num/256.0)
projected_x = depth_map_utils.fill_in_fast( projected_x, max_depth=2**8 + 1)
projected_y = depth_map_utils.fill_in_fast( projected_y, max_depth=2**8 + 1)
projected_z = depth_map_utils.fill_in_fast( projected_z, max_depth=2**8 + 1)
img_x_num = (projected_x * 256.0).astype(np.uint16)
img_y_num = (projected_y * 256.0).astype(np.uint16)
img_z_num = (projected_z * 256.0).astype(np.uint16)
print(f'Creating Image: {lx_file}\n')
cv2.imwrite(lx_file, img_x_num)
cv2.imwrite(ly_file, img_y_num)
cv2.imwrite(lz_file, img_z_num)
if project_lidar:
cv2.imwrite(l_color_file, lidar_on_image)
return img_file, lx_file, ly_file, lz_file, l_color_file
from multiprocessing import Pool
NUM_WORKERS = 6
def create_dense_lidar_images(file_names_lidar, image_range = (0, 100), use_mp=True,n_worker = NUM_WORKERS):
process_files = file_names_lidar[image_range[0]:image_range[1]]
ip_files = []
upsample_files = []
if use_mp:
pool1 = Pool(n_worker)
start = time.time()
pool1.map(create_dense_lidar_images_ip_basic, process_files)
compute_time_ip = (time.time() - start) / len(process_files)
start = time.time()
pool1.map(create_dense_lidar_images_upsample, process_files)
compute_time_upsample = (time.time() - start) / len(process_files)
else:
start = time.time()
for lidar_file in process_files:
out_ip = create_dense_lidar_images_ip_basic(lidar_file)
ip_files.append(list(out_ip)[:-1])
compute_time_ip = (time.time() - start) / len(process_files)
start = time.time()
for lidar_file in process_files:
out_upsample = create_dense_lidar_images_upsample(lidar_file)
upsample_files.append(out_upsample)
compute_time_upsample = (time.time() - start) / len(process_files)
print(f'Processing time per image (ip_basic): {compute_time_ip} seconds')
print(f'Processing time per image (upsampling): {compute_time_upsample} seconds')
return ip_files, upsample_files
# LIDAR data Processing
num_images = 20
ip_files = []
upsample_files = []
processed_lidar_files = file_names_lidar[0:num_images]
ip_files, upsample_files = \
create_dense_lidar_images(file_names_lidar, image_range=(0,num_images), use_mp=True) | Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000009806.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000009489.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000006176.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000009789.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000006128.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000009786.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000009813.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000009820.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000009861.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000009899.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000009944.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000009912.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000010313.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000010193.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000012121.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000014481.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000014548.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000014772.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000014962.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-ip/cam_front_center/20180807145028_lidar-x-ip_frontcenter_000014943.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000006128.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000009789.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000009786.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000009489.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000006176.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000009806.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000009820.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000009813.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000009861.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000009912.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000009899.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000009944.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000010313.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000010193.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000012121.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000014548.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000014481.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000014772.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000014943.png
Creating Image: /hdd/a2d2-data/camera_lidar_semantic/20180807_145028/lidar-x-upsample/cam_front_center/20180807145028_lidar-x-upsample_frontcenter_000014962.png
Processing time per image (ip_basic): 0.30456315279006957 seconds
Processing time per image (upsampling): 3.206155979633331 seconds
| MIT | data_processing/lidar_data_processing.ipynb | abhitoronto/KITTI_ROAD_SEGMENTATION |
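As a follow-up check, a hedged sketch for reading back one of the generated 16-bit lidar channel images; the paths are rebuilt with `get_image_files`, since the multiprocessing branch above does not collect them:

```
# Load the ip_basic x-channel image for the first processed lidar file and
# display it; cv2.IMREAD_UNCHANGED preserves the 16-bit depth.
_, lx_file, _, _, _ = get_image_files(file_names_lidar[0], 'ip')
lx = cv2.imread(lx_file, cv2.IMREAD_UNCHANGED)
pt.figure(figsize=(10, 10))
pt.imshow(lx.astype(np.float32) / float(2**16 - 1), cmap='gray')
pt.axis('off')
pt.show()
```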