markdown (stringlengths 0-37k) | code (stringlengths 1-33.3k) | path (stringlengths 8-215) | repo_name (stringlengths 6-77) | license (stringclasses, 15 values) |
---|---|---|---|---|
Deploy ANN index component
This component deploys an ANN index to an ANN Endpoint.
The component tracks the deployed index in the TFX custom DeployedANNIndex artifact. | %%writefile deploy_index.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deploys an ANN index."""
import logging
import numpy as np
import uuid
import tfx
import tensorflow as tf
from google.cloud import bigquery
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import InputArtifact, OutputArtifact, Parameter
from tfx.types.experimental.simple_artifacts import Dataset
from ann_service import IndexDeploymentClient
from ann_types import ANNIndex
from ann_types import DeployedANNIndex
@component
def deploy_index(
project_id: Parameter[str],
project_number: Parameter[str],
region: Parameter[str],
vpc_name: Parameter[str],
deployed_index_id_prefix: Parameter[str],
ann_index: InputArtifact[ANNIndex],
deployed_ann_index: OutputArtifact[DeployedANNIndex]
):
deployment_client = IndexDeploymentClient(project_id,
project_number,
region)
index_name = ann_index.get_string_custom_property('index_name')
index_display_name = ann_index.get_string_custom_property('index_display_name')
endpoint_display_name = f'Endpoint for {index_display_name}'
logging.info(f'Creating endpoint: {endpoint_display_name}')
operation_id = deployment_client.create_endpoint(endpoint_display_name, vpc_name)
response = deployment_client.wait_for_completion(operation_id, 'Waiting for endpoint', 30)
endpoint_name = response['name']
logging.info(f'Endpoint created: {endpoint_name}')
endpoint_id = endpoint_name.split('/')[-1]
index_id = index_name.split('/')[-1]
deployed_index_display_name = f'Deployed {index_display_name}'
deployed_index_id = deployed_index_id_prefix + str(uuid.uuid4())
logging.info(f'Creating deployed index: {deployed_index_id}')
logging.info(f' from: {index_name}')
operation_id = deployment_client.create_deployment(
deployed_index_display_name,
deployed_index_id,
endpoint_id,
index_id)
response = deployment_client.wait_for_completion(operation_id, 'Waiting for deployment', 60)
logging.info('Index deployed!')
deployed_index_ip = deployment_client.get_deployment_grpc_ip(
endpoint_id, deployed_index_id
)
# Write the deployed index properties to metadata.
deployed_ann_index.set_string_custom_property('endpoint_name',
endpoint_name)
deployed_ann_index.set_string_custom_property('deployed_index_id',
deployed_index_id)
deployed_ann_index.set_string_custom_property('index_name',
index_name)
deployed_ann_index.set_string_custom_property('deployed_index_grpc_ip',
deployed_index_ip)
| retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
Creating a TFX pipeline
The pipeline automates the process of preparing item embeddings (in BigQuery), training a Matrix Factorization model (in BQML), and creating and deploying an ANN Service index.
The pipeline has a simple sequential flow. It accepts a set of runtime parameters that define the GCP environment settings and the embedding and index assembly parameters. | import os
# Only required for local run.
from tfx.orchestration.metadata import sqlite_metadata_connection_config
from tfx.orchestration.pipeline import Pipeline
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner
from compute_pmi import compute_pmi
from export_embeddings import export_embeddings
from extract_embeddings import extract_embeddings
from train_item_matching import train_item_matching_model
from create_index import create_index
from deploy_index import deploy_index
def ann_pipeline(
pipeline_name,
pipeline_root,
metadata_connection_config,
project_id,
project_number,
region,
vpc_name,
bq_dataset_name,
min_item_frequency,
max_group_size,
dimensions,
embeddings_gcs_location,
index_display_name,
deployed_index_id_prefix) -> Pipeline:
"""Implements the SCANN training pipeline."""
pmi_computer = compute_pmi(
project_id=project_id,
bq_dataset=bq_dataset_name,
min_item_frequency=min_item_frequency,
max_group_size=max_group_size
)
bqml_trainer = train_item_matching_model(
project_id=project_id,
bq_dataset=bq_dataset_name,
item_cooc=pmi_computer.outputs.item_cooc,
dimensions=dimensions,
)
embeddings_extractor = extract_embeddings(
project_id=project_id,
bq_dataset=bq_dataset_name,
bq_model=bqml_trainer.outputs.bq_model
)
embeddings_exporter = export_embeddings(
project_id=project_id,
gcs_location=embeddings_gcs_location,
item_embeddings_bq=embeddings_extractor.outputs.item_embeddings
)
index_constructor = create_index(
project_id=project_id,
project_number=project_number,
region=region,
display_name=index_display_name,
dimensions=dimensions,
item_embeddings=embeddings_exporter.outputs.item_embeddings_gcs
)
index_deployer = deploy_index(
project_id=project_id,
project_number=project_number,
region=region,
vpc_name=vpc_name,
deployed_index_id_prefix=deployed_index_id_prefix,
ann_index=index_constructor.outputs.ann_index
)
components = [
pmi_computer,
bqml_trainer,
embeddings_extractor,
embeddings_exporter,
index_constructor,
index_deployer
]
return Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
# Only needed for local runs.
metadata_connection_config=metadata_connection_config,
components=components) | retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
Testing the pipeline locally
You will first run the pipeline locally using the Beam runner.
Clean the metadata and artifacts from the previous runs | pipeline_root = f'/tmp/{PIPELINE_NAME}'
local_mlmd_folder = '/tmp/mlmd'
if tf.io.gfile.exists(pipeline_root):
print("Removing previous artifacts...")
tf.io.gfile.rmtree(pipeline_root)
if tf.io.gfile.exists(local_mlmd_folder):
print("Removing local mlmd SQLite...")
tf.io.gfile.rmtree(local_mlmd_folder)
print("Creating mlmd directory: ", local_mlmd_folder)
tf.io.gfile.mkdir(local_mlmd_folder)
print("Creating pipeline root folder: ", pipeline_root)
tf.io.gfile.mkdir(pipeline_root) | retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
Set pipeline parameters and create the pipeline | bq_dataset_name = 'song_embeddings'
index_display_name = 'Song embeddings'
deployed_index_id_prefix = 'deployed_song_embeddings_'
min_item_frequency = 15
max_group_size = 100
dimensions = 50
embeddings_gcs_location = f'gs://{BUCKET_NAME}/embeddings'
metadata_connection_config = sqlite_metadata_connection_config(
os.path.join(local_mlmd_folder, 'metadata.sqlite'))
pipeline = ann_pipeline(
pipeline_name=PIPELINE_NAME,
pipeline_root=pipeline_root,
metadata_connection_config=metadata_connection_config,
project_id=PROJECT_ID,
project_number=PROJECT_NUMBER,
region=REGION,
vpc_name=VPC_NAME,
bq_dataset_name=bq_dataset_name,
index_display_name=index_display_name,
deployed_index_id_prefix=deployed_index_id_prefix,
min_item_frequency=min_item_frequency,
max_group_size=max_group_size,
dimensions=dimensions,
embeddings_gcs_location=embeddings_gcs_location
) | retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
Start the run | import logging
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner  # assumed import; may already be available from an earlier cell
logging.getLogger().setLevel(logging.INFO)
BeamDagRunner().run(pipeline) | retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
Inspect produced metadata
During the execution of the pipeline, the inputs and outputs of each component have been tracked in ML Metadata. | from ml_metadata import metadata_store
from ml_metadata.proto import metadata_store_pb2
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.sqlite.filename_uri = os.path.join(local_mlmd_folder, 'metadata.sqlite')
connection_config.sqlite.connection_mode = 3 # READWRITE_OPENCREATE
store = metadata_store.MetadataStore(connection_config)
store.get_artifacts() | retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
NOTICE. The following code does not work with ANN Service Experimental. It will be finalized when the service moves to the Preview stage.
Running the pipeline on AI Platform Pipelines
You will now run the pipeline on AI Platform Pipelines (Unified)
Package custom components into a container
The modules containing the custom components must first be packaged as a Docker container image, which is a derivative of the standard TFX image.
Create a Dockerfile | %%writefile Dockerfile
FROM gcr.io/tfx-oss-public/tfx:0.25.0
WORKDIR /pipeline
COPY ./ ./
ENV PYTHONPATH="/pipeline:${PYTHONPATH}" | retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
Build and push the docker image to Container Registry | !gcloud builds submit --tag gcr.io/{PROJECT_ID}/caip-tfx-custom:{USER} . | retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
Create AI Platform Pipelines client | from aiplatform.pipelines import client
aipp_client = client.Client(
project_id=PROJECT_ID,
region=REGION,
api_key=API_KEY
) | retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
Set the parameters for AIPP execution and create the pipeline | metadata_connection_config = None
pipeline_root = PIPELINE_ROOT
pipeline = ann_pipeline(
pipeline_name=PIPELINE_NAME,
pipeline_root=pipeline_root,
metadata_connection_config=metadata_connection_config,
project_id=PROJECT_ID,
project_number=PROJECT_NUMBER,
region=REGION,
vpc_name=VPC_NAME,
bq_dataset_name=bq_dataset_name,
index_display_name=index_display_name,
deployed_index_id_prefix=deployed_index_id_prefix,
min_item_frequency=min_item_frequency,
max_group_size=max_group_size,
dimensions=dimensions,
embeddings_gcs_location=embeddings_gcs_location
) | retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
Compile the pipeline | config = kubeflow_v2_dag_runner.KubeflowV2DagRunnerConfig(
project_id=PROJECT_ID,
display_name=PIPELINE_NAME,
default_image='gcr.io/{}/caip-tfx-custom:{}'.format(PROJECT_ID, USER))
runner = kubeflow_v2_dag_runner.KubeflowV2DagRunner(
config=config,
output_filename='pipeline.json')
runner.compile(
pipeline,
write_out=True) | retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
Submit the pipeline run | aipp_client.create_run_from_job_spec('pipeline.json') | retail/recommendation-system/bqml-scann/ann02_run_pipeline.ipynb | GoogleCloudPlatform/analytics-componentized-patterns | apache-2.0 |
Data Sampling | x_0 = np.random.normal(loc=[-4, 2], scale=0.5, size=(100, 2))
x_1 = np.random.normal(loc=[-4, -3], scale=0.5, size=(100, 2))
x_2 = np.random.normal(loc=[4, 1], scale=0.5, size=(100, 2))
x_3 = np.random.normal(loc=[5, -2], scale=0.5, size=(100, 2))
X = np.vstack([x_0, x_1, x_2, x_3])
Y = np.ones(X.shape[0], dtype=np.intc)
Y[:100] = 0
Y[100:200] = 1
Y[200:300] = 2
Y[300:] = 3
ml.plotClassify2D(None, X, Y)
classes = np.unique(Y)
print classes | week4/multiclass_perceptron.ipynb | sameersingh/ml-discussions | apache-2.0 |
Multiclass Perceptron Training Algorithm
<img src = 'extras/multiclass.png'>
One of the main differences is that now there is a $\theta_c$ for each class. So in the algorithm above $\theta$ is basically of size $\#\text{Classes} \times \#\text{Features}$.
To find the class, instead of using the sign threshold on the response, we are looking for the class that maximizes the response.
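To make that concrete, here is a tiny numeric sketch (the numbers are hypothetical, purely to illustrate the argmax over per-class responses):
```
import numpy as np

# Hypothetical theta of shape (num_classes, num_features + 1); column 0 multiplies the bias term.
theta_demo = np.array([[ 0.1,  1.0, -0.5],
                       [-0.2, -1.0,  0.3],
                       [ 0.4,  0.2,  0.9]])
x_demo = np.array([1.0, 2.0, -1.0])   # one sample, with a leading 1 for the bias term

responses = theta_demo.dot(x_demo)    # one response per class
print(responses)                      # [ 2.6 -2.5 -0.1]
print(np.argmax(responses))           # predicted class = index of the largest response -> 0
```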
So let's adapt the code from the previous discussion to do this.
Let's add the constant (bias) column to X and create the theta matrix. | # Like previous discussion
def add_const(X):
return np.hstack([np.ones([X.shape[0], 1]), X])
Xconst = add_const(X)
theta = np.random.randn(classes.shape[0], Xconst.shape[1]) # Adding 1 for theta corresponding to bias term
x_j, y_j = Xconst[5], Y[5]
# The response is also the same, only we transpose the theta.
def resp(x, theta):
return np.dot(x, theta.T) | week4/multiclass_perceptron.ipynb | sameersingh/ml-discussions | apache-2.0 |
For the predict we need to find the class that maximizes the response. We can do this with np.argmax(). | def predict(x, theta):
r = resp(x, theta)
return np.argmax(np.atleast_2d(r), axis=1)
# Error stays the same
def pred_err(X, Y, theta):
"""Predicts that class for X and returns the error rate. """
Yhat = predict(X, theta)
return np.mean(Yhat != Y)
pred_vals = predict(x_j, theta)
print 'Predicted class %d, True class is %d' % (pred_vals, y_j) | week4/multiclass_perceptron.ipynb | sameersingh/ml-discussions | apache-2.0 |
Learning Update | a = 0.1
y_j_hat = predict(x_j, theta)
theta[y_j_hat] -= a * x_j
theta[y_j] += a * x_j | week4/multiclass_perceptron.ipynb | sameersingh/ml-discussions | apache-2.0 |
Train method
Using everything we coded so far, let's code the training method. | def train(X, Y, a=0.01, stop_tol=1e-8, max_iter=50):
Xconst = add_const(X)
m, n = Xconst.shape
c = np.unique(Y).shape[0]
# Initializing theta
theta = np.random.rand(c, n)
# The update loop
J_err = [np.inf]
for i in xrange(1, max_iter + 1):
for j in range(m):
x_j, y_j = Xconst[j], Y[j]
y_j_hat = predict(x_j, theta)
theta[y_j_hat] -= a * x_j
theta[y_j] += a * x_j
curr_err = pred_err(Xconst, Y, theta)
J_err.append(curr_err)
print 'Error %.3f at iteration %d' % (J_err[-1], i)
return theta, J_err | week4/multiclass_perceptron.ipynb | sameersingh/ml-discussions | apache-2.0 |
Multiclass Perceptron Object
Let us put this all in a class MultiClassPerceptron. | from mltools.base import classifier
class MultiClassPerceptron(classifier):
def __init__(self, theta=None):
self.theta = theta
def add_const(self, X):
return np.hstack([np.ones([X.shape[0], 1]), X])
def resp(self, x):
return np.dot(x, self.theta.T)
def predict(self, X):
"""Retruns class prediction for either single point or multiple points. """
Xconst = np.atleast_2d(X)
# Making sure it has the const, if not adding it.
if Xconst.shape[1] == self.theta.shape[1] - 1:
Xconst = self.add_const(Xconst)
r = self.resp(Xconst)
return np.argmax(np.atleast_2d(r), axis=1)
# Notice that we don't need the sign function (from Perceptron class) any longer
# def sign(self, vals):
# """A sign version with breaking 0's as +1. """
# return np.sign(vals + 1e-200)
def pred_err(self, X, Y):
Yhat = self.predict(X)
return np.mean(Yhat != Y)
def train(self, X, Y, a=0.01, stop_tol=1e-8, max_iter=50):
# Start by adding a const
Xconst = self.add_const(X)
m, n = Xconst.shape
c = np.unique(Y).shape[0]
self.classes = np.unique(Y)
# Making sure theta is initialized.
if self.theta is None:
self.theta = np.random.randn(c, n)
# The update loop
J_err = [np.inf]
for i in xrange(1, max_iter + 1):
for j in np.random.permutation(m):
x_j, y_j = Xconst[j], Y[j]
y_j_hat = self.predict(x_j)
self.theta[y_j_hat[0]] -= a * x_j
self.theta[y_j] += a * x_j
curr_err = self.pred_err(Xconst, Y)
J_err.append(curr_err)
return J_err | week4/multiclass_perceptron.ipynb | sameersingh/ml-discussions | apache-2.0 |
Let's train and plot :) | model = MultiClassPerceptron()
j_err = model.train(X, Y, a=.02, max_iter=50)
ml.plotClassify2D(model, X, Y) | week4/multiclass_perceptron.ipynb | sameersingh/ml-discussions | apache-2.0 |
Bonus question
In the plot below we have two classes. Assuming I want a multiclass perceptron with 2 classes, what would theta have to be to separate them correctly? | x_0 = np.random.normal(loc=[-2, 2], scale=0.5, size=(100, 2))
x_1 = np.random.normal(loc=[2, 2], scale=0.5, size=(100, 2))
X = np.vstack([x_0, x_1])
Y = np.ones(X.shape[0], dtype=np.intc)
Y[:100] = 0
Y[100:200] = 1
ml.plotClassify2D(None, X, Y)
theta = ???? # Fill in the code and run
model = MultiClassPerceptron(theta)
ml.plotClassify2D(model, X, Y) | week4/multiclass_perceptron.ipynb | sameersingh/ml-discussions | apache-2.0 |
Load metrics from a Prometheus server
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/io/tutorials/prometheus"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/io/tutorials/prometheus.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab에서 실행하기</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/io/tutorials/prometheus.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub에서소스 보기</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/io/tutorials/prometheus.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드하기</a></td>
</table>
Caution: In addition to the Python packages, this notebook uses sudo apt-get install to install third-party packages.
Overview
This tutorial loads CoreDNS metrics from a Prometheus server into a tf.data.Dataset, then uses tf.keras for training and inference.
CoreDNS is a DNS server with a focus on service discovery, and it is widely deployed as part of Kubernetes clusters. For that reason it is often closely monitored in operations.
This tutorial is an example that could be used by DevOps teams looking to automate operations through machine learning.
Setup and usage
Install the required tensorflow-io package, and restart the runtime | import os
try:
%tensorflow_version 2.x
except Exception:
pass
!pip install tensorflow-io
from datetime import datetime
import tensorflow as tf
import tensorflow_io as tfio | site/ko/io/tutorials/prometheus.ipynb | tensorflow/docs-l10n | apache-2.0 |
Install and set up CoreDNS and Prometheus
For demo purposes, a CoreDNS server runs locally with port 9053 open to receive DNS queries and port 9153 (the default) open to expose metrics for scraping. The following is a basic Corefile configuration for CoreDNS, which is available for download:

    .:9053 {
      prometheus
      whoami
    }

More details about the installation can be found in the CoreDNS documentation. | !curl -s -OL https://github.com/coredns/coredns/releases/download/v1.6.7/coredns_1.6.7_linux_amd64.tgz
!tar -xzf coredns_1.6.7_linux_amd64.tgz
!curl -s -OL https://raw.githubusercontent.com/tensorflow/io/master/docs/tutorials/prometheus/Corefile
!cat Corefile
# Run `./coredns` as a background process.
# IPython doesn't recognize `&` in inline bash cells.
get_ipython().system_raw('./coredns &') | site/ko/io/tutorials/prometheus.ipynb | tensorflow/docs-l10n | apache-2.0 |
The next step is to set up a Prometheus server and use Prometheus to scrape the CoreDNS metrics exposed on port 9153 above. The prometheus.yml configuration file is also available for download. | !curl -s -OL https://github.com/prometheus/prometheus/releases/download/v2.15.2/prometheus-2.15.2.linux-amd64.tar.gz
!tar -xzf prometheus-2.15.2.linux-amd64.tar.gz --strip-components=1
!curl -s -OL https://raw.githubusercontent.com/tensorflow/io/master/docs/tutorials/prometheus/prometheus.yml
!cat prometheus.yml
# Run `./prometheus` as a background process.
# IPython doesn't recognize `&` in inline bash cells.
get_ipython().system_raw('./prometheus &') | site/ko/io/tutorials/prometheus.ipynb | tensorflow/docs-l10n | apache-2.0 |
To show some activity, the dig command can be used to generate a few DNS queries against the CoreDNS server that was set up. | !sudo apt-get install -y -qq dnsutils
!dig @127.0.0.1 -p 9053 demo1.example.org
!dig @127.0.0.1 -p 9053 demo2.example.org | site/ko/io/tutorials/prometheus.ipynb | tensorflow/docs-l10n | apache-2.0 |
Now the CoreDNS server's metrics are scraped by the Prometheus server and are ready to be consumed by TensorFlow.
Create a Dataset for CoreDNS metrics and use it in TensorFlow
Create a Dataset for the CoreDNS metrics that are available from the Prometheus server, which can be done with tfio.experimental.IODataset.from_prometheus. At a minimum, two arguments are needed: query is passed to the Prometheus server to select the metrics, and length is the period you want to load into the Dataset.
You could start with "coredns_dns_request_count_total" and "5" (seconds) to create the Dataset below. Since two DNS queries were sent earlier in the tutorial, the metric for "coredns_dns_request_count_total" is expected to be "2.0" at the end of the time series: | dataset = tfio.experimental.IODataset.from_prometheus(
"coredns_dns_request_count_total", 5, endpoint="http://localhost:9090")
print("Dataset Spec:\n{}\n".format(dataset.element_spec))
print("CoreDNS Time Series:")
for (time, value) in dataset:
# time is milli second, convert to data time:
time = datetime.fromtimestamp(time // 1000)
print("{}: {}".format(time, value['coredns']['localhost:9153']['coredns_dns_request_count_total'])) | site/ko/io/tutorials/prometheus.ipynb | tensorflow/docs-l10n | apache-2.0 |
Let's take a deeper look into the spec of the Dataset:

    (
      TensorSpec(shape=(), dtype=tf.int64, name=None),
      {
        'coredns': {
          'localhost:9153': {
            'coredns_dns_request_count_total': TensorSpec(shape=(), dtype=tf.float64, name=None)
          }
        }
      }
    )

It is obvious that the dataset consists of a (time, values) tuple, where the values field is a Python dict expanded into:

    "job_name": {
      "instance_name": {
        "metric_name": value,
      },
    }

In the above example, 'coredns' is the job name, 'localhost:9153' is the instance name, and 'coredns_dns_request_count_total' is the metric name. Note that, depending on the Prometheus query used, multiple jobs/instances/metrics could be returned. This is also the reason why a Python dict is used in the structure of the Dataset.
Take another query, "go_memstats_gc_sys_bytes", as an example. Since both CoreDNS and Prometheus are written in Golang, the "go_memstats_gc_sys_bytes" metric is available for both the "coredns" job and the "prometheus" job.
Note: This cell may error out the first time you run it. Run it again and it will pass. | dataset = tfio.experimental.IODataset.from_prometheus(
"go_memstats_gc_sys_bytes", 5, endpoint="http://localhost:9090")
print("Time Series CoreDNS/Prometheus Comparision:")
for (time, value) in dataset:
# time is milli second, convert to data time:
time = datetime.fromtimestamp(time // 1000)
print("{}: {}/{}".format(
time,
value['coredns']['localhost:9153']['go_memstats_gc_sys_bytes'],
value['prometheus']['localhost:9090']['go_memstats_gc_sys_bytes'])) | site/ko/io/tutorials/prometheus.ipynb | tensorflow/docs-l10n | apache-2.0 |
The created Dataset is ready to be passed directly to tf.keras for either training or inference purposes.
Use the Dataset for model training
With the metrics Dataset created, it is possible to pass the Dataset directly to tf.keras for model training or inference.
For demo purposes, this tutorial uses a very simple LSTM model with 1 feature and 2 steps as input: | n_steps, n_features = 2, 1
simple_lstm_model = tf.keras.models.Sequential([
tf.keras.layers.LSTM(8, input_shape=(n_steps, n_features)),
tf.keras.layers.Dense(1)
])
simple_lstm_model.compile(optimizer='adam', loss='mae')
| site/ko/io/tutorials/prometheus.ipynb | tensorflow/docs-l10n | apache-2.0 |
The dataset to be used is the value of 'go_memstats_sys_bytes' for CoreDNS, with 10 samples. However, because a sliding window with window=n_steps and shift=1 is formed, additional samples are needed (for any two consecutive elements, the first is taken as x and the second as y for training). The total is 10 + n_steps - 1 + 1 = 12 seconds.
The data values are also scaled to [0, 1]. | n_samples = 10
dataset = tfio.experimental.IODataset.from_prometheus(
"go_memstats_sys_bytes", n_samples + n_steps - 1 + 1, endpoint="http://localhost:9090")
# take go_memstats_gc_sys_bytes from coredns job
dataset = dataset.map(lambda _, v: v['coredns']['localhost:9153']['go_memstats_sys_bytes'])
# find the max value and scale the value to [0, 1]
v_max = dataset.reduce(tf.constant(0.0, tf.float64), tf.math.maximum)
dataset = dataset.map(lambda v: (v / v_max))
# expand the dimension by 1 to fit n_features=1
dataset = dataset.map(lambda v: tf.expand_dims(v, -1))
# take a sliding window
dataset = dataset.window(n_steps, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda d: d.batch(n_steps))
# the first value is x and the next value is y, only take 10 samples
x = dataset.take(n_samples)
y = dataset.skip(1).take(n_samples)
dataset = tf.data.Dataset.zip((x, y))
# pass the final dataset to model.fit for training
simple_lstm_model.fit(dataset.batch(1).repeat(10), epochs=5, steps_per_epoch=10) | site/ko/io/tutorials/prometheus.ipynb | tensorflow/docs-l10n | apache-2.0 |
Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab) | import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
# TODO: Implement Function
vocab_set = set(text)
#enumerate the set and put in dictionary
vocab_to_int = {word: ii for ii, word in enumerate(vocab_set, 1)}
#flip the dictionary
int_to_vocab = {ii: word for word, ii in vocab_to_int.items()}
return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables) | tv-script-generation/dlnd_tv_script_generation.ipynb | mikelseverson/Udacity-Deep_Learning-Nanodegree | mit |
Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate) | def get_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
"""
input = tf.placeholder(tf.int32, shape=(None, None), name='input')
targets = tf.placeholder(tf.int32, shape=(None, None), name='targets')
learningRate = tf.placeholder(tf.float32, shape=None, name='learning_rate')
# TODO: Implement Function
return input, targets, learningRate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs) | tv-script-generation/dlnd_tv_script_generation.ipynb | mikelseverson/Udacity-Deep_Learning-Nanodegree | mit |
Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The Rnn size should be set using rnn_size
- Initalize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState) | def get_init_cell(batch_size, rnn_size):
"""
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
"""
# TODO: Implement Function
lstm_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
rnn_cell = tf.contrib.rnn.MultiRNNCell([lstm_cell])
initialized = rnn_cell.zero_state(batch_size, tf.float32)
initialized = tf.identity(initialized, name="initial_state")
return rnn_cell, initialized
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell) | tv-script-generation/dlnd_tv_script_generation.ipynb | mikelseverson/Udacity-Deep_Learning-Nanodegree | mit |
Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState) | def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
"""
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
"""
# TODO: Implement Function
embeded = get_embed(input_data, vocab_size, rnn_size)
rnn, state = build_rnn(cell, embeded)
logits = tf.contrib.layers.fully_connected(rnn, vocab_size)
return logits, state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn) | tv-script-generation/dlnd_tv_script_generation.ipynb | mikelseverson/Udacity-Deep_Learning-Nanodegree | mit |
Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2], [ 7 8], [13 14]]
# Batch of targets
[[ 2 3], [ 8 9], [14 15]]
]
# Second Batch
[
# Batch of Input
[[ 3 4], [ 9 10], [15 16]]
# Batch of targets
[[ 4 5], [10 11], [16 17]]
]
# Third Batch
[
# Batch of Input
[[ 5 6], [11 12], [17 18]]
# Batch of targets
[[ 6 7], [12 13], [18 1]]
]
]
```
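A common way to obtain the shifted targets, including the wrap-around noted below where the very last target equals the first input value, is to roll the id sequence by one position. A small sketch of that trick (not the only way to implement get_batches):
```
import numpy as np

int_text = np.arange(1, 21)        # the example sequence 1..20
targets = np.roll(int_text, -1)    # shift left by one; the last element wraps around to 1
print(int_text[:5])                # [1 2 3 4 5]
print(targets[:5])                 # [2 3 4 5 6]
print(targets[-1])                 # 1  -> the wrap-around
```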
Notice that the last target value in the last batch is the first input value of the first batch. In this case, 1. This is a common technique used when creating sequence batches, although it is rather unintuitive. | def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
"""
# TODO: Implement Function
n_elements = len(int_text)
n_batches = (n_elements - 1)//(batch_size*seq_length)
all_batches = np.zeros(shape=(n_batches, 2, batch_size, seq_length), dtype=np.int32)
# fill Numpy array
for i in range(n_batches):
for j in range(batch_size):
input_start = i * seq_length + j * batch_size * seq_length
target_start = input_start + 1
target_stop = target_start + seq_length
if target_stop < len(int_text):
for k in range(seq_length):
all_batches[i][0][j][k] = int_text[input_start + k]
all_batches[i][1][j][k] = int_text[target_start + k]
return all_batches
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches) | tv-script-generation/dlnd_tv_script_generation.ipynb | mikelseverson/Udacity-Deep_Learning-Nanodegree | mit |
Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set embed_dim to the size of the embedding.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress. | # Number of Epochs
num_epochs = 40
# Batch Size
batch_size = 200
# RNN Size
rnn_size = 128
# Embedding Dimension Size
embed_dim = None
# Sequence Length
seq_length = 56
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 100
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save' | tv-script-generation/dlnd_tv_script_generation.ipynb | mikelseverson/Udacity-Deep_Learning-Nanodegree | mit |
Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor) | def get_tensors(loaded_graph):
"""
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
# TODO: Implement Function
inputTensor = loaded_graph.get_tensor_by_name("input:0")
InitialStateTensor = loaded_graph.get_tensor_by_name("initial_state:0")
FinalStateTensor = loaded_graph.get_tensor_by_name("final_state:0")
ProbsTensor = loaded_graph.get_tensor_by_name("probs:0")
return inputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors) | tv-script-generation/dlnd_tv_script_generation.ipynb | mikelseverson/Udacity-Deep_Learning-Nanodegree | mit |
← Back to Index
Constant-Q Transform and Chroma
Constant-Q Transform
Unlike the Fourier transform, but similar to the mel scale, the constant-Q transform (Wikipedia) uses a logarithmically spaced frequency axis. For more information, read the original paper:
Judith C. Brown, "Calculation of a constant Q spectral transform," J. Acoust. Soc. Am., 89(1):425–434, 1991.
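To see what a logarithmically spaced frequency axis means in practice, the constant-Q bin center frequencies can be computed directly; with 12 bins per octave, each bin is one semitone above the previous one (a quick sketch, assuming librosa's default of 12 bins per octave):
```
import numpy as np
import librosa

fmin = librosa.midi_to_hz(36)                 # C2, the same fmin used below
bins_per_octave = 12.0
k = np.arange(72)                             # 72 bins = 6 octaves
freqs = fmin * 2.0 ** (k / bins_per_octave)   # geometric (logarithmic) spacing
print(freqs[:13])                             # one full octave: the last value is twice the first
```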
Let's load a file: | x, sr = librosa.load('audio/simple_piano.wav')
ipd.Audio(x, rate=sr) | chroma.ipynb | stevetjoa/stanford-mir | mit |
To compute a constant-Q spectrogram, we will use librosa.cqt: | fmin = librosa.midi_to_hz(36)
hop_length = 512
C = librosa.cqt(x, sr=sr, fmin=fmin, n_bins=72, hop_length=hop_length) | chroma.ipynb | stevetjoa/stanford-mir | mit |
Display: | logC = librosa.amplitude_to_db(numpy.abs(C))
plt.figure(figsize=(15, 5))
librosa.display.specshow(logC, sr=sr, x_axis='time', y_axis='cqt_note', fmin=fmin, cmap='coolwarm') | chroma.ipynb | stevetjoa/stanford-mir | mit |
Note how each frequency bin corresponds to one MIDI pitch number.
Chroma
A chroma vector (Wikipedia) (FMP, p. 123) is typically a 12-element feature vector indicating how much energy of each pitch class, {C, C#, D, D#, E, ..., B}, is present in the signal.
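Conceptually, a chroma vector can be obtained by folding a log-frequency (e.g. constant-Q) spectrogram onto the 12 pitch classes, summing all bins that share the same pitch class. The rough sketch below illustrates the idea only; librosa's implementations shown next are more careful about tuning, weighting, and normalization:
```
import numpy as np

def fold_to_chroma(C_mag):
    """Sum constant-Q magnitude bins that share a pitch class (assumes 12 bins per octave, fmin on C)."""
    n_bins, n_frames = C_mag.shape
    chroma = np.zeros((12, n_frames))
    for b in range(n_bins):
        chroma[b % 12] += C_mag[b]
    return chroma

# e.g. rough_chroma = fold_to_chroma(np.abs(C))   # with C from librosa.cqt above
```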
librosa.feature.chroma_stft | chromagram = librosa.feature.chroma_stft(x, sr=sr, hop_length=hop_length)
plt.figure(figsize=(15, 5))
librosa.display.specshow(chromagram, x_axis='time', y_axis='chroma', hop_length=hop_length, cmap='coolwarm') | chroma.ipynb | stevetjoa/stanford-mir | mit |
librosa.feature.chroma_cqt | chromagram = librosa.feature.chroma_cqt(x, sr=sr, hop_length=hop_length)
plt.figure(figsize=(15, 5))
librosa.display.specshow(chromagram, x_axis='time', y_axis='chroma', hop_length=hop_length, cmap='coolwarm') | chroma.ipynb | stevetjoa/stanford-mir | mit |
Chroma energy normalized statistics (CENS) (FMP, p. 375). The main idea of CENS features is that taking statistics over large windows smooths local deviations in tempo, articulation, and musical ornaments such as trills and arpeggiated chords. CENS are best used for tasks such as audio matching and similarity.
librosa.feature.chroma_cens | chromagram = librosa.feature.chroma_cens(x, sr=sr, hop_length=hop_length)
plt.figure(figsize=(15, 5))
librosa.display.specshow(chromagram, x_axis='time', y_axis='chroma', hop_length=hop_length, cmap='coolwarm') | chroma.ipynb | stevetjoa/stanford-mir | mit |
Logistic Regression
1. Introduction
1.1. Binary classification and decision theory. The MAP criterion
The goal of a classification problem is to assign a class or category to every instance or observation of a data collection. Here, we will assume that every instance ${\bf x}$ is an $N$-dimensional vector in $\mathbb{R}^N$, and that the class $y$ of sample ${\bf x}$ is an element of a binary set ${\mathcal Y} = \{0, 1\}$. The goal of a classifier is to predict the true value of $y$ after observing ${\bf x}$.
We will denote as $\hat{y}$ the classifier output or decision. If $y=\hat{y}$, the decision is an hit, otherwise $y\neq \hat{y}$ and the decision is an error.
Decision theory provides a solution to the classification problem in situations where the relation between instance ${\bf x}$ and its class $y$ is given by a known probabilistic model: assume that every tuple $({\bf x}, y)$ is an outcome of a random vector $({\bf X}, Y)$ with joint distribution $p_{{\bf X},Y}({\bf x}, y)$. A natural criterion for classification is to select the predictor $\hat{Y}=f({\bf x})$ in such a way that the probability of error, $P\{\hat{Y} \neq Y\}$, is minimum. Noting that
$$
P\{\hat{Y} \neq Y\} = \int P\{\hat{Y} \neq Y | {\bf x}\} p_{\bf X}({\bf x}) d{\bf x}
$$
the optimal decision is obtained if, for every sample ${\bf x}$, we make the decision minimizing the conditional error probability:
\begin{align}
\hat{y}^* &= \arg\min_{\hat{y}} P\{\hat{y} \neq Y |{\bf x}\} \\
&= \arg\max_{\hat{y}} P\{\hat{y} = Y |{\bf x}\}
\end{align}
Thus, the optimal decision rule can be expressed as
$$
P_{Y|{\bf X}}(1|{\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad P_{Y|{\bf X}}(0|{\bf x})
$$
or, equivalently
$$
P_{Y|{\bf X}}(1|{\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad \frac{1}{2}
$$
The classifier implementing this decision rule is usually named MAP (Maximum A Posteriori).
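As a minimal illustration (assuming, unrealistically, that the posterior $P_{Y|{\bf X}}(1|{\bf x})$ were known exactly), the binary MAP rule is just a threshold at $1/2$:
```
def map_decision(p1):
    """Binary MAP decision given the posterior probability P(Y=1|x)."""
    return 1 if p1 > 0.5 else 0

# toy posteriors for three hypothetical samples
for p1 in [0.2, 0.5, 0.9]:
    print('P(1|x) = %.1f -> decision %d' % (p1, map_decision(p1)))
```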
1.2. Parametric classification.
Classical decision theory is grounded on the assumption that the probabilistic model relating the observed sample ${\bf X}$ and the true hypothesis $Y$ is known. Unfortunately, this is unrealistic in many applications, where the only available information to construct the classifier is a dataset $\mathcal S = \{({\bf x}^{(k)}, y^{(k)}), \,k=1,\ldots,K\}$ of instances and their respective class labels.
A more realistic formulation of the classification problem is the following: given a dataset $\mathcal S = \{({\bf x}^{(k)}, y^{(k)}) \in {\mathbb{R}}^N \times {\mathcal Y}, \, k=1,\ldots,K\}$ of independent and identically distributed (i.i.d.) samples from an unknown distribution $p_{{\bf X},Y}({\bf x}, y)$, predict the class $y$ of a new sample ${\bf x}$ with the minimum probability of error.
Since the probabilistic model generating the data is unknown, the MAP decision rule cannot be applied. However, many classification algorithms use the dataset to obtain an estimate of the posterior class probabilities, and apply it to implement an approximation to the MAP decision maker.
Parametric classifiers based on this idea assume, additionally, that the posterior class probability satisfies some parametric formula:
$$
P_{Y|X}(1|{\bf x},{\bf w}) = f_{\bf w}({\bf x})
$$
where ${\bf w}$ is a vector of parameters. Given the expression of the MAP decision maker, classification consists in comparing the value of $f_{\bf w}({\bf x})$ with the threshold $\frac{1}{2}$, and each parameter vector would be associated with a different decision maker.
In practice, the dataset ${\mathcal S}$ is used to select a particular parameter vector $\hat{\bf w}$ according to certain criterion. Accordingly, the decision rule becomes
$$
f_{\hat{\bf w}}({\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad \frac{1}{2}
$$
In this lesson, we explore one of the most popular model-based parametric classification methods: logistic regression.
<img src="figs/parametric_decision.png", width=300>
2. Logistic regression.
2.1. The logistic function
The logistic regression model assumes that the binary class label $Y \in \{0,1\}$ of observation $X\in \mathbb{R}^N$ satisfies the expression.
$$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g({\bf w}^\intercal{\bf x})$$
$$P_{Y|{\bf X}}(0|{\bf x}, {\bf w}) = 1-g({\bf w}^\intercal{\bf x})$$
where ${\bf w}$ is a parameter vector and $g(·)$ is the logistic function, which is defined by
$$g(t) = \frac{1}{1+\exp(-t)}$$
It is straightforward to see that the logistic function has the following properties:
P1: Probabilistic output: $\quad 0 \le g(t) \le 1$
P2: Symmetry: $\quad g(-t) = 1-g(t)$
P3: Monotonicity: $\quad g'(t) = g(t)·[1-g(t)] \ge 0$
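A quick numeric sanity check of P2 and P3 (just a sketch; Exercise 1 below asks for the analytical verification):
```
import numpy as np

g = lambda t: 1.0 / (1 + np.exp(-t))

t = np.linspace(-6, 6, 1001)
dt = t[1] - t[0]
print(np.allclose(g(-t), 1 - g(t)))                    # P2: symmetry
dg = np.gradient(g(t), dt)                             # numerical derivative
print(np.allclose(dg, g(t) * (1 - g(t)), atol=1e-3))   # P3: g'(t) = g(t)[1 - g(t)]
print(np.all(dg >= 0))                                 # P3: monotonicity
```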
In the following we define a logistic function in python, and use it to plot a graphical representation.
Exercise 1: Verify properties P2 and P3.
Exercise 2: Implement a function to compute the logistic function, and use it to plot this function in the interval $[-6,6]$. | # Define the logistic function
def logistic(x):
p = 1.0 / (1 + np.exp(-x))
return p
# Plot the logistic function
t = np.arange(-6, 6, 0.1)
z = logistic(t)
plt.plot(t, z)
plt.xlabel('$t$', fontsize=14)
plt.ylabel('$\phi(t)$', fontsize=14)
plt.title('The logistic function')
plt.grid() | C3.Classification_LogReg/.ipynb_checkpoints/RegresionLogistica-checkpoint.ipynb | ML4DS/ML4all | mit |
3.3. Nonlinear classifiers.
The logistic model can be extended to construct non-linear classifiers by using non-linear data transformations. A general form for a nonlinear logistic regression model is
$$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g[{\bf w}^\intercal{\bf z}({\bf x})] $$
where ${\bf z}({\bf x})$ is an arbitrary nonlinear transformation of the original variables. The boundary decision in that case is given by equation
$$
{\bf w}^\intercal{\bf z} = 0
$$
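For instance, with two-dimensional samples a second-order polynomial transformation can play the role of ${\bf z}({\bf x})$; a small sketch (it reuses the logistic function defined above, and the weights are those of the exercise below):
```
import numpy as np

def poly_z(x0, x1):
    """Second-order polynomial feature map z(x) = [1, x0, x1, x0^2, x0*x1, x1^2]."""
    return np.array([1.0, x0, x1, x0**2, x0*x1, x1**2])

w_demo = np.array([1, 10, 10, -20, 5, 1])
print(logistic(np.dot(w_demo, poly_z(0.2, -0.3))))   # P(1|x, w) for one sample
```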
Exercise 2: Modify the code above to generate a 3D surface plot of the polynomial logistic regression model given by
$$
P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g(1 + 10 x_0 + 10 x_1 - 20 x_0^2 + 5 x_0 x_1 + x_1^2)
$$ | # SOLUTION TO THE EXERCISE
# Weight vector:
w = [1, 10, 10, -20, 5, 1] # Try different weights
# Create a regtangular grid.
x_min = -1
x_max = 1
dx = x_max - x_min
h = float(dx) / 200
xgrid = np.arange(x_min, x_max, h)
xx0, xx1 = np.meshgrid(xgrid, xgrid)
# Compute the logistic map for the given weights
Z = logistic(w[0] + w[1]*xx0 + w[2]*xx1 + w[3]*np.multiply(xx0,xx0) +
w[4]*np.multiply(xx0,xx1) + w[5]*np.multiply(xx1,xx1))  # w[5] multiplies the x1^2 term
# Plot the logistic map
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper)
plt.xlabel('$x_0$')
plt.ylabel('$x_1$')
ax.set_zlabel('P(1|x,w)') | C3.Classification_LogReg/.ipynb_checkpoints/RegresionLogistica-checkpoint.ipynb | ML4DS/ML4all | mit |
In order to apply the gradient descent rule, we need to define two methods:
- A fit method, that receives the training data and returns the model weights and the value of the negative log-likelihood during all iterations.
- A predict method, that receives the model weight and a set of inputs, and returns the posterior class probabilities for that input, as well as their corresponding class predictions. | def logregFit(Z_tr, Y_tr, rho, n_it):
# Data dimension
n_dim = Z_tr.shape[1]
# Initialize variables
nll_tr = np.zeros(n_it)
nll_tr2 = np.zeros(n_it)
pe_tr = np.zeros(n_it)
w = np.random.randn(n_dim,1)
# Running the gradient descent algorithm
for n in range(n_it):
# Compute posterior probabilities for weight w
p1_tr = logistic(np.dot(Z_tr, w))
# Compute negative log-likelihood
# (note that this is not required for the weight update, only for nll tracking)
Y_tr2 = 2*Y_tr - 1
nll_tr[n] = np.sum(np.log(1 + np.exp(-np.dot(Y_tr2*Z_tr, w))))
# Update weights
w += rho*np.dot(Z_tr.T, Y_tr - p1_tr)
return w, nll_tr
def logregPredict(Z, w):
# Compute posterior probability of class 1 for weights w.
p = logistic(np.dot(Z, w))
# Class
D = [int(round(pn)) for pn in p]
return p, D | C3.Classification_LogReg/.ipynb_checkpoints/RegresionLogistica-checkpoint.ipynb | ML4DS/ML4all | mit |
Objects
Vectors | a = cga.base_vector() # random vector with components in base space only
a
cga.up(a)
cga.null_vector() # create null vector directly | docs/tutorials/cga/object-oriented.ipynb | arsenovic/clifford | bsd-3-clause |
Sphere (point pair, circles) | C = cga.round(e1, e2, -e1, e3) # generates sphere from points
C = cga.round(e1, e2, -e1) # generates circle from points
C = cga.round(e1, e2) # generates point-pair from points
#or
C2 = cga.round(2) # random 2-sphere (sphere)
C1 = cga.round(1) # random 1-sphere, (circle)
C0 = cga.round(0) # random 0-sphere, (point pair)
C1.mv # access the multivector
C = cga.round(e1, e2, -e1, e3)
C.center,C.radius # spheres have properties
cga.down(C.center) == C.center_down
C_ = cga.round().from_center_radius(C.center,C.radius)
C_.center,C_.radius | docs/tutorials/cga/object-oriented.ipynb | arsenovic/clifford | bsd-3-clause |
Operators | T = cga.translation(e1) # generate translation
T.mv
C = cga.round(e1, e2, -e1)
T.mv*C.mv*~T.mv # translate a sphere
T(C) # shorthand call, same as above. returns type of arg
T(C).center | docs/tutorials/cga/object-oriented.ipynb | arsenovic/clifford | bsd-3-clause |
The idea is to look at the title of a newspaper article and figure out whether the article came from the New York Times or from TechCrunch. There are very sophisticated approaches that we can try, but for now, let's go with something very simple.
<h2> Data exploration and preprocessing in BigQuery </h2>
<p>
What does the Hacker News dataset look like? | %bq query
SELECT
url, title, score
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
LENGTH(title) > 10
AND score > 10
LIMIT 10 | blogs/textclassification/txtcls.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
Let's do some regular expression parsing in BigQuery to get the source of the newspaper article from the URL. For example, if the url is http://mobile.nytimes.com/...., I want to be left with <i>nytimes</i>. To ensure that the parsing works for all URLs of interest, I'll group by the source to make sure there are no weird names left. This was an iterative process. | query="""
SELECT
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
COUNT(title) AS num_articles
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
AND LENGTH(title) > 10
GROUP BY
source
ORDER BY num_articles DESC
LIMIT 10
"""
import google.datalab.bigquery as bq
df = bq.Query(query).execute().result().to_dataframe()
df | blogs/textclassification/txtcls.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
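As an aside, the same source extraction can be sketched in plain Python with the re module, mirroring the REGEXP_EXTRACT / SPLIT / ARRAY_REVERSE logic of the query above (illustrative only; the actual processing stays in BigQuery):
```
import re

def extract_source(url):
  """Return e.g. 'nytimes' from 'http://mobile.nytimes.com/...'."""
  m = re.search(r'.*://(.[^/]+)/', url)
  if m is None:
    return None
  host_parts = m.group(1).split('.')
  return host_parts[::-1][1]   # second element of the reversed hostname parts

print(extract_source('http://mobile.nytimes.com/2017/06/19/some-article'))  # nytimes
```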
Now that we have good parsing of the URL to get the source, let's put together a dataset of source and titles. This will be our labeled dataset for machine learning. | query="""
SELECT source, REGEXP_REPLACE(title, '[^a-zA-Z0-9 $.-]', ' ') AS title FROM
(SELECT
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
title
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
AND LENGTH(title) > 10
)
WHERE (source = 'github' OR source = 'nytimes' OR source = 'techcrunch')
"""
df = bq.Query(query + " LIMIT 10").execute().result().to_dataframe()
df.head() | blogs/textclassification/txtcls.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
For ML training, we will need to split our dataset into training and evaluation datasets (and perhaps an independent test dataset if we are going to do model or feature selection based on the evaluation dataset). A simple way to do this is to use the hash of a well-distributed column in our data (See https://www.oreilly.com/learning/repeatable-sampling-of-data-sets-in-bigquery-for-machine-learning).
<p>
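The idea behind hashing is that a deterministic hash of the title assigns each row to the same bucket on every run, so the train/eval split is repeatable. A rough Python analogue of that bucketing (using hashlib purely for illustration; the pipeline itself uses BigQuery's FARM_FINGERPRINT as shown below):
```
import hashlib

def hash_bucket(title, num_buckets=4):
  """Deterministically map a title to one of num_buckets buckets."""
  h = int(hashlib.md5(title.encode('utf-8')).hexdigest(), 16)
  return h % num_buckets

title = 'Supreme Court to Hear Major Case on Partisan Districts'
print('train' if hash_bucket(title) > 0 else 'eval')   # buckets 1-3 -> train (~75%), bucket 0 -> eval (~25%)
```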
So, let's do that and save the results as CSV files. | traindf = bq.Query(query + " AND ABS(MOD(FARM_FINGERPRINT(title), 4)) > 0").execute().result().to_dataframe()
evaldf = bq.Query(query + " AND ABS(MOD(FARM_FINGERPRINT(title), 4)) = 0").execute().result().to_dataframe()
traindf.head()
traindf['source'].value_counts()
evaldf['source'].value_counts()
traindf.to_csv('train.csv', header=False, index=False, encoding='utf-8', sep='\t')
evaldf.to_csv('eval.csv', header=False, index=False, encoding='utf-8', sep='\t')
!head -3 train.csv
!wc -l *.csv
%bash
gsutil cp *.csv gs://${BUCKET}/txtcls1/ | blogs/textclassification/txtcls.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
<h2> TensorFlow code </h2>
Please explore the code in this <a href="txtcls1/trainer">directory</a> -- <a href="txtcls1/trainer/model.py">model.py</a> contains the key TensorFlow model and <a href="txtcls1/trainer/task.py">task.py</a> has a main() that launches off the training job.
However, the following cells should give you an idea of what the model code does: | import tensorflow as tf
from tensorflow.contrib import lookup
from tensorflow.python.platform import gfile
print tf.__version__
MAX_DOCUMENT_LENGTH = 5
PADWORD = 'ZYXW'
# vocabulary
lines = ['Some title', 'A longer title', 'An even longer title', 'This is longer than doc length']
# create vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
vocab_processor.fit(lines)
with gfile.Open('vocab.tsv', 'wb') as f:
f.write("{}\n".format(PADWORD))
for word, index in vocab_processor.vocabulary_._mapping.iteritems():
f.write("{}\n".format(word))
N_WORDS = len(vocab_processor.vocabulary_)
print '{} words into vocab.tsv'.format(N_WORDS)
# can use the vocabulary to convert words to numbers
table = lookup.index_table_from_file(
vocabulary_file='vocab.tsv', num_oov_buckets=1, vocab_size=None, default_value=-1)
numbers = table.lookup(tf.constant(lines[0].split()))
with tf.Session() as sess:
tf.tables_initializer().run()
print "{} --> {}".format(lines[0], numbers.eval())
!cat vocab.tsv
# string operations
titles = tf.constant(lines)
words = tf.string_split(titles)
densewords = tf.sparse_tensor_to_dense(words, default_value=PADWORD)
numbers = table.lookup(densewords)
# now pad out with zeros and then slice to constant length
padding = tf.constant([[0,0],[0,MAX_DOCUMENT_LENGTH]])
padded = tf.pad(numbers, padding)
sliced = tf.slice(padded, [0,0], [-1, MAX_DOCUMENT_LENGTH])
with tf.Session() as sess:
tf.tables_initializer().run()
print "titles=", titles.eval(), titles.shape
print "words=", words.eval()
print "dense=", densewords.eval(), densewords.shape
print "numbers=", numbers.eval(), numbers.shape
print "padding=", padding.eval(), padding.shape
print "padded=", padded.eval(), padded.shape
print "sliced=", sliced.eval(), sliced.shape
%bash
grep "^def" txtcls1/trainer/model.py | blogs/textclassification/txtcls.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
Let's make sure the code works locally on a small dataset for a few steps. | %bash
echo "bucket=${BUCKET}"
rm -rf outputdir
export PYTHONPATH=${PYTHONPATH}:${PWD}/txtcls1
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=outputdir \
--job-dir=./tmp --train_steps=200 | blogs/textclassification/txtcls.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
When I ran it, I got a 41% accuracy after a few steps. Because batchsize=32, 200 steps is essentially 6400 examples -- the full dataset is 72,000 examples, so this is not even the full dataset. And already, we are doing better than random chance.
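The steps-to-epochs arithmetic used here and below is worth spelling out (a quick back-of-the-envelope check):
```
batch_size = 32
num_examples = 72000

print(200 * batch_size)                            # 6400 examples seen in 200 steps, well under one epoch
print(36000 * batch_size / float(num_examples))    # 16.0 -> 36,000 steps is roughly 16 epochs
```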
<p>
Once the code works in standalone mode, you can run it on Cloud ML Engine. You can monitor the job from the GCP console in the Cloud Machine Learning Engine section. Since we have 72,000 examples and batchsize=32, train_steps=36,000 essentially means 16 epochs. | %bash
OUTDIR=gs://${BUCKET}/txtcls1/trained_model
JOBNAME=txtcls_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gsutil cp txtcls1/trainer/*.py $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/txtcls1/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC --runtime-version=1.2 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_steps=36000 | blogs/textclassification/txtcls.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
Training finished with an accuracy of 73%. Obviously, this was trained on a really small dataset and with more data will hopefully come even greater accuracy.
<h2> Deploy trained model </h2>
<p>
Deploying the trained model to act as a REST web service is a simple gcloud call. | %bash
gsutil ls gs://${BUCKET}/txtcls1/trained_model/export/Servo/
%bash
MODEL_NAME="txtcls"
MODEL_VERSION="v1"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/txtcls1/trained_model/export/Servo/ | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} | blogs/textclassification/txtcls.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
<h2> Use model to predict </h2>
<p>
Send a JSON request to the endpoint of the service to make it predict which publication the article is more likely to run in. These are actual titles of articles in the New York Times, github, and TechCrunch on June 19. These titles were not part of the training or evaluation datasets. | from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import json
credentials = GoogleCredentials.get_application_default()
api = discovery.build('ml', 'v1beta1', credentials=credentials,
discoveryServiceUrl='https://storage.googleapis.com/cloud-ml/discovery/ml_v1beta1_discovery.json')
request_data = {'instances':
[
{
'title': 'Supreme Court to Hear Major Case on Partisan Districts'
},
{
'title': 'Furan -- build and push Docker images from GitHub to target'
},
{
'title': 'Time Warner will spend $100M on Snapchat original shows and ads'
},
]
}
parent = 'projects/%s/models/%s/versions/%s' % (PROJECT, 'txtcls', 'v1')
response = api.projects().predict(body=request_data, name=parent).execute()
print "response={0}".format(response) | blogs/textclassification/txtcls.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
Input functions to read JPEG images
The key difference between this notebook and the MNIST one is in the input function.
In the input function here, we are doing the following:
* Reading JPEG images, rather than 2D integer arrays.
* Reading in batches of batch_size images rather than slicing our in-memory structure to be batch_size images.
* Resizing the images to the expected HEIGHT, WIDTH. Because this is a real-world dataset, the images are of different sizes. We need to preprocess the data to, at the very least, resize them to constant size.
Run as a Python module
Since we want to run our code on Cloud ML Engine, we've packaged it as a python module.
The model.py and task.py containing the model code is in <a href="flowersmodel">flowersmodel</a>
Complete the TODOs in model.py before proceeding!
Once you've completed the TODOs, run it locally for a few steps to test the code. | %%bash
rm -rf flowersmodel.tar.gz flowers_trained
gcloud ml-engine local train \
--module-name=flowersmodel.task \
--package-path=${PWD}/flowersmodel \
-- \
--output_dir=${PWD}/flowers_trained \
--train_steps=5 \
--learning_rate=0.01 \
--batch_size=2 \
--model=$MODEL_TYPE \
--augment \
--train_data_path=gs://cloud-ml-data/img/flower_photos/train_set.csv \
--eval_data_path=gs://cloud-ml-data/img/flower_photos/eval_set.csv | courses/machine_learning/deepdive/08_image_keras/labs/flowers_fromscratch.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
Here are my results:
Model | Accuracy | Time taken | Run time parameters
--- | :---: | --- | ---
cnn with batch-norm | 0.582 | 47 min | 1000 steps, LR=0.01, Batch=40
as above, plus augment | 0.615 | 3 hr | 5000 steps, LR=0.01, Batch=40
What was your accuracy?
Deploying and predicting with model
Deploy the model: | %%bash
MODEL_NAME="flowers"
MODEL_VERSION=${MODEL_TYPE}
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/flowers/trained_${MODEL_TYPE}/export/exporter | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ml-engine versions delete --quiet ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version=$TFVERSION | courses/machine_learning/deepdive/08_image_keras/labs/flowers_fromscratch.ipynb | GoogleCloudPlatform/training-data-analyst | apache-2.0 |
The dataset is available either for download from the UCI ML repository or via the scikit-learn datasets module. Then we set up the Support Vector Classifier with the training data X and the target y: | sclf = SVC(gamma=0.001, C=100, kernel='linear')
X= dimages.data[:-10]
y= dimages.target[:-10]
print('train set samples:',len(X))
| MNISTSinglePredict2Test.ipynb | maxkleiner/maXbox4 | gpl-3.0 |
In the last step we predict a specific digit from the test set (only the last 10 samples are unseen), meaning we pass an actual image and the SVC makes a prediction of which digit belongs to the image: | testimage = -5
s_prediction = sclf.predict([dimages.data[testimage]])
print ('the image maybe belongs to ',s_prediction)
plt.imshow(dimages.images[testimage], cmap=plt.cm.gray_r, interpolation="nearest")
plt.show() | MNISTSinglePredict2Test.ipynb | maxkleiner/maXbox4 | gpl-3.0 |
We try the same fit with a Random Forest Classifier to finish the first step of this lesson: | #RandomForestClassifier
rfc_clf = RandomForestClassifier()
rfc_clf.fit(X,y)
rfc_prediction = rfc_clf.predict([dimages.data[testimage]])
print ('predict with RFC ',rfc_prediction) | MNISTSinglePredict2Test.ipynb | maxkleiner/maXbox4 | gpl-3.0 |
There are many ways to improve this prediction, including moving beyond a support vector classifier to a neural classifier, but here is a simple idea to start with: simplify our images by making them true black and white and stack them into an array; a sketch of this idea follows below.
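A minimal sketch of that black-and-white idea, reusing the `dimages`, `testimage`, and `SVC` objects already defined above; the threshold of 8 is an arbitrary assumption (pixel values in scikit-learn's digits data run from 0 to 16).

```python
# Binarize the 8x8 digit images, then refit the same kind of classifier on them.
bw = (dimages.data > 8).astype(int)        # threshold of 8 is an assumption
sclf_bw = SVC(gamma=0.001, C=100, kernel='linear')
sclf_bw.fit(bw[:-10], dimages.target[:-10])
print('black-and-white prediction:', sclf_bw.predict([bw[testimage]]))
```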
MNIST Multi Prediction
Now we split the data explicitly into a train and a test set, using an 80:20 ratio so that 80% of the images are available for training and 20% for testing. We treat the data as pixels and the target as labels.
We convert the dataset into a dataframe and use a support vector machine for classification. The fit method trains the model, and score evaluates it against the given test set.
A Support Vector Machine (SVM) is a discriminative classifier formally defined by a separating hyperplane. In other words, given labeled training data (supervised learning), the algorithm outputs an optimal hyperplane which categorizes new examples. In two-dimensional space this hyperplane is a line dividing the plane into two parts, with each class lying on either side. | #df = pd.DataFrame(data=dimages.data, columns=dimages.feature_names)
df = pd.DataFrame(data=dimages.data)
print(df.head(5))
df['target'] = pd.Series(dimages.target)
#df['pixels'] = dimages.data[:,1:64] #pd.Series(dimages.data[:,1:785])
print(df['target'])
print(df.shape) #print(df.info)
pixels = df
labels = df.target
print('pixels ',pixels) | MNISTSinglePredict2Test.ipynb | maxkleiner/maXbox4 | gpl-3.0 |
We are ready to split the given images in an 80:20 ratio, so that 80% of the images are used for training and 20% are held back as unseen data for testing. | train_images, test_images, train_labels, test_labels = \
train_test_split(pixels,labels,train_size=0.8,random_state=2);
print('train size: ',len(train_images), len(train_labels))
print('test size: ',len(test_images), len(test_labels))
sclf.fit(train_images, train_labels)
print('test score ',sclf.score(test_images,test_labels)) | MNISTSinglePredict2Test.ipynb | maxkleiner/maXbox4 | gpl-3.0 |
This gives us a score of about 97 percent (0.9777), which is a good overall result. We could try to increase the accuracy further, but that is left as a challenge.
The dataset description of our primer says: each image is 8 pixels in height and 8 pixels in width, for a total of 64 pixels. Each pixel has a single pixel value associated with it, indicating the lightness or darkness of that pixel, with higher numbers meaning darker. In scikit-learn's digits data this pixel value is an integer between 0 and 16, inclusive (the original MNIST images use values from 0 to 255).
It would be nice to get the confusion matrix of the dataset to get an impression of the score. | from sklearn.metrics import confusion_matrix
test_predictions = sclf.predict(test_images)
#print(confusion_matrix(test_labels,np.argmax(test_predictions,axis=1)))
print(confusion_matrix(test_labels, test_predictions)) | MNISTSinglePredict2Test.ipynb | maxkleiner/maXbox4 | gpl-3.0 |
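Alongside the confusion matrix, a per-class report can make the score easier to interpret; this small optional addition reuses the predictions computed above.

```python
from sklearn.metrics import classification_report

# Per-class precision, recall and F1 to complement the confusion matrix.
print(classification_report(test_labels, test_predictions))
```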
Splitting the given images in a 70:30 ratio shows a slightly different confusion matrix: 70% of the images are used for training and 30% are held back as unseen data for testing. The digit 8 probably has the most trouble getting recognized! Disguised as an 8 you can pass for a 6 or a 9, which is logical because on a 7-segment LCD display the 8 is the base pattern! In German we hint at that with the word Achtung ;-). | train_images, test_images, train_labels, test_labels = \
train_test_split(pixels,labels,train_size=0.7,random_state=2);
sclf.fit(train_images, train_labels)
print('test score ',sclf.score(test_images,test_labels))
test_predictions = sclf.predict(test_images)
print(confusion_matrix(test_labels, test_predictions)) | MNISTSinglePredict2Test.ipynb | maxkleiner/maXbox4 | gpl-3.0 |
Data
Sunspots are a typical dataset for testing different peak detection algorithms. Besides having well defined, Gaussian-ish peaks, the heights of the local maxima and minima also vary over time. These attributes make sunspot datasets good for baselining different peak detection algorithms. | from utils import progress_bar_downloader
import os
link = 'http://www.quandl.com/api/v1/datasets/SIDC/SUNSPOTS_A.csv?&trim_start=1700-12-31&trim_end=2013-12-31&sort_order=desc'
dlname = 'sunspots.csv'
if not os.path.exists('./%s' % dlname):
progress_bar_downloader(link, dlname)
else:
print('%s already downloaded!' % dlname)
sunspot = np.genfromtxt(dlname, delimiter=',', skip_header=1, usecols=1)
plt.plot(sunspot, color='steelblue')
plt.title('Annual Sunspot Data, 1700-2014, from quandl.com') | blogsite/posts/wavelets.ipynb | kastnerkyle/kastnerkyle.github.io-nikola | bsd-3-clause |
The piece-regular function is another popular test signal, consisting of wildly non-Gaussian shapes. It is not a "real-world" example for most types of data, but is an extreme test of peak detection - many humans even have trouble picking all the peaks in this one!
The simplest way to acquire the piece-regular dataset is to use the load_signal.m function. I have pre-run this function, and saved the dataset to my public dropbox account as a .mat file. We can then fetch the file from there, in a similar way as the sunspot data.
For the curious, these were the octave commands (octave was run from the directory containing load_signal.m):
>> x = load_signal('piece-regular');
>> save -V7 piece-regular.mat x | from scipy.io import loadmat
link = 'https://dl.dropboxusercontent.com/u/15378192/piece-regular.mat'
dlname = 'piece-regular.mat'
if not os.path.exists('./%s' % dlname):
progress_bar_downloader(link, dlname)
else:
print('%s already downloaded!' % dlname)
data = loadmat(dlname)
pr = data['x']
plt.plot(pr, color='steelblue')
plt.title('Piecewise Regular Data, from WaveLab') | blogsite/posts/wavelets.ipynb | kastnerkyle/kastnerkyle.github.io-nikola | bsd-3-clause |
We Have To Go Deeper
The filterbank representation of the wavelet (seen below) is very convenient for wavelet peak finding. Extending code from a previous post, we will create an arbitrary depth analysis wavelet filterbank (no reconstruction), in order to perform peak detection.
The basic algorithm is detailed in this whitepaper. In short, this method involves finding zero crossings in the level X detail coefficients (generated by the bior3.1 g[n]) where there is less noise in the zero crossings. Tracking those peaks back through the lower levels, we can refine the peak location with higher resolution data. I will use the haar wavelet, mainly because I know how to construct it, so the results will be slightly different than the whitepaper. For non-tutorial use, check out the PyWavelets package - it supports a ton of different wavelet functions and is very popular. scipy.signal also has some limited support for wavelet methods.
The NI whitepaper also chooses to use the undecimated wavelet transform - I have chosen to use the decimated version, and compensate for the results. This introduces some noise into the estimates, and may account for some of the "off-by-one" peaks in the results. However, this fits better into the filterbank model for wavelet decomposition. | from IPython.display import Image
Image(url='http://upload.wikimedia.org/wikipedia/commons/2/22/Wavelets_-_Filter_Bank.png')
from numpy.lib.stride_tricks import as_strided
def polyphase_core(x, m, f):
#x = input data
#m = decimation rate
#f = filter
#Force it to be 1D
x = x.ravel()
#Hack job - append zeros to match decimation rate
if x.shape[0] % m != 0:
x = np.append(x, np.zeros((m - x.shape[0] % m,)))
if f.shape[0] % m != 0:
f = np.append(f, np.zeros((m - f.shape[0] % m,)))
    polyphase = p = np.zeros((m, (x.shape[0] + f.shape[0]) // m), dtype=x.dtype)
p[0, :-1] = np.convolve(x[::m], f[::m])
#Invert the x values when applying filters
for i in range(1, m):
p[i, 1:] = np.convolve(x[m - i::m], f[i::m])
return p
def wavelet_lp(data, ntaps=4):
#type == 'haar':
f = np.array([1.] * ntaps)
return np.sum(polyphase_core(data, 2, f), axis=0)
def wavelet_hp(data, ntaps=4):
#type == 'haar':
    if ntaps % 2 != 0:
raise ValueError("ntaps should be even")
half = ntaps // 2
f = np.array(([-1.] * half) + ([1.] * half))
return np.sum(polyphase_core(data, 2, f), axis=0)
def wavelet_filterbank(n, data):
#Create and store all coefficients to level n
x = data
all_lp = []
all_hp = []
for i in range(n):
c = wavelet_lp(x)
x = wavelet_hp(x)
all_lp.append(c)
all_hp.append(x)
return all_lp, all_hp
def zero_crossing(x):
x = x.ravel()
#Create an X, 2 array of overlapping points i.e.
#[1, 2, 3, 4, 5] becomes
#[[1, 2],
#[2, 3],
#[3, 4],
#[4, 5]]
o = as_strided(x, shape=(x.shape[0] - 1, 2), strides=(x.itemsize, x.itemsize))
#Look for sign changes where sign goes from positive to negative - this is local maxima!
#Negative to positive is local minima
return np.where((np.sum(np.sign(o), axis=1) == 0) & (np.sign(o)[:, 0] == 1.))[0]
def peak_search(hp_arr, arr_max):
#Given all hp coefficients and a limiting value, find and return all peak indices
zero_crossings = []
for n, _ in enumerate(hp_arr):
#2 ** (n + 1) is required to rescale due to decimation by 2 at each level
#Also remove a bunch of redundant readings due to clip using np.unique
zero_crossings.append(np.unique(np.clip(2 ** (n + 1) * zero_crossing(hp_arr[n]), 0, arr_max)))
#Find refined estimate for each peak
peak_idx = []
for v in zero_crossings[-1]:
v_itr = v
for n in range(len(zero_crossings) - 2, 0, -1):
v_itr = find_nearest(v_itr, zero_crossings[n])
peak_idx.append(v_itr)
#Only return unique answers
return np.unique(np.array(peak_idx, dtype='int32'))
def find_nearest(v, x):
return x[np.argmin(np.abs(x - v))]
def peak_detect(data, depth):
if depth == 1:
raise ValueError("depth should be > 1")
#Return indices where peaks were found
lp, hp = wavelet_filterbank(depth, data)
return peak_search(hp, data.shape[0] - 1) | blogsite/posts/wavelets.ipynb | kastnerkyle/kastnerkyle.github.io-nikola | bsd-3-clause |
Peaking Duck
One of the trickiest things about this method is choosing the proper depth for wavelet peak detection. Too deep, and sharp peaks may be eliminated due to decimation. Not deep enough, and there may be false positives or spurious results.
Empirically, it appears that the number of taps in the wavelet filter has a similar effect to depth - too many taps seems to blur out sharp peaks, but not enough taps causes strange peaks to be detected. 4 taps seems to be a good number for these two datasets, but this may need tweaking for new applications.
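As a quick, purely illustrative way to get a feel for the depth parameter (reusing `peak_detect` and the sunspot data defined above), one can simply compare how many peaks are reported at a few depths:

```python
# Illustrative only: the 'right' depth is the one whose peaks match visual inspection.
for depth in (2, 3, 4):
    print('depth %d -> %d peaks detected in the sunspot data' % (depth, len(peak_detect(sunspot, depth))))
```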
Interestingly enough, changing one line in the zero_crossing function from:
return np.where((np.sum(np.sign(o), axis=1) == 0) & (np.sign(o)[:, 0] == 1.))[0]
to
return np.where((np.sum(np.sign(o), axis=1) == 0) & (np.sign(o)[:, 0] == -1.))[0]
changes this from a peak detector (local maximum) into a valley detector (local minimum) - very cool! Though the detected peaks are not perfect, they are pretty close and could probably be improved by choosing a better wavelet, or better estimation of the proper depth parameter. | indices = peak_detect(sunspot, 2)
plt.title('Detected peaks for sunspot dataset')
plt.plot(sunspot, color='steelblue')
plt.plot(indices, sunspot[indices], 'x', color='darkred')
plt.figure()
indices = peak_detect(pr, 3)
plt.title('Detected peaks for piece-regular dataset')
plt.plot(pr, color='steelblue')
plt.plot(indices, pr[indices], 'x', color='darkred') | blogsite/posts/wavelets.ipynb | kastnerkyle/kastnerkyle.github.io-nikola | bsd-3-clause |
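Here is a minimal sketch of the valley-detection variant described above, reusing the helpers already defined (`wavelet_filterbank`, `as_strided`) and assuming a fixed depth of 2; it only flips the sign test so that negative-to-positive crossings (local minima) are kept.

```python
def zero_crossing_min(x):
    # Same as zero_crossing above, but keyed on negative-to-positive sign changes (local minima).
    x = x.ravel()
    o = as_strided(x, shape=(x.shape[0] - 1, 2), strides=(x.itemsize, x.itemsize))
    return np.where((np.sum(np.sign(o), axis=1) == 0) & (np.sign(o)[:, 0] == -1.))[0]

# Quick check against the coarsest detail coefficients of the sunspot data (depth 2 -> scale 2 ** 2).
_, hp = wavelet_filterbank(2, sunspot)
valley_candidates = np.unique(np.clip(2 ** 2 * zero_crossing_min(hp[-1]), 0, sunspot.shape[0] - 1))
print('candidate valley indices:', valley_candidates)
```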
Denoise and Defury
To complete our tour of wavelets, it would be beneficial to show how wavelets can be used for denoising as well. For this application, I will use the matrix representation of the wavelet basis, rather than the filterbank interpretation. Though they should be equivalent, the block transform is more straightforward (IMO) if you do not need the intermediate coefficients, as we did in the peak detection application. The MATLAB code is also a useful resource.
The general idea is to transform the input signal, then remove noise using a soft threshold in the transform space. This will zero out small coefficients, while larger coefficients will remain unaltered. Following this thresholding operation with the inverse wavelet transform we will get a filtered version of the original signal. Because each wavelet contains many different frequencies, this type of filtering can better preserve edges and trends than a simple highpass or lowpass filter. Let's check it out. | def haar_matrix(size):
level = int(np.ceil(np.log2(size)))
H = np.array([1.])[:, None]
NC = 1. / np.sqrt(2.)
LP = np.array([1., 1.])[:, None]
HP = np.array([1., -1.])[:, None]
for i in range(level):
H = NC * np.hstack((np.kron(H, LP), np.kron(np.eye(len(H)),HP)))
H = H.T
return H
def dwt(x):
H = haar_matrix(x.shape[0])
x = x.ravel()
#Zero pad to next power of 2
x = np.hstack((x, np.zeros(H.shape[1] - x.shape[0])))
return np.dot(H, x)
def idwt(x):
H = haar_matrix(x.shape[0])
x = x.ravel()
#Zero pad to next power of 2
x = np.hstack((x, np.zeros(H.shape[0] - x.shape[0])))
return np.dot(H.T, x)
def wthresh(a, thresh):
#Soft threshold
res = np.abs(a) - thresh
return np.sign(a) * ((res > 0) * res)
rstate = np.random.RandomState(0)
s = pr + 2 * rstate.randn(*pr.shape)
threshold = t = 5
wt = dwt(s)
wt = wthresh(wt, t)
rs = idwt(wt)
plt.plot(s, color='steelblue')
plt.title('Noisy Signal')
plt.figure()
plt.plot(dwt(s), color='darkred')
plt.title('Wavelet Transform of Noisy Signal')
plt.figure()
plt.title('Soft Thresholded Transform Coefficients')
plt.plot(wt, color='darkred')
plt.figure()
plt.title('Reconstructed Signal after Thresholding')
plt.plot(rs, color='steelblue') | blogsite/posts/wavelets.ipynb | kastnerkyle/kastnerkyle.github.io-nikola | bsd-3-clause |
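A quick, illustrative check (not part of the original post) is to compare the error of the noisy signal and of the thresholded reconstruction against the clean piece-regular data; the slice below simply discards any zero padding added by the transform.

```python
# Mean squared error before and after wavelet denoising, measured against the clean signal.
clean = pr.ravel()
print('MSE noisy vs clean:    %.3f' % np.mean((s.ravel() - clean) ** 2))
print('MSE denoised vs clean: %.3f' % np.mean((rs[:clean.shape[0]] - clean) ** 2))
```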
The Compression Dimension
The use of wavelets for compression is basically identical to its use for filtering. Keep the most powerful coefficients, while zeroing out everything else. On reconstruction, the result will be very close to the original, and much closer than if you attempted the same thing with a DFT. I am not sure how this compares to DCT compression, but the visual result seems pretty good to me. | from scipy import misc
link = 'http://sipi.usc.edu/~ortega/icip2001/original/lena_512.gif'
dlname = 'lena.gif'
if not os.path.exists('./%s' % dlname):
progress_bar_downloader(link, dlname)
else:
print('%s already downloaded!' % dlname)
def dwt2d(x):
H = haar_matrix(x.shape[0])
return np.dot(np.dot(H, x), H.T)
def idwt2d(x):
H = haar_matrix(x.shape[0])
return np.dot(np.dot(H.T, x), H)
lena = misc.imread(dlname)
wt = dwt2d(lena)
thresh = wthresh(wt, 15)
rs = idwt2d(thresh)
wtnz = len(wt.nonzero()[0]) + len(wt.nonzero()[1])
rsnz = len(thresh.nonzero()[0]) + len(thresh.nonzero()[1])
reduction = 'using %.2f percent the coefficients of the original' % (100 * float(rsnz) / wtnz)
plt.imshow(lena, cmap='gray')
f = plt.gca()
f.axes.get_xaxis().set_visible(False)
f.axes.get_yaxis().set_visible(False)
plt.title('Original Lena')
plt.figure()
plt.imshow(rs, cmap='gray')
f = plt.gca()
f.axes.get_xaxis().set_visible(False)
f.axes.get_yaxis().set_visible(False)
plt.title('Compressed Lena, %s' % reduction) | blogsite/posts/wavelets.ipynb | kastnerkyle/kastnerkyle.github.io-nikola | bsd-3-clause |
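As an optional, illustrative extension (assuming the `dwt2d`, `wthresh`, and `idwt2d` helpers above), one can sweep the threshold to see how reconstruction error trades off against the fraction of coefficients kept:

```python
# Reconstruction error vs. fraction of retained coefficients for a few soft thresholds.
coeffs = dwt2d(lena.astype(np.float64))
for t in (5, 15, 50, 100):
    kept = wthresh(coeffs, t)
    frac = float(np.count_nonzero(kept)) / kept.size
    mse = np.mean((idwt2d(kept) - lena) ** 2)
    print('threshold=%3d  kept %.1f%% of coefficients  mse=%.1f' % (t, 100 * frac, mse))
```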
That's all we need to create and train a model: | dls = CollabDataLoaders.from_df(ratings, bs=64, seed=42)
y_range = [0,5.5]
learn = collab_learner(dls, n_factors=50, y_range=y_range)
learn.fit_one_cycle(3, 5e-3) | dev_nbs/course/lesson4-collab.ipynb | fastai/fastai | apache-2.0 |
Movielens 100k
Let's try with the full Movielens 100k data dataset, available from http://files.grouplens.org/datasets/movielens/ml-100k.zip | path=Config().data/'ml-100k'
ratings = pd.read_csv(path/'u.data', delimiter='\t', header=None,
names=[user,item,'rating','timestamp'])
ratings.head()
movies = pd.read_csv(path/'u.item', delimiter='|', encoding='latin-1', header=None,
names=[item, 'title', 'date', 'N', 'url', *[f'g{i}' for i in range(19)]])
movies.head()
len(ratings)
rating_movie = ratings.merge(movies[[item, title]])
rating_movie.head()
dls = CollabDataLoaders.from_df(rating_movie, seed=42, valid_pct=0.1, bs=64, item_name=title, path=path)
dls.show_batch()
y_range = [0,5.5]
learn = collab_learner(dls, n_factors=40, y_range=y_range)
learn.lr_find()
learn.fit_one_cycle(5, 5e-3, wd=1e-1)
learn.save('dotprod') | dev_nbs/course/lesson4-collab.ipynb | fastai/fastai | apache-2.0 |
Here are some benchmarks on the same dataset for the popular Librec system for collaborative filtering. They show best results based on an RMSE of 0.91, which corresponds to an MSE of 0.91**2 = 0.83.
Interpretation
Setup | learn.load('dotprod');
learn.model
g = rating_movie.groupby('title')['rating'].count()
top_movies = g.sort_values(ascending=False).index.values[:1000]
top_movies[:10] | dev_nbs/course/lesson4-collab.ipynb | fastai/fastai | apache-2.0 |
Movie bias | movie_bias = learn.model.bias(top_movies, is_item=True)
movie_bias.shape
mean_ratings = rating_movie.groupby('title')['rating'].mean()
movie_ratings = [(b, i, mean_ratings.loc[i]) for i,b in zip(top_movies,movie_bias)]
item0 = lambda o:o[0]
sorted(movie_ratings, key=item0)[:15]
sorted(movie_ratings, key=lambda o: o[0], reverse=True)[:15] | dev_nbs/course/lesson4-collab.ipynb | fastai/fastai | apache-2.0 |
Movie weights | movie_w = learn.model.weight(top_movies, is_item=True)
movie_w.shape
movie_pca = movie_w.pca(3)
movie_pca.shape
fac0,fac1,fac2 = movie_pca.t()
movie_comp = [(f, i) for f,i in zip(fac0, top_movies)]
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
movie_comp = [(f, i) for f,i in zip(fac1, top_movies)]
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
idxs = np.random.choice(len(top_movies), 50, replace=False)
idxs = list(range(50))
X = fac0[idxs]
Y = fac2[idxs]
plt.figure(figsize=(15,15))
plt.scatter(X, Y)
for i, x, y in zip(top_movies[idxs], X, Y):
plt.text(x,y,i, color=np.random.rand(3)*0.7, fontsize=11)
plt.show() | dev_nbs/course/lesson4-collab.ipynb | fastai/fastai | apache-2.0 |
Загрузка и предобработка (Download and preprocessing) | #download
df = pd.read_csv('https://op.mos.ru/EHDWSREST/catalog/export/get?id=230308', compression='zip', header=0, encoding='cp1251', sep=';', quotechar='"')
#look at the data
df.head(12) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Закодируем месяца числовыми значениями и удалим ненужные для анализа столбцы
We will code the month with numeric values and delete the columns we do not need for analysis | #code months
d={'январь':1, 'февраль':2, 'март':3, 'апрель':4, 'май':5, 'июнь':6, 'июль':7,
'август':8, 'сентябрь':9, 'октябрь':10, 'ноябрь':11, 'декабрь':12}
df.Month=df.Month.map(d)
#delete some unuseful columns
df.drop(['ID','global_id','Unnamed: 12'],axis=1,inplace=True)
#look at the data
df.head(12) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Построим попарные графики зависимостей, но для наглядности возьмем только часть признаков
We construct pairwise graphs of dependencies, but for clarity we take only a part of the features | columns_to_show = ['StateRegistrationOfBirth', 'StateRegistrationOfMarriage',
'StateRegistrationOfPaternityExamination', 'StateRegistrationOfDivorce','StateRegistrationOfDeath']
data=df[columns_to_show]
grid = sns.pairplot(data) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Посмотрим, изменит ли что-то масштабирование.
Let's see whether scaling changes anything. | # change scale of features
scaler = MinMaxScaler()
df2=pd.DataFrame(scaler.fit_transform(df))
df2.columns=df.columns
data2=df2[columns_to_show]
grid2 = sns.pairplot(data2) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Почти без разницы
Almost no difference
Простейшая регрессия по 1 признаку (Regression 1 features)
Рассмотрим два параметра с наиболее выраженной линейной зависимостью StateRegistrationOfBirth и StateRegistrationOfPaternityExamination
Consider two parameters with the most pronounced linear dependence StateRegistrationOfBirth and StateRegistrationOfPaternityExamination | #get data for model
X = data2['StateRegistrationOfBirth'].values
y = data2['StateRegistrationOfPaternityExamination'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
X_train=np.reshape(X_train,[X_train.shape[0],1])
y_train=np.reshape(y_train,[y_train.shape[0],1])
X_test=np.reshape(X_test,[X_test.shape[0],1])
y_test=np.reshape(y_test,[y_test.shape[0],1])
#teach model and get predictions
lr = linear_model.LinearRegression()
lr.fit(X_train, y_train)
print('Coefficients:', lr.coef_)
print('Score:', lr.score(X_test,y_test)) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
График для зависимости, полученной по обучающим данным
The graph for the dependence obtained from the training data | plt.scatter(X_train, y_train, color='black')
plt.plot(X_train, lr.predict(X_train), color='blue',
linewidth=3)
plt.xlabel('StateRegistrationOfBirth')
plt.ylabel('State Registration OfPaternity Examination')
plt.title="Regression on train data" | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
График для зависимости, полученной по контрольным данным
The graph for the dependence obtained from the test data | plt.scatter(X_test, y_test, color='black')
plt.plot(X_test, lr.predict(X_test), color='green',
linewidth=3)
plt.xlabel('StateRegistrationOfBirth')
plt.ylabel('State Registration OfPaternity Examination')
plt.title="Regression on test data" | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Регрессия по нескольким признакам и Lasso регуляризация (Regression on several features and Lasso regularization)
Попробуем предсказать другой параметр - число зарегестрированных браков, на основании той части признаков, для которых ранее строили диаграммы ('StateRegistrationOfBirth', 'StateRegistrationOfMarriage', 'StateRegistrationOfPaternityExamination', 'StateRegistrationOfDivorce','StateRegistrationOfDeath')
Let's try to predict another parameter - the number of registered marriages, based on that part of the characteristics for which the charts were previously built ('StateRegistrationOfBirth', 'StateRegistrationOfMarriage', 'StateRegistrationOfPaternityExamination', 'StateRegistrationOfDivorce', 'StateRegistrationOfDeath') | #get main data
columns_to_show2=columns_to_show.copy()
columns_to_show2.remove("StateRegistrationOfMarriage")
#get data for a model
X = data2[columns_to_show2].values
y = data2['StateRegistrationOfMarriage'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
y_train=np.reshape(y_train,[y_train.shape[0],1])
y_test=np.reshape(y_test,[y_test.shape[0],1]) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Обучим простую линейную регрессию на 4-х мерном векторе признаков
We train a simple linear regression on a 4-dimensional feature vector | lr = linear_model.LinearRegression()
lr.fit(X_train, y_train)
print('Coefficients:', lr.coef_)
print('Score:', lr.score(X_test,y_test))
| 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Рассмотрим линейную регрессию с регуляризацией - Лассо
Consider linear regression with Lasso regularization | #let's look at the different alpha parameter:
#large
Rid=linear_model.Lasso (alpha = 0.01)
Rid.fit(X_train, y_train)
print(' Alpha:', Rid.alpha)
print(' Coefficients:', Rid.coef_)
print(' Score:', Rid.score(X_test,y_test))
#Small
Rid=linear_model.Lasso (alpha = 0.000000001)
Rid.fit(X_train, y_train)
print('\n Alpha:', Rid.alpha)
print(' Coefficients:', Rid.coef_)
print(' Score:', Rid.score(X_test,y_test))
#Optimal (for these test data)
Rid=linear_model.Lasso (alpha = 0.00025)
Rid.fit(X_train, y_train)
print('\n Alpha:', Rid.alpha)
print(' Coefficients:', Rid.coef_)
print(' Score:', Rid.score(X_test,y_test)) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
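For reference, and based on scikit-learn's documented formulation, the objective that Lasso minimizes (and that `alpha` scales) is:

$$\min_{w} \; \frac{1}{2\, n_{\text{samples}}} \lVert y - Xw \rVert_2^2 + \alpha \lVert w \rVert_1$$

The larger `alpha` is, the more coefficients are driven exactly to zero, which is what the three runs above illustrate.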
Добавим откровенно бесполезный признак
Add a frankly useless feature
columns_to_show3.append("TotalNumber")
columns_to_show3
X = df2[columns_to_show3].values
# y hasn't changed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
y_train=np.reshape(y_train,[y_train.shape[0],1])
y_test=np.reshape(y_test,[y_test.shape[0],1])
| 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Для начала посмотрим на результаты без регуляризации
First, look at the results without regularization | lr = linear_model.LinearRegression()
lr.fit(X_train, y_train)
print('Coefficients:', lr.coef_)
print('Score:', lr.score(X_test,y_test))
| 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
А теперь с регуляризацией (Lasso).
При малых значениях коэффициента регуляризации получаем незначительное улучшение.
And now with regularization (Lasso).
For small values of the regularization coefficient we obtain a slight improvement. | #Optimal (for these test data)
Rid=linear_model.Lasso (alpha = 0.00015)
Rid.fit(X_train, y_train)
print('\n Alpha:', Rid.alpha)
print(' Coefficients:', Rid.coef_)
print(' Score:', Rid.score(X_test,y_test)) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
При больших значениях альфа можно посмотреть, на отбор признаков в действии
For large alpha values, you can look at the selection of features in action | #large
Rid=linear_model.Lasso (alpha = 0.01)
Rid.fit(X_train, y_train)
print('\n Alpha:', Rid.alpha)
print(' Coefficients:', Rid.coef_)
print(' Score:', Rid.score(X_test,y_test)) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Резкий рост качества предсказаний можно объяснить, тем, что регистрация браков является составной величиной от общего количества.
Рассмотрим какую часть регистраций браков можно предсказать, только на основании общего количеств регистраций
The sharp increase in prediction quality can be explained by the fact that marriage registrations are a component of the total number of registrations.
Consider what part of the marriage registrations can be predicted based only on the total number of registrations. | X_train=np.reshape(X_train[:,4],[X_train.shape[0],1])
X_test=np.reshape(X_test[:,4],[X_test.shape[0],1])
lr = linear_model.LinearRegression()
lr.fit(X_train, y_train)
print('Coefficients:', lr.coef_)
print('Score:', lr.score(X_train,y_train)) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
И взглянем на графики
And let's look at the plots | # plot for train data
plt.figure(figsize=(8,10))
plt.subplot(211)
plt.scatter(X_train, y_train, color='black')
plt.plot(X_train, lr.predict(X_train), color='blue',
linewidth=3)
plt.xlabel('Total Number of Registration')
plt.ylabel('State Registration Of Marriage')
plt.title="Regression on train data"
# plot for test data
plt.subplot(212)
plt.scatter(X_test, y_test, color='black')
plt.plot(X_test, lr.predict(X_test), '--', color='green',
linewidth=3)
plt.xlabel('Total Number of Registration')
plt.ylabel('State Registration Of Marriage')
plt.title="Regression on test data" | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Добавим другой малополезный признак State Registration Of Name Change
Add another feature of little use: State Registration Of Name Change | columns_to_show4=columns_to_show2.copy()
columns_to_show4.append("StateRegistrationOfNameChange")
X = df2[columns_to_show4].values
# y hasn't changed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
y_train=np.reshape(y_train,[y_train.shape[0],1])
y_test=np.reshape(y_test,[y_test.shape[0],1])
lr = linear_model.LinearRegression()
lr.fit(X_train, y_train)
print('Coefficients:', lr.coef_)
print('Score:', lr.score(X_test,y_test))
| 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Как видно, он нам только мешает.
As you can see, it's just a hindrance.
Добавим полезный признак, закодированное значение месяца в который получил количество регистраций.
Add a useful feature: an encoded value of the month in which the registrations were recorded. | #get data
columns_to_show5=columns_to_show2.copy()
columns_to_show5.append("Month")
#get data for model
X = df2[columns_to_show5].values
# y hasn't changed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
y_train=np.reshape(y_train,[y_train.shape[0],1])
y_test=np.reshape(y_test,[y_test.shape[0],1])
#teach model and get predictions
lr = linear_model.LinearRegression()
lr.fit(X_train, y_train)
print('Coefficients:', lr.coef_)
print('Score:', lr.score(X_test,y_test)) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Линейная регрессия для предсказания тренда (Linear regression for predicting a trend)
Вернемся к исходным данным, но рассмотрим их теперь с учетом изменения во времени.
Для начала заменим колонку год на общее количество месяцев с момента начальной даты
В этот раз не будем масштабировать данные, большой пользы это не принесет.
Let's go back to the original data, but now consider how they change over time.
To begin with, we replace the Year column with the total number of months since the start date.
This time we will not scale the data; it would not bring much benefit. | #get data
df3=df.copy()
#get new column
df3.Year=df.Year.map(lambda x: (x-2010)*12)+df.Month
df3.rename(columns={'Year': 'Months'}, inplace=True)
#get data for model
X=df3[columns_to_show5].values
y=df3['StateRegistrationOfMarriage'].values
train=[df3.Months<=72]
test=[df3.Months>72]
X_train=X[train]
y_train=y[train]
X_test=X[test]
y_test=y[test]
y_train=np.reshape(y_train,[y_train.shape[0],1])
y_test=np.reshape(y_test,[y_test.shape[0],1])
#teach model and get predictions
lr = linear_model.LinearRegression()
lr.fit(X_train, y_train)
print('Coefficients:', lr.coef_[0])
print('Score:', lr.score(X_test,y_test)) | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Результат предсказания "не очень", но думаю лучше, чем просто наобум
Посмотрим на данные в графическом виде, в начале по отдельности, а потом вместе.
Наша модель пусть и не очень хорошо, но улавливает основные особенности тренда, позволяя прогнозировать данные.
The prediction result is not great, but I think it is better than guessing at random.
Let's look at the data graphically, first separately and then together.
Our model, though not very accurate, captures the main features of the trend, allowing us to forecast the data. | plt.figure(figsize=(9,23))
# plot for train data
plt.subplot(311)
plt.scatter(df3.Months.values[train], y_train, color='black')
plt.plot(df3.Months.values[train], lr.predict(X_train), color='blue', linewidth=2)
plt.xlabel('Months (from 01.2010)')
plt.ylabel('State Registration Of Marriage')
plt.title="Regression on train data"
# plot for test data
plt.subplot(312)
plt.scatter(df3.Months.values[test], y_test, color='black')
plt.plot(df3.Months.values[test], lr.predict(X_test), color='green', linewidth=2)
plt.xlabel('Months (from 01.2010)')
plt.ylabel('State Registration Of Marriage')
plt.title="Regression (prediction) on test data"
# plot for all data
plt.subplot(313)
plt.scatter(df3.Months.values[train], y_train, color='black')
plt.plot(df3.Months.values[train], lr.predict(X_train), color='blue', label='train', linewidth=2)
plt.scatter(df3.Months.values[test], y_test, color='black')
plt.plot(df3.Months.values[test], lr.predict(X_test), color='green', label='test', linewidth=2)
plt.title="Regression (prediction) on all data"
plt.xlabel('Months (from 01.2010)')
plt.ylabel('State Registration Of Marriage')
#plot line for link train to test
plt.plot([72,73], lr.predict([X_train[-1],X_test[0]]) , color='magenta',linewidth=2, label='train to test')
plt.legend()
| 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Бонус (Bonus)
Повышаем точность, за счет другого подхода к месяцам
(Increasing the accuracy with a different approach to the months)
Для начала заново загрузим исходную таблицу
For a start, reload the original table | df_base = pd.read_csv('https://op.mos.ru/EHDWSREST/catalog/export/get?id=230308', compression='zip', header=0, encoding='cp1251', sep=';', quotechar='"') | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Попробуем применить one-hot кодирование к графе Месяц
Let's try to apply one-hot encoding to the column Month | #get data for model
df4=df_base.copy()
df4.drop(['Year','StateRegistrationOfMarriage','ID','global_id','Unnamed: 12','TotalNumber','StateRegistrationOfNameChange','StateRegistrationOfAdoption'],axis=1,inplace=True)
df4=pd.get_dummies(df4,prefix=['Month'])
X=df4.values
X_train=X[train]
X_test=X[test]
#teach model and get predictions
lr = linear_model.LinearRegression()
lr.fit(X_train, y_train)
print('Coefficients:', lr.coef_[0])
print('Score:', lr.score(X_test,y_test))
# plot for all data
plt.scatter(df3.Months.values[train], y_train, color='black')
plt.plot(df3.Months.values[train], lr.predict(X_train), color='blue', label='train', linewidth=2)
plt.scatter(df3.Months.values[test], y_test, color='black')
plt.plot(df3.Months.values[test], lr.predict(X_test), color='green', label='test', linewidth=2)
plt.title="Regression (prediction) on all data"
plt.xlabel('Months (from 01.2010)')
plt.ylabel('State Registration Of Marriage')
#plot line for link train to test
plt.plot([72,73], lr.predict([X_train[-1],X_test[0]]) , color='magenta',linewidth=2, label='train to test')
| 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
Качество предсказания резко улучшилось
The quality of the prediction has improved sharply
Теперь попробуем закодировать вместо значения месяца, среднее значение регистрации браков в данный месяц, взятое на основании обучающих данных.
Now, instead of the month value itself, let's encode each month with the average number of marriage registrations for that month, computed on the training data. | #get data for pandas data frame
df5=df_base.copy()
d=dict()
#get we obtain the mean value of Registration Of Marriages by months on the training data
for mon in df5.Month.unique():
d[mon]=df5.StateRegistrationOfMarriage[df5.Month.values[train]==mon].mean()
#d+={}
df5['MeanMarriagePerMonth']=df5.Month.map(d)
df5.drop(['Month','Year','StateRegistrationOfMarriage','ID','global_id','Unnamed: 12','TotalNumber',
'StateRegistrationOfNameChange','StateRegistrationOfAdoption'],axis=1,inplace=True)
#get data for model
X=df5.values
X_train=X[train]
X_test=X[test]
#teach model and get predictions
lr = linear_model.LinearRegression()
lr.fit(X_train, y_train)
print('Coefficients:', lr.coef_[0])
print('Score:', lr.score(X_test,y_test))
# plot for all data
plt.scatter(df3.Months.values[train], y_train, color='black')
plt.plot(df3.Months.values[train], lr.predict(X_train), color='blue', label='train', linewidth=2)
plt.scatter(df3.Months.values[test], y_test, color='black')
plt.plot(df3.Months.values[test], lr.predict(X_test), color='green', label='test', linewidth=2)
plt.title="Regression (prediction) on all data"
plt.xlabel('Months (from 01.2010)')
plt.ylabel('State Registration Of Marriage')
#plot line for link train to test
plt.plot([72,73], lr.predict([X_train[-1],X_test[0]]) , color='magenta',linewidth=2, label='train to test') | 3.Machine_learning/2.family_registered_habr.ipynb | bosonbeard/Funny-models-and-scripts | unlicense |
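An equivalent and arguably more idiomatic way to build the same mean-per-month feature with pandas is sketched below; it assumes the same training mask (the first 72 months) and the original `df_base` columns used above.

```python
# Mean target encoding of the month via groupby, computed on the training period only.
train_mask = (df_base.Year - 2010) * 12 + df_base.Month <= 72
month_means = df_base.loc[train_mask].groupby('Month')['StateRegistrationOfMarriage'].mean()

df5_alt = df_base[['StateRegistrationOfBirth', 'StateRegistrationOfPaternityExamination',
                   'StateRegistrationOfDivorce', 'StateRegistrationOfDeath']].copy()
df5_alt['MeanMarriagePerMonth'] = df_base.Month.map(month_means)
```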
Specify the response and predictor columns | y = "C785"
x = train_df.names[0:784]
train_df[y] = train_df[y].asfactor()
test_df[y] = test_df[y].asfactor() | examples/deeplearning/notebooks/deeplearning_mnist_introduction.ipynb | mathemage/h2o-3 | apache-2.0 |
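A minimal sketch of the step that would typically follow here, assuming H2O's deep learning estimator; the architecture and hyperparameters below are illustrative assumptions, not values from the original notebook.

```python
from h2o.estimators.deeplearning import H2ODeepLearningEstimator

# Illustrative model definition; tune hidden sizes and epochs as needed.
model = H2ODeepLearningEstimator(distribution='multinomial',
                                 activation='RectifierWithDropout',
                                 hidden=[128, 64],
                                 epochs=10)
model.train(x=x, y=y, training_frame=train_df, validation_frame=test_df)
print(model.model_performance(test_df))
```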