Step 2: Generate a list of documents
# PubMed Central articles to fetch via the NCBI E-utilities efetch endpoint,
# e.g. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1994795/
pmc_ids = [1994795, 314300, 4383356, 4596899, 4303126,
           4637461, 4690355, 3505152, 3976810, 4061037]
urls = ['https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pmc&id=%d' % pmc_id
        for pmc_id in pmc_ids]
TextRank_Automatic_Summarization_for_Medical_Articles.ipynb
avtlearns/automatic_text_summarization
gpl-3.0
Step 3: Preprocess the documents
documents = []
abstracts = []
texts = []

print 'Preprocessing documents. This may take few minutes ...'
for i, url in enumerate(urls):
    print 'Preprocessing document %d ...' % (i + 1)

    # Download the document
    my_url = urllib2.urlopen(url)
    raw_doc = BeautifulSoup(my_url.read(), 'xml')
    documents.append(raw_doc)

    # Extract the cleaned abstract
    raw_abstract = raw_doc.abstract
    my_abstract = re.sub(r'</?\w+>', r' ', str(raw_abstract))  # remove xml tags
    abstracts.append(my_abstract)

    # Extract the cleaned text
    text = raw_doc.body
    text = re.sub(r'\n', r' ', str(text))           # remove newline characters
    text = re.sub(r'<[^>]+>', r' ', str(text))      # remove xml tags
    text = re.sub(r'\[[^\[\]]+\]', r' ', str(text)) # remove references
    text = re.sub(r'\[', r' ', str(text))           # remove any remaining [
    text = re.sub(r'\]', r' ', str(text))           # remove any remaining ]
    text = re.sub(r'\s{2,}', r' ', str(text))       # collapse runs of whitespace
    text = re.sub(r'\.\s+,\s+', r'. ', str(text))   # remove a stray comma after a period
    text = text.decode('utf-8')
    texts.append(text)

print 'All documents preprocessed successfully.'
print 'We have %d documents with %d abstracts and %d texts.' % (len(documents), len(abstracts), len(texts))
assert len(documents) == len(abstracts)
assert len(documents) == len(texts)
TextRank_Automatic_Summarization_for_Medical_Articles.ipynb
avtlearns/automatic_text_summarization
gpl-3.0
Step 4: Split the documents into sentences
punkttokenizer = PunktSentenceTokenizer()
text_sentences = []
for text in texts:
    sentences = []
    for sentence in punkttokenizer.tokenize(text):
        sentences.append(sentence)
    text_sentences.append(sentences)
TextRank_Automatic_Summarization_for_Medical_Articles.ipynb
avtlearns/automatic_text_summarization
gpl-3.0
Step 5: Count the term frequency for sentences
tf_matrices = []
tfidf_matrices = []
cosine_similarity_matrices = []

print 'Calculating sentence similarities. This may take few minutes ...'
for i, sentences in enumerate(text_sentences):
    print 'Calculating sentence similarities of document %d ...' % (i + 1)
    tf_matrix = CountVectorizer().fit_transform(sentences)
    tf_matrices.append(tf_matrix)
    tfidf_matrix = TfidfTransformer().fit_transform(tf_matrix)
    tfidf_matrices.append(tfidf_matrix)
    cosine_similarity_matrix = tfidf_matrix * tfidf_matrix.T
    cosine_similarity_matrices.append(cosine_similarity_matrix)

print 'All documents processed successfully.'
print 'We have %d documents with %d tf_matrices, %d tfidf_matrices and %d cosine_similarity_matrices.' \
    % (len(documents), len(tf_matrices), len(tfidf_matrices), len(cosine_similarity_matrices))
assert len(documents) == len(tf_matrices)
assert len(documents) == len(tfidf_matrices)
assert len(documents) == len(cosine_similarity_matrices)
TextRank_Automatic_Summarization_for_Medical_Articles.ipynb
avtlearns/automatic_text_summarization
gpl-3.0
Step 6: Calculate TextRank
similarity_graphs = []
graph_ranks = []
highest_ranks = []
lowest_ranks = []

print 'Calculating TextRanks. This may take few minutes ...'
for i, cosine_similarity_matrix in enumerate(cosine_similarity_matrices):
    print 'Calculating TextRanks of document %d ...' % (i + 1)
    similarity_graph = nx.from_scipy_sparse_matrix(cosine_similarity_matrix)
    similarity_graphs.append(similarity_graph)
    ranks = nx.pagerank(similarity_graph)
    graph_ranks.append(ranks)
    highest = sorted(((ranks[j], s) for j, s in enumerate(text_sentences[i])), reverse=True)
    highest_ranks.append(highest)
    lowest = sorted(((ranks[j], s) for j, s in enumerate(text_sentences[i])), reverse=False)
    lowest_ranks.append(lowest)

print 'All documents processed successfully.'
print 'We have %d documents with %d similarity_graphs %d graph_ranks and %d highest_ranks.' \
    % (len(documents), len(similarity_graphs), len(graph_ranks), len(highest_ranks))
assert len(documents) == len(similarity_graphs)
assert len(documents) == len(graph_ranks)
assert len(documents) == len(highest_ranks)
TextRank_Automatic_Summarization_for_Medical_Articles.ipynb
avtlearns/automatic_text_summarization
gpl-3.0
Step 7: Save extractive summaries
print 'Saving extractive summaries. This may take a few minutes ...'
for i, highest in enumerate(highest_ranks):
    print 'Writing extractive summary for document %d ...' % (i + 1)
    out_file = '\\TextRank\\system\\article%d_system1.txt' % (i + 1)
    with open(out_file, 'w') as f:
        for j in range(5):  # keep the five top-ranked sentences
            f.write((highest[j][1] + '\n').encode('utf-8'))
print 'All documents processed successfully.'
TextRank_Automatic_Summarization_for_Medical_Articles.ipynb
avtlearns/automatic_text_summarization
gpl-3.0
Step 8: Save ground truths.
print 'Saving ground truths. This may take a few minutes ...'
for i, abstract in enumerate(abstracts):
    print 'Writing ground truth for document %d ...' % (i + 1)
    out_file = '\\TextRank\\reference\\article%d_reference1.txt' % (i + 1)
    with open(out_file, 'w') as f:
        f.write(abstract.strip() + '\n')
print 'All documents processed successfully.'
TextRank_Automatic_Summarization_for_Medical_Articles.ipynb
avtlearns/automatic_text_summarization
gpl-3.0
Step 9: Calculate ROUGE score
%cd C:\ROUGE
!java -jar rouge2.0_0.2.jar
df = pd.read_csv('results.csv')
print df.sort_values('Avg_F-Score', ascending=False)
TextRank_Automatic_Summarization_for_Medical_Articles.ipynb
avtlearns/automatic_text_summarization
gpl-3.0
Display sensitivity maps for EEG and MEG sensors

Sensitivity maps can be produced from forward operators that indicate how well different sensor types will be able to detect neural currents from different regions of the brain. To get started with forward modeling see tut-forward.
# Author: Eric Larson <[email protected]>
#
# License: BSD-3-Clause

import numpy as np
import matplotlib.pyplot as plt

import mne
from mne.datasets import sample
from mne.source_space import compute_distance_to_sensors
from mne.source_estimate import SourceEstimate

print(__doc__)

data_path = sample.data_path()
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'

# Read the forward solution with surface orientation
fwd = mne.read_forward_solution(fwd_fname)
mne.convert_forward_solution(fwd, surf_ori=True, copy=False)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
0.24/_downloads/b7659d33d6ffe8531d004e9d6051f16f/forward_sensitivity_maps.ipynb
mne-tools/mne-tools.github.io
bsd-3-clause
Compute sensitivity maps
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
0.24/_downloads/b7659d33d6ffe8531d004e9d6051f16f/forward_sensitivity_maps.ipynb
mne-tools/mne-tools.github.io
bsd-3-clause
Show gain matrix a.k.a. leadfield matrix with sensitivity map
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)

fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
    im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
                   cmap='RdBu_r')
    ax.set_title(ch_type.upper())
    ax.set_xlabel('sources')
    ax.set_ylabel('sensors')
    fig.colorbar(im, ax=ax)

fig_2, ax = plt.subplots()
ax.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
        bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
        color=['c', 'b', 'k'])
fig_2.legend()
ax.set(title='Normal orientation sensitivity',
       xlabel='sensitivity', ylabel='count')

brain_sens = grad_map.plot(
    subjects_dir=subjects_dir, clim=dict(lims=[0, 50, 100]), figure=1)
brain_sens.add_text(0.1, 0.9, 'Gradiometer sensitivity', 'title', font_size=16)
0.24/_downloads/b7659d33d6ffe8531d004e9d6051f16f/forward_sensitivity_maps.ipynb
mne-tools/mne-tools.github.io
bsd-3-clause
Compare sensitivity map with distribution of source depths
# Source space with vertices
src = fwd['src']

# Compute minimum Euclidean distances between vertices and MEG sensors
depths = compute_distance_to_sensors(src=src, info=fwd['info'],
                                     picks=picks_meg).min(axis=1)
maxdep = depths.max()  # for scaling

vertices = [src[0]['vertno'], src[1]['vertno']]
depths_map = SourceEstimate(data=depths, vertices=vertices, tmin=0., tstep=1.)

brain_dep = depths_map.plot(
    subject='sample', subjects_dir=subjects_dir,
    clim=dict(kind='value', lims=[0, maxdep / 2., maxdep]), figure=2)
brain_dep.add_text(0.1, 0.9, 'Source depth (m)', 'title', font_size=16)
0.24/_downloads/b7659d33d6ffe8531d004e9d6051f16f/forward_sensitivity_maps.ipynb
mne-tools/mne-tools.github.io
bsd-3-clause
Sensitivity is likely to co-vary with the distance between sources and sensors. To determine the strength of this relationship, we can compute the correlation between source depth and sensitivity values.
corr = np.corrcoef(depths, grad_map.data[:, 0])[0, 1]
print('Correlation between source depth and gradiometer sensitivity values: %f.' % corr)
0.24/_downloads/b7659d33d6ffe8531d004e9d6051f16f/forward_sensitivity_maps.ipynb
mne-tools/mne-tools.github.io
bsd-3-clause
Using interact for animation with data

A soliton is a constant-velocity wave that maintains its shape as it propagates. Solitons arise from non-linear wave equations, such as the Korteweg–de Vries equation, which has the following analytical solution:

$$ \phi(x,t) = \frac{1}{2} c \, \mathrm{sech}^2 \left[ \frac{\sqrt{c}}{2} \left(x - ct - a \right) \right] $$

The constant c is the velocity and the constant a is the initial location of the soliton.

Define a soliton(x, t, c, a) function that computes the value of the soliton wave for the given arguments. Your function should work when the position x or the time t is a NumPy array, in which case it should return a NumPy array itself.
def soliton(x, t, c, a):
    # sech(z) = 1 / cosh(z), so sech^2(z) = cosh(z)**(-2)
    z = (np.sqrt(c) / 2) * (x - c * t - a)
    return 0.5 * c * np.cosh(z) ** (-2)

assert np.allclose(soliton(np.array([0]), 0.0, 1.0, 0.0), np.array([0.5]))
assignments/assignment05/InteractEx03.ipynb
phungkh/phys202-2015-work
mit
To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. To set this up, we create the following variables and arrays:
tmin = 0.0
tmax = 10.0
tpoints = 100
t = np.linspace(tmin, tmax, tpoints)

xmin = 0.0
xmax = 10.0
xpoints = 200
x = np.linspace(xmin, xmax, xpoints)

c = 1.0
a = 0.0
assignments/assignment05/InteractEx03.ipynb
phungkh/phys202-2015-work
mit
Compute a 2d NumPy array called phi:

- It should have a dtype of float.
- It should have a shape of (xpoints, tpoints).
- phi[i,j] should contain the value $\phi(x[i],t[j])$.
phi = np.ndarray((xpoints, tpoints), dtype=float)
for i in range(xpoints):
    for j in range(tpoints):
        phi[i, j] = soliton(x[i], t[j], c, a)

assert phi.shape == (xpoints, tpoints)
assert phi.ndim == 2
assert phi.dtype == np.dtype(float)
assert phi[0, 0] == soliton(x[0], t[0], c, a)
assignments/assignment05/InteractEx03.ipynb
phungkh/phys202-2015-work
mit
Write a plot_soliton_data(i) function that plots the soliton wave $\phi(x, t[i])$. Customize your plot to make it effective and beautiful.
def plot_soliton_data(i=0):
    plt.figure(figsize=(9, 6))
    plt.plot(x, soliton(x, t[i], c, a))
    plt.box(False)
    plt.ylim(0, 0.6)  # the wave peaks at c/2 = 0.5
    plt.grid(True)
    plt.ylabel('soliton wave')
    plt.xlabel('x')

plot_soliton_data(0)

assert True  # leave this for grading the plot_soliton_data function
assignments/assignment05/InteractEx03.ipynb
phungkh/phys202-2015-work
mit
Use interact to animate the plot_soliton_data function versus time.
interact(plot_soliton_data, i=(0, tpoints - 1, 5))  # i indexes t, so keep it below len(t)

assert True  # leave this for grading the interact with plot_soliton_data cell
assignments/assignment05/InteractEx03.ipynb
phungkh/phys202-2015-work
mit
Let's notice a few key things:

1. To make a list we use [].
2. We separate individual elements with commas.
3. The elements of a list can be any type.
4. The elements need not be written explicitly.

Other Properties of Lists

You can add two lists to create a new list. You can append an element to the end of a list by using the append function. You can append a list to the end of another list by using the += operator. You can access a single element with something called slicing.
la = [1, 2, 3]
lb = ["1", "2", "3"]
print(la + lb)
la.append(4)
print(la)
lb += la
print(lb)
Python Workshop/Containers.ipynb
CalPolyPat/Python-Workshop
mit
Slicing

At some point, you will want to access certain elements of a list; this is done by slicing. There are a couple of ways to do this:

1. la[0] will give the first element of the list la. Note that lists are indexed starting at zero, not one.
2. la[n] will give the (n+1)th element of the list la.
3. la[-1] will give the last element of the list la.
4. la[-n] will give the nth-to-last element of the list la.
5. la[p:q:k] will give you every kth element starting at p and ending at q of the list la. For example, la[1:7:2] will give you every second element starting at 1 and ending at 7. If p is omitted, it is assumed to be 0; if k is omitted, it is assumed to be 1; if q is omitted, it is assumed to be the last index, or -1, which is really the same thing as we saw above.
la = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(la[0])
print(la[3])
print(la[-1])
print(la[-3])
print(la[0:10:2])
print(la[3::2])
print(la[2:4:])
print(la[2:4])
Python Workshop/Containers.ipynb
CalPolyPat/Python-Workshop
mit
Exercises

1. Make 2 lists and add them together.
2. Take your list from 1., append the list ["I", "Love", "Python"] to it without using the append command, then print every second element.
3. Can you make a list of lists? Try it.
4. Consider the list x=[3,5,7,8,"Pi"].

   4a. Write out what you would expect Python to print, along with the type of the object printed, for the following slices: x[2], x[0], x[-2], x[1::2], x[1::], x[::-4]

   4b. Check your answers by creating that list and printing out the corresponding slices.

Sets

Sets are a special type of list that adhere to certain rules. If you have taken any higher-level or proof-based math classes, you will recognize that sets in Python are exactly the same as those in mathematics. Instead of using [], {} are used to create a set. Sets have the following properties:

- Sets will not contain duplicates. If you make a set with duplicates, it will only retain one of them.
- Sets are not ordered. This means no slicing.
- Sets have the familiar set operations from math. These are outlined below.
- You can convert a list to a set in the following manner: let t be a list; then set(t) is now a set containing the elements of t.

Set Operations

Consider the sets s={1,2,3} and t={1,2,3,4,5}:

| Operation | Meaning | Example |
|:----------:|:-------:|:-------:|
| s&#124;t | Union | {1,2,3,4,5} |
| s&t | Intersection | {1,2,3} |
| s-t | Difference | {} |
| s^t | Symmetric Difference | {4,5} |
| s<t | Strict Subset | True |
| s<=t | Subset | True |
| s>t | Strict Superset | False |
| s>=t | Superset | False |
t = {1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 5}
print(t)
s = {1, 4, 5, 7, 8}
print(t - s)
print(s - t)
print(t ^ s)
print(t - s | s - t)
print(t ^ s == t - s | s - t)
Python Workshop/Containers.ipynb
CalPolyPat/Python-Workshop
mit
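The cell above exercises the difference and symmetric-difference rows of the table; the comparison operators can be checked the same way. A quick illustrative addition, reusing the s and t from the table:

s = {1, 2, 3}
t = {1, 2, 3, 4, 5}
print(s | t)   # {1, 2, 3, 4, 5}: union
print(s & t)   # {1, 2, 3}: intersection
print(s < t)   # True: s is a strict subset of t
print(s <= t)  # True: s is a subset of t
print(s > t)   # False: s is not a strict superset of t
print(s >= t)  # False: s is not a superset of t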
Exercises

1. Write a set containing the letters of the word "dog".
2. Find the difference between the set in 1. and the set {"d", 5, "g"}.
3. Remove all the duplicates in the list [1,2,4,3,3,3,5,2,3,4,5,6,3,5,7] and print the resulting object, in two lines of code.

Tuples

Tuples are just like lists, except that you cannot append elements to a tuple. You may, however, combine two tuples. To create a tuple, one uses ().
a = (1, 2, 3, 4, 5)
b = ('a', 'b', 'c')
print(a)
print(b)
print(a + b)
Python Workshop/Containers.ipynb
CalPolyPat/Python-Workshop
mit
Dictionaries

Dictionaries are quite different from any container we have seen so far. A dictionary is a bunch of unordered key/value pairs. That is, each element of a dictionary has a key and a value, and the elements are in no particular order. It is good to keep this unorderedness in mind later on; for now, let's look at some examples. To create a dictionary we use the following syntax: {key: value}.
# Let's say we have some students take a test and we want to store their scores
scores = {'Sally': 89, 'Lucy': 75, 'Jeff': 45, 'Jose': 96}
print(scores)

# We cannot, however, combine two dictionaries with +;
# the last line below raises a TypeError
scores2 = {'Devin': 64, 'John': 23, 'Jake': 75}
print(scores2)
print(scores + scores2)
Python Workshop/Containers.ipynb
CalPolyPat/Python-Workshop
mit
Unlike with lists, we cannot access elements of the dictionary with slicing. We must instead use the keys. Let's see how to do this.
print(scores['Sally'])
print(scores2['John'])
Python Workshop/Containers.ipynb
CalPolyPat/Python-Workshop
mit
As we can see, the key returns us the value. This can be useful if you have a bunch of items that need to be paired together.

Accessing Just the Keys or Values

Want to get a list of the keys in a dictionary? How about the values? Fret not, there is a way!
print(scores.keys())
print(scores.values())
Python Workshop/Containers.ipynb
CalPolyPat/Python-Workshop
mit
Exercises

1. Build a dictionary of some constants in physics and math. Print out at least two of these values.
2. Give an example of something that would be best represented by a dictionary; think pairs.

In and Not In

No, this section isn't about fashion. It is about the in and not in operators. They return a boolean value based on whether or not a value is in or not in a container. Note that for dictionaries, this refers to the keys, not the values.
print('Devin' in scores2)
print(2 in a)
print('Hello World' not in scores)
Python Workshop/Containers.ipynb
CalPolyPat/Python-Workshop
mit
Converting Between Containers

But what if I want my set to be a list, or my tuple to be a set? To convert between types of containers, you can use any of the following functions:

- list(): Converts any container type into a list; for dictionaries it will be the list of keys.
- tuple(): Converts any container type into a tuple; for dictionaries it will be the tuple of keys.
- set(): Converts any container type into a set; for dictionaries it will be the set of keys. Note that, as above, this will remove all duplicates.
a = [1, 2, 3]
b = (1, 2, 3)
c = {1, 2, 3}
d = {1: 2, 3: 4}
print(list(b))
print(list(c))
print(list(d))
print(tuple(a))
print(tuple(c))
print(tuple(d))
print(set(a))
print(set(b))
print(set(d))
Python Workshop/Containers.ipynb
CalPolyPat/Python-Workshop
mit
2.1 Gaussian Elimination
def naive_gaussian_elimination(matrix):
    """
    A simple Gaussian elimination to solve equations

    Args:
        matrix : numpy 2d array in tableau form [A | b]

    Returns:
        mat : the matrix processed by Gaussian elimination
        x   : the roots of the equation

    Raises:
        ValueError   : matrix is null
        RuntimeError : zero pivot encountered
    """
    if matrix is None:
        raise ValueError('args matrix is null')

    # Clone the matrix
    mat = matrix.copy().astype(np.float64)

    # Row size and column size
    m = mat.shape[0]
    n = mat.shape[1]

    # Gaussian elimination
    for i in range(0, m):
        if np.abs(mat[i, i]) == 0:
            raise RuntimeError('zero pivot encountered')
        for j in range(i + 1, m):
            mult = mat[j, i] / mat[i, i]
            for k in range(i, m):
                mat[j, k] -= mult * mat[i, k]
            mat[j, n - 1] -= mult * mat[i, n - 1]

    # Back substitution
    x = np.zeros(m, dtype=np.float64)
    for i in range(m - 1, -1, -1):
        for j in range(i + 1, m):
            mat[i, n - 1] = mat[i, n - 1] - mat[i, j] * x[j]
            mat[i, j] = 0.0
        x[i] = mat[i, n - 1] / mat[i, i]

    return mat, x
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Apply Gaussian elimination in tableau form for the system of three equations in three unknowns:

$$
\large
\begin{matrix}
x + 2y - z = 3 \\
2x + y - 2z = 3 \\
-3x + y + z = -6
\end{matrix}
$$
""" Input: [[ 1 2 -1 3] [ 2 1 -2 3] [-3 1 1 -6]] """ input_mat = np.array([1, 2, -1, 3, 2, 1, -2, 3, -3, 1, 1, -6]) input_mat = input_mat.reshape(3, 4) output_mat, x = naive_gaussian_elimination(input_mat) print(output_mat) print('[x, y, z] = {}'.format(x))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Additional Examples

Put the system $x + 2y - z = 3, -3x + y + z = -6, 2x + z = 8$ into tableau form and solve by Gaussian elimination.
input_mat = np.array([
    [ 1, 2, -1,  3],
    [-3, 1,  1, -6],
    [ 2, 0,  1,  8]
])

output_mat, x = naive_gaussian_elimination(input_mat)
print(output_mat)
print('[x, y, z] = {}'.format(x))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
2.1 Computer Problems

1. Put together the code fragments in this section to create a MATLAB program for “naive” Gaussian elimination (meaning no row exchanges allowed). Use it to solve the systems of Exercise 2. See my implementation naive_gaussian_elimination in python.

2. Let $H$ denote the $n \times n$ Hilbert matrix, whose $(i, j)$ entry is $1 / (i + j - 1)$. Use the MATLAB program from Computer Problem 1 to solve $Hx = b$, where $b$ is the vector of all ones, for (a) n = 2 (b) n = 5 (c) n = 10.
def computer_problems2__2_1(n):
    # Generate the Hilbert matrix H
    H = scipy.linalg.hilbert(n)
    # Generate b
    b = np.ones(n).reshape(n, 1)
    # Combine H:b in tableau form
    mat = np.hstack((H, b))
    # Gaussian elimination
    _, x = naive_gaussian_elimination(mat)
    return x

with np.printoptions(precision=6, suppress=True):
    print('(a) n = 2 → x = {}'.format(computer_problems2__2_1(2)))
    print('(b) n = 5 → x = {}'.format(computer_problems2__2_1(5)))
    print('(c) n = 10 → x = {}'.format(computer_problems2__2_1(10)))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
2.2 The LU Factorization
def LU_factorization(matrix):
    """
    LU decomposition

    Arguments:
        matrix : numpy 2d array

    Returns:
        L : lower triangular matrix
        U : upper triangular matrix

    Raises:
        ValueError   : matrix is null, or matrix is not a 2d array
        RuntimeError : zero pivot encountered
    """
    if matrix is None:
        raise ValueError('args matrix is null')
    if matrix.ndim != 2:
        raise ValueError('matrix is not a 2d-array')

    # Dimension
    dim = matrix.shape[0]

    # Prepare the L and U matrices
    L = np.identity(dim).astype(np.float64)
    U = matrix.copy().astype(np.float64)

    # Gaussian elimination
    for i in range(0, dim - 1):
        # Check that the pivot is not zero
        if np.abs(U[i, i]) == 0:
            raise RuntimeError('zero pivot encountered')
        for j in range(i + 1, dim):
            mult = U[j, i] / U[i, i]
            for k in range(i, dim):
                U[j, k] -= mult * U[i, k]
            L[j, i] = mult

    return L, U
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
DEFINITION 2.2

An $m \times n$ matrix $L$ is lower triangular if its entries satisfy $l_{ij} = 0$ for $i < j$. An $m \times n$ matrix $U$ is upper triangular if its entries satisfy $u_{ij} = 0$ for $i > j$.

Example

Find the LU factorization for the matrix $A$ in

$$
\large
\begin{bmatrix}
1 & 1 \\
3 & -4 \\
\end{bmatrix}
$$
A = np.array([
    [1,  1],
    [3, -4]
])

L, U = LU_factorization(A)
print('L = ')
print(L)
print()
print('U = ')
print(U)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Find the LU factorization of A =

$$
\large
\begin{bmatrix}
1 & 2 & -1 \\
2 & 1 & -2 \\
-3 & 1 & 1 \\
\end{bmatrix}
$$
A = np.array([
    [ 1, 2, -1],
    [ 2, 1, -2],
    [-3, 1,  1]
])

L, U = LU_factorization(A)
print('L = ')
print(L)
print()
print('U = ')
print(U)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Solve the system

$$
\large
\begin{bmatrix}
1 & 1 \\
3 & -4 \\
\end{bmatrix}
\begin{bmatrix}
x_1 \\
x_2 \\
\end{bmatrix}
=
\begin{bmatrix}
3 \\
2 \\
\end{bmatrix}
$$

using the LU factorization.
A = np.array([
    [1,  1],
    [3, -4]
])
b = np.array([3, 2]).reshape(2, 1)

L, U = LU_factorization(A)

# Solve Lc = b, where Ux = c
mat = np.hstack((L, b))
c = naive_gaussian_elimination(mat)[1].reshape(2, 1)

# Solve Ux = c
mat = np.hstack((U, c))
x = naive_gaussian_elimination(mat)[1].reshape(2, 1)

# Output the result
print('x1 = {}, x2 = {}'.format(x[0][0], x[1][0]))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Solve the system

$$
\begin{matrix}
x + 2y - z = 3 \\
2x + y - 2z = 3 \\
-3x + y + z = -6
\end{matrix}
$$

using the LU factorization.
A = np.array([
    [ 1, 2, -1],
    [ 2, 1, -2],
    [-3, 1,  1]
])
b = np.array([3, 3, -6]).reshape(3, 1)

L, U = LU_factorization(A)

# Solve Lc = b, where Ux = c
mat = np.hstack((L, b))
c = naive_gaussian_elimination(mat)[1].reshape(3, 1)

# Solve Ux = c
mat = np.hstack((U, c))
x = naive_gaussian_elimination(mat)[1].reshape(3, 1)

# Output the result
print('x1 = {}, x2 = {}, x3 = {}'.format(x[0][0], x[1][0], x[2][0]))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Additional Examples

Solve

$$
\large
\begin{bmatrix}
2 & 4 & -2 \\
1 & -2 & 1 \\
4 & -4 & 8 \\
\end{bmatrix}
\begin{bmatrix}
x_1 \\
x_2 \\
x_3 \\
\end{bmatrix}
=
\begin{bmatrix}
6 \\
3 \\
0 \\
\end{bmatrix}
$$

using the A = LU factorization.
A = np.array([
    [2,  4, -2],
    [1, -2,  1],
    [4, -4,  8]
])
b = np.array([6, 3, 0]).reshape(3, 1)

L, U = LU_factorization(A)

# Solve Lc = b, where Ux = c
mat = np.hstack((L, b))
c = naive_gaussian_elimination(mat)[1].reshape(3, 1)

# Solve Ux = c
mat = np.hstack((U, c))
x = naive_gaussian_elimination(mat)[1].reshape(3, 1)

# Output the result
print('x1 = {}, x2 = {}, x3 = {}'.format(x[0][0], x[1][0], x[2][0]))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
2.2 Computer Problems

Use the code fragments for Gaussian elimination in the previous section to write a MATLAB script to take a matrix A as input and output L and U. No row exchanges are allowed: the program should be designed to shut down if it encounters a zero pivot. Check your program by factoring the matrices in Exercise 2. See my implementation LU_factorization in python.
# Exercise 2 - (a)
A = np.array([
    [3, 1, 2],
    [6, 3, 4],
    [3, 1, 5]
])
L, U = LU_factorization(A)
print('L = ')
print(L)
print()
print('U = ')
print(U)

# Exercise 2 - (b)
A = np.array([
    [4, 2, 0],
    [4, 4, 2],
    [2, 2, 3]
])
L, U = LU_factorization(A)
print('L = ')
print(L)
print()
print('U = ')
print(U)

# Exercise 2 - (c)
A = np.array([
    [1, -1, 1,  2],
    [0,  2, 1,  0],
    [1,  3, 4,  4],
    [0,  2, 1, -1]
])
L, U = LU_factorization(A)
print('L = ')
print(L)
print()
print('U = ')
print(U)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Add two-step back substitution to your script from Computer Problem 1, and use it to solve the systems in Exercise 4.
def LU_factorization_with_back_substitution(A, b):
    """
    LU decomposition with two-step back substitution, where Ax = b

    Arguments:
        A : coefficient matrix
        b : constant vector

    Returns:
        x : solution vector
    """
    L, U = LU_factorization(A)

    # Row size
    rowsz = b.size

    # Solve Lc = b, where Ux = c
    matrix = np.hstack((L, b))
    c = naive_gaussian_elimination(matrix)[1].reshape(rowsz, 1)

    # Solve Ux = c
    matrix = np.hstack((U, c))
    x = naive_gaussian_elimination(matrix)[1].reshape(rowsz)
    return x

# Exercise 4 - (a)
A = np.array([
    [3, 1, 2],
    [6, 3, 4],
    [3, 1, 5]
])
b = np.array([0, 1, 3]).reshape(3, 1)
x = LU_factorization_with_back_substitution(A, b)
print(x)

# Exercise 4 - (b)
A = np.array([
    [4, 2, 0],
    [4, 4, 2],
    [2, 2, 3]
])
b = np.array([2, 4, 6]).reshape(3, 1)
x = LU_factorization_with_back_substitution(A, b)
print(x)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
2.3 Sources Of Error

DEFINITION 2.3

The infinity norm, or maximum norm, of the vector $x = (x_1, \cdots, x_n)$ is $||x||_{\infty} = \max|x_i|, i = 1,\cdots,n$, that is, the maximum of the absolute values of the components of x.

DEFINITION 2.4

Let $x_a$ be an approximate solution of the linear system $Ax = b$. The residual is the vector $r = b - Ax_a$. The backward error is the norm of the residual $||b - Ax_a||_{\infty}$, and the forward error is $||x - x_a||_{\infty}$.

Example

Find the backward and forward errors for the approximate solution $x_a = [1, 1]$ of the system

$$
\large
\begin{bmatrix}
1 & 1 \\
3 & -4 \\
\end{bmatrix}
\begin{bmatrix}
x_1 \\
x_2 \\
\end{bmatrix}
=
\begin{bmatrix}
3 \\
2 \\
\end{bmatrix}
$$
A = np.array([
    [1,  1],
    [3, -4]
])
b = np.array([3, 2])
xa = np.array([1, 1])

# Get the correct solution
system = sympy.Matrix(((1, 1, 3), (3, -4, 2)))
solver = sympy.solve_linear_system(system, sympy.abc.x, sympy.abc.y)

# Pack the solution as an array
x = np.array([solver[sympy.abc.x].evalf(), solver[sympy.abc.y].evalf()])
print(x)

# Backward error (differences in the input)
residual = b - np.matmul(A, xa)
backward_error = np.max(np.abs(residual))
print('backward error is {:f}'.format(backward_error))

# Forward error (differences in the output)
forward_error = np.max(np.abs(x - xa))
print('forward error is {:f}'.format(forward_error))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Find the forward and backward errors for the approximate solution [-1, 3.0001] of the system

$$
\large
\begin{align}
x_1 + x_2 &= 2 \\
1.0001 x_1 + x_2 &= 2.0001 \\
\end{align}
$$
A = np.array([
    [1,      1],
    [1.0001, 1],
])
b = np.array([2, 2.0001])

# Approximate solution
xa = np.array([-1, 3.0001])

# Correct solution
x = LU_factorization_with_back_substitution(A, b.reshape(2, 1))

# Backward error
residual = b - np.matmul(A, xa)
backward_error = np.max(np.abs(residual))
print('backward error is {:f}'.format(backward_error))

# Forward error
forward_error = np.max(np.abs(x - xa))
print('forward error is {:f}'.format(forward_error))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
The relative backward error of the system $Ax = b$ is defined to be $\large \frac{||r||_{\infty}}{||b||_{\infty}}$. The relative forward error is $\large \frac{||x - x_a||_{\infty}}{||x||_{\infty}}$. The error magnification factor for $Ax = b$ is the ratio of the two, or

$$
\large \text{error magnification factor} = \frac{\text{relative forward error}}{\text{relative backward error}} = \frac{\frac{||x - x_a||_{\infty}}{||x||_{\infty}}}{\frac{||r||_{\infty}}{||b||_{\infty}}}
$$

DEFINITION 2.5

The condition number of a square matrix A, cond(A), is the maximum possible error magnification factor for solving Ax = b, over all right-hand sides b.

The matrix norm of an n x n matrix A is

$$
\large ||A||_{\infty} = \text{maximum absolute row sum}
$$
def matrix_norm(A):
    # Maximum absolute row sum
    rowsum = np.sum(np.abs(A), axis=1)
    return np.max(rowsum)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
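To make the definitions above concrete, a small helper could look like the sketch below. This is an addition for illustration (the later cells compute the same quantities inline rather than calling it):

import numpy as np

def compute_error_magnification(A, b, x, xa):
    """Relative forward error over relative backward error, in the infinity norm.

    x is the exact solution of Ax = b and xa an approximation to it.
    """
    relative_forward = np.max(np.abs(x - xa)) / np.max(np.abs(x))
    relative_backward = np.max(np.abs(b - np.matmul(A, xa))) / np.max(np.abs(b))
    return relative_forward / relative_backward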
THEOREM 2.6

The condition number of the n x n matrix A is

$$
\large cond(A) = ||A|| \cdot ||A^{-1}||
$$
def condition_number(A):
    inv_A = np.linalg.inv(A)
    cond = matrix_norm(A) * matrix_norm(inv_A)
    return cond
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Additional Examples

Find the determinant and the condition number (in the infinity norm) of the matrix

$$
\large
\begin{bmatrix}
811802 & 810901 \\
810901 & 810001 \\
\end{bmatrix}
$$
A = np.array([
    [811802, 810901],
    [810901, 810001],
])
print('determinant of A is {}'.format(scipy.linalg.det(A)))
print('condition number : {:.4e}'.format(condition_number(A)))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
The solution of the system

$$
\large
\begin{bmatrix}
2 & 4.01 \\
3 & 6 \\
\end{bmatrix}
\begin{bmatrix}
x_1 \\
x_2 \\
\end{bmatrix}
=
\begin{bmatrix}
6.01 \\
9 \\
\end{bmatrix}
$$

is $[1, 1]$.

(a) Find the relative forward and backward errors and error magnification (in the infinity norm) for the approximate solution [21, -9].
A = np.array([
    [2, 4.01],
    [3, 6.00],
])
b = np.array([6.01, 9])

# Approximate solution
xa = np.array([21, -9])

# Correct solution
x = LU_factorization_with_back_substitution(A, b.reshape(2, 1))

# Forward error
forward_error = np.max(np.abs(x - xa))
# Relative forward error
relative_forward_error = forward_error / np.max(np.abs(x))
# Backward error
backward_error = np.max(np.abs(b - np.matmul(A, xa)))
# Relative backward error
relative_backward_error = backward_error / np.max(np.abs(b))
# Error magnification factor
error_magnification_factor = relative_forward_error / relative_backward_error

print('relative forward error : {}'.format(relative_forward_error))
print('relative backward error : {}'.format(relative_backward_error))
print('error magnification factor : {}'.format(error_magnification_factor))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
(b) Find the condition number of the coefficient matrix.
A = np.array([
    [2, 4.01],
    [3, 6.00],
])
print('condition number : {}'.format(condition_number(A)))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
2.3 Computer Problems

For the n x n matrix with entries $A_{ij} = 5 / (i + 2j - 1)$, set $x = [1,\cdots,1]^T$ and $b = Ax$. Use the MATLAB program from Computer Problem 2.1.1 or MATLAB’s backslash command to compute $x_c$, the double precision computed solution. Find the infinity norm of the forward error and the error magnification factor of the problem $Ax = b$, and compare it with the condition number of A: (a) n = 6 (b) n = 10.
def system_provider(n, data_generator):
    A = np.zeros([n, n])
    x = np.ones(n)
    for i in range(n):
        for j in range(n):
            A[i, j] = data_generator(i + 1, j + 1)
    b = np.matmul(A, x)
    return A, x, b

def problem_2_3_1_generic_solver(n, data_generator):
    A, x, b = system_provider(n, data_generator)
    xc = np.linalg.solve(A, b)
    # Forward error
    forward_error = np.max(np.abs(x - xc))
    # Relative forward error
    relative_forward_error = forward_error / np.max(np.abs(x))
    # Backward error
    backward_error = np.max(np.abs(b - np.matmul(A, xc)))
    # Relative backward error
    relative_backward_error = backward_error / np.max(np.abs(b))
    # Error magnification factor
    error_magnification_factor = relative_forward_error / relative_backward_error
    # Condition number
    condA = condition_number(A)
    return forward_error, error_magnification_factor, condA

def problem_2_3_1_solver(n):
    return problem_2_3_1_generic_solver(n, lambda i, j: 5 / (i + 2 * j - 1))

# (a) n = 6
print('(a) n = 6, forward error = {:.3g}, error magnification factor = {:.3g}, condition number = {:.3g}'.format(*problem_2_3_1_solver(6)))

# (b) n = 10
print('(b) n = 10, forward error = {:.3g}, error magnification factor = {:.3g}, condition number = {:.3g}'.format(*problem_2_3_1_solver(10)))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Carry out Computer Problem 1 for the matrix with entries $A_{ij} = 1/(|i - j| + 1)$.
def problem_2_3_2_solver(n):
    return problem_2_3_1_generic_solver(n, lambda i, j: 1 / (np.abs(i - j) + 1))

# (a) n = 6
print('(a) n = 6, forward error = {:.3g}, error magnification factor = {:.3g}, condition number = {:.3g}'.format(*problem_2_3_2_solver(6)))

# (b) n = 10
print('(b) n = 10, forward error = {:.3g}, error magnification factor = {:.3g}, condition number = {:.3g}'.format(*problem_2_3_2_solver(10)))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Let A be the n x n matrix with entries $A_{ij} = |i - j| + 1$. Define $x = [1,\cdots,1]^T$ and $b = Ax$. For n = 100, 200, 300, 400, and 500, use the MATLAB program from Computer Problem 2.1.1 or MATLAB’s backslash command to compute $x_c$, the double precision computed solution. Calculate the infinity norm of the forward error for each solution. Find the five error magnification factors of the problems $Ax = b$, and compare with the corresponding condition numbers.
def problem_2_3_3_solver(n):
    return problem_2_3_1_generic_solver(n, lambda i, j: np.abs(i - j) + 1)

for n in (100, 200, 300, 400, 500):
    print('n = {}, forward error = {:.2g}, error magnification factor = {:.2g}, condition number = {:.2g}'.format(n, *problem_2_3_3_solver(n)))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Carry out the steps of Computer Problem 3 for the matrix with entries $A_{ij} = \sqrt{(i - j)^2 + n / 10}$.
def problem_2_3_4_solver(n):
    return problem_2_3_1_generic_solver(n, lambda i, j: np.sqrt(np.power(i - j, 2) + n / 10))

for n in (100, 200, 300, 400, 500):
    print('n = {}, forward error = {:.2g}, error magnification factor = {:.2g}, condition number = {:.2g}'.format(n, *problem_2_3_4_solver(n)))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
For what values of n does the solution in Computer Problem 1 have no correct significant digits?
print('n = 11, forward error = {:.3g}, error magnification factor = {:.3g}, condition number = {:.3g}'.format(*problem_2_3_1_solver(11)))
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
2.4 The PA=LU Factorization

Example

Apply Gaussian elimination with partial pivoting to solve the system

$$
\begin{matrix}
x_1 - x_2 + 3x_3 = -3 \\
-x_1 - 2x_3 = 1 \\
2x_1 + 2x_2 + 4x_3 = 0
\end{matrix}
$$
A = np.array([ 1, -1,  3,
              -1,  0, -2,
               2,  2,  4]).reshape(3, 3)
b = np.array([-3, 1, 0])

lu, piv = linalg.lu_factor(A)
x = linalg.lu_solve([lu, piv], b)
print(x)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Solve the system $2x_1 + 3x_2 = 4$, $3x_1 + 2x_2 = 1$ using the PA = LU factorization with partial pivoting.
""" [[2, 3] [3, 2]] """ A = np.array([2, 3, 3, 2]).reshape(2, 2) b = np.array([4, 1]) lu, piv = linalg.lu_factor(A) x = linalg.lu_solve([lu, piv], b) print(x)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
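lu_factor returns a compact representation; to look at the explicit factors themselves, scipy.linalg.lu can be used. This is a sketch for illustration; note that scipy's convention is A = P L U, so the permutation in the PA = LU convention is P.T:

import numpy as np
import scipy.linalg

A = np.array([[2., 3.],
              [3., 2.]])
P, L, U = scipy.linalg.lu(A)  # scipy's convention: A = P @ L @ U
print(P)  # permutation matrix: partial pivoting swapped the rows so the pivot 3 leads
print(L)  # unit lower triangular, with the multipliers below the diagonal
print(U)  # upper triangular
assert np.allclose(P @ L @ U, A)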
2.5 Iterative Methods

Jacobi Method
def jacobi_method(A, b, x0, k):
    """
    Use the Jacobi method to solve equations

    Args:
        A (numpy 2d array): the matrix
        b (numpy 1d array): the right-hand-side vector
        x0 (numpy 1d array): initial guess
        k (real number): iterations

    Returns:
        The approximate solution

    Raises:
        ValueError: the column size of the matrix is not equal to the size of x0
    """
    if A.shape[1] != x0.shape[0]:
        raise ValueError('The size of the columns of matrix A must be equal to the size of x0')

    D = np.diag(A.diagonal())
    inv_D = linalg.inv(D)
    LU = A - D  # the off-diagonal part, L + U
    xk = x0
    for _ in range(k):
        xk = np.matmul(b - np.matmul(LU, xk), inv_D)
    return xk
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Apply the Jacobi Method to the system $3u + v = 5$, $u + 2v = 5$.
A = np.array([3, 1,
              1, 2]).reshape(2, 2)
b = np.array([5, 5])
x = jacobi_method(A, b, np.array([0, 0]), 20)
print('x = %s' % x)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Gauss-Seidel Method
def gauss_seidel_method(A, b, x0, k):
    """
    Use the Gauss-Seidel method to solve equations

    Args:
        A (numpy 2d array): the matrix
        b (numpy 1d array): the right-hand-side vector
        x0 (numpy 1d array): initial guess
        k (real number): iterations

    Returns:
        The approximate solution

    Raises:
        ValueError: the column size of the matrix is not equal to the size of x0
    """
    if A.shape[1] != x0.shape[0]:
        raise ValueError('The size of the columns of matrix A must be equal to the size of x0')

    D = np.diag(A.diagonal())
    L = np.tril(A) - D
    U = np.triu(A) - D
    inv_LD = linalg.inv(L + D)
    xk = x0
    for _ in range(k):
        xk = np.matmul(inv_LD, -np.matmul(U, xk) + b)
    return xk
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Apply the Gauss-Seidel Method to the system

$$
\begin{bmatrix}
3 & 1 & -1 \\
2 & 4 & 1 \\
-1 & 2 & 5
\end{bmatrix}
\begin{bmatrix}
u \\
v \\
w
\end{bmatrix}
=
\begin{bmatrix}
4 \\
1 \\
1
\end{bmatrix}
$$
A = np.array([ 3, 1, -1,
               2, 4,  1,
              -1, 2,  5]).reshape(3, 3)
b = np.array([4, 1, 1])
x0 = np.array([0, 0, 0])
gauss_seidel_method(A, b, x0, 24)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Successive Over-Relaxation
def gauss_seidel_sor_method(A, b, w, x0, k):
    """
    Use the Gauss-Seidel method with SOR to solve equations

    Args:
        A (numpy 2d array): the matrix
        b (numpy 1d array): the right-hand-side vector
        w (real number): the relaxation weight
        x0 (numpy 1d array): initial guess
        k (real number): iterations

    Returns:
        The approximate solution

    Raises:
        ValueError: the column size of the matrix is not equal to the size of x0
    """
    if A.shape[1] != x0.shape[0]:
        raise ValueError('The size of the columns of matrix A must be equal to the size of x0')

    D = np.diag(A.diagonal())
    L = np.tril(A) - D
    U = np.triu(A) - D
    inv_LD = linalg.inv(w * L + D)
    xk = x0
    for _ in range(k):
        xk = np.matmul(w * inv_LD, b) + \
             np.matmul(inv_LD, (1 - w) * np.matmul(D, xk) - w * np.matmul(U, xk))
    return xk
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Apply the Gauss-Seidel Method with SOR to the system

$$
\begin{bmatrix}
3 & 1 & -1 \\
2 & 4 & 1 \\
-1 & 2 & 5
\end{bmatrix}
\begin{bmatrix}
u \\
v \\
w
\end{bmatrix}
=
\begin{bmatrix}
4 \\
1 \\
1
\end{bmatrix}
$$
A = np.array([ 3, 1, -1,
               2, 4,  1,
              -1, 2,  5]).reshape(3, 3)
b = np.array([4, 1, 1])
x0 = np.array([0, 0, 0])
w = 1.25
gauss_seidel_sor_method(A, b, w, x0, 14)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
2.6 Methods for symmetric positive-definite matrices

Cholesky factorization

Example

Find the Cholesky factorization of

$$
\begin{bmatrix}
4 & -2 & 2 \\
-2 & 2 & -4 \\
2 & -4 & 11
\end{bmatrix}
$$
A = np.array([ 4, -2,  2,
              -2,  2, -4,
               2, -4, 11]).reshape(3, 3)
R = linalg.cholesky(A)
print(R)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
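As a quick sanity check (an addition, reusing the R and A from the cell above), linalg.cholesky returns the upper-triangular factor by default, so A should equal R.T @ R:

import numpy as np
# verify the factorization: for the upper-triangular factor R, A = R.T @ R
assert np.allclose(R.T @ R, A)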
Conjugate Gradient Method
def conjugate_gradient_method(A, b, x0, k):
    """
    Use the conjugate gradient method to solve linear equations

    Args:
        A  : input matrix
        b  : input right-hand-side vector
        x0 : initial guess
        k  : iterations

    Returns:
        The approximate solution
    """
    xk = x0
    dk = rk = b - np.matmul(A, x0)
    for _ in range(k):
        # Stop early once the residual is (numerically) zero
        if not np.any(rk) or all(abs(i) <= 1e-16 for i in rk):
            break
        ak = float(np.matmul(rk.T, rk)) / float(np.matmul(dk.T, np.matmul(A, dk)))
        xk = xk + ak * dk
        rk1 = rk - ak * np.matmul(A, dk)
        bk = np.matmul(rk1.T, rk1) / np.matmul(rk.T, rk)
        dk = rk1 + bk * dk
        rk = rk1
    return xk
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Solve

$$
\begin{bmatrix}
2 & 2 \\
2 & 5 \\
\end{bmatrix}
\begin{bmatrix}
u \\
v
\end{bmatrix}
=
\begin{bmatrix}
6 \\
3
\end{bmatrix}
$$

using the Conjugate Gradient Method.
A = np.array([2, 2,
              2, 5]).reshape(2, 2)
b = np.array([6, 3])
x0 = np.array([0, 0])
conjugate_gradient_method(A, b, x0, 2)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Solve

$$
\begin{bmatrix}
1 & -1 & 0 \\
-1 & 2 & 1 \\
0 & 1 & 2 \\
\end{bmatrix}
\begin{bmatrix}
u \\
v \\
w \\
\end{bmatrix}
=
\begin{bmatrix}
0 \\
2 \\
3 \\
\end{bmatrix}
$$
A = np.array([ 1, -1, 0,
              -1,  2, 1,
               0,  1, 2]).reshape(3, 3)
b = np.array([0, 2, 3])
x0 = np.array([0, 0, 0])
conjugate_gradient_method(A, b, x0, 10)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Solve

$$
\begin{bmatrix}
1 & -1 & 0 \\
-1 & 2 & 1 \\
0 & 1 & 5 \\
\end{bmatrix}
\begin{bmatrix}
u \\
v \\
w \\
\end{bmatrix}
=
\begin{bmatrix}
3 \\
-3 \\
4 \\
\end{bmatrix}
$$
A = np.array([ 1, -1, 0,
              -1,  2, 1,
               0,  1, 5]).reshape(3, 3)
b = np.array([3, -3, 4])
x0 = np.array([0, 0, 0])
x = slinalg.cg(A, b, x0)[0]
print('x = %s' % x)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Preconditioning
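The notebook names this topic but gives no code. As a minimal sketch (an addition, not the author's implementation), conjugate gradient can be preconditioned with the Jacobi preconditioner M = diag(A), for which applying M^{-1} is just an elementwise division:

import numpy as np

def preconditioned_conjugate_gradient(A, b, x0, k):
    """Conjugate gradient with a Jacobi (diagonal) preconditioner M = diag(A).

    A sketch only: assumes A is symmetric positive-definite with a nonzero diagonal.
    """
    inv_M_diag = 1.0 / A.diagonal()   # applying M^{-1} is elementwise division
    xk = x0.astype(np.float64)
    rk = b - np.matmul(A, xk)         # residual
    zk = inv_M_diag * rk              # preconditioned residual z = M^{-1} r
    dk = zk
    for _ in range(k):
        if np.all(np.abs(rk) <= 1e-16):
            break
        rz = np.dot(rk, zk)
        ak = rz / np.dot(dk, np.matmul(A, dk))
        xk = xk + ak * dk
        rk = rk - ak * np.matmul(A, dk)
        zk = inv_M_diag * rk
        bk = np.dot(rk, zk) / rz
        dk = zk + bk * dk
    return xk

On the small symmetric positive-definite systems above this reproduces the plain conjugate gradient answers; the benefit appears for large, ill-conditioned matrices.

2.7 Nonlinear Systems Of Equations

Multivariate Newton's Method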
def multivariate_newton_method(fA, fDA, x0, k):
    """
    Args:
        fA (function handle)  : the vector-valued function F evaluated at the arguments
        fDA (function handle) : the Jacobian matrix DF evaluated at the arguments
        x0 (numpy array)      : initial guess
        k (real number)       : iterations

    Returns:
        Approximate solution xk after k iterations
    """
    xk = x0
    for _ in range(k):
        lu, piv = linalg.lu_factor(fDA(*xk))
        s = linalg.lu_solve([lu, piv], -fA(*xk))
        xk = xk + s
    return xk
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Use Newton's method with starting guess $(1,2)$ to find a solution of the system

$$
v - u^3 = 0 \\
u^2 + v^2 - 1 = 0
$$
fA = lambda u, v: np.array([v - pow(u, 3),
                            pow(u, 2) + pow(v, 2) - 1], dtype=np.float64)
fDA = lambda u, v: np.array([-3 * pow(u, 2), 1,
                             2 * u, 2 * v], dtype=np.float64).reshape(2, 2)
x0 = np.array([1, 2])
multivariate_newton_method(fA, fDA, x0, 10)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Example

Use Newton's method to find the solutions of the system

$$
f_1(u,v) = 6u^3 + uv - 3v^3 - 4 = 0 \\
f_2(u,v) = u^2 - 18uv^2 + 16v^3 + 1 = 0
$$
fA = lambda u, v: np.array([6 * pow(u, 3) + u * v - 3 * pow(v, 3) - 4,
                            pow(u, 2) - 18 * u * pow(v, 2) + 16 * pow(v, 3) + 1],
                           dtype=np.float64)
fDA = lambda u, v: np.array([18 * pow(u, 2) + v, u - 9 * pow(v, 2),
                             2 * u - 18 * pow(v, 2), -36 * u * v + 48 * pow(v, 2)],
                            dtype=np.float64).reshape(2, 2)
x0 = np.array([2, 2], dtype=np.float64)
multivariate_newton_method(fA, fDA, x0, 5)
2_Systems_Of_Equations.ipynb
Jim00000/Numerical-Analysis
unlicense
Attributes and functions

Each solver has some attributes:

solver_name

The name of the solver. This is the name which will be used to select the solver in cobrapy functions.
solver.solver_name
model.optimize(solver="cglpk")
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
_SUPPORTS_MILP

The presence of this attribute tells cobrapy that the solver supports mixed-integer linear programming.
solver._SUPPORTS_MILP
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
solve

Model.optimize is a wrapper for each solver's solve function. It takes in a cobra model and returns a solution.
solver.solve(model)
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
create_problem

This creates the LP object for the solver.
lp = solver.create_problem(model, objective_sense="maximize")
lp
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
solve_problem

Solve the LP object and return the solution status.
solver.solve_problem(lp)
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
format_solution

Extract a cobra.Solution object from a solved LP object.
solver.format_solution(lp, model)
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
get_objective_value

Extract the objective value from a solved LP object.
solver.get_objective_value(lp)
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
get_status

Get the solution status of a solved LP object.
solver.get_status(lp)
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
change_variable_objective

Change the objective coefficient of a reaction at a particular index. This does not change any of the other objective coefficients which have already been set. This example will double and then revert the biomass coefficient.
model.reactions.index("Biomass_Ecoli_core")

solver.change_variable_objective(lp, 12, 2)
solver.solve_problem(lp)
solver.get_objective_value(lp)

solver.change_variable_objective(lp, 12, 1)
solver.solve_problem(lp)
solver.get_objective_value(lp)
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
change_variable_bounds

Change the lower and upper bounds of a reaction at a particular index. This example will set the lower bound of the biomass to an infeasible value, then revert it.
solver.change_variable_bounds(lp, 12, 1000, 1000)
solver.solve_problem(lp)
solver.change_variable_bounds(lp, 12, 0, 1000)
solver.solve_problem(lp)
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
change_coefficient

Change a coefficient in the stoichiometric matrix. In this example, we will set the entry for atp_c in the ATPM reaction to an infeasible value, then reset it.
model.metabolites.index("atp_c")
model.reactions.index("ATPM")

solver.change_coefficient(lp, 16, 10, -10)
solver.solve_problem(lp)
solver.change_coefficient(lp, 16, 10, -1)
solver.solve_problem(lp)
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
set_parameter

Set a solver parameter. Each solver will have its own particular set of unique parameters. However, some have unified names. For example, all solvers should accept "tolerance_feasibility".
solver.set_parameter(lp, "tolerance_feasibility", 1e-9)

solver.set_parameter(lp, "objective_sense", "minimize")
solver.solve_problem(lp)
solver.get_objective_value(lp)

solver.set_parameter(lp, "objective_sense", "maximize")
solver.solve_problem(lp)
solver.get_objective_value(lp)
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
Example with FVA

Consider flux variability analysis (FVA), which requires maximizing and minimizing every reaction with the original biomass value fixed at its optimal value. If we used the cobra Model API in a naive implementation, we would do the following:
%%time
# work on a copy of the model so the original is not changed
m = model.copy()

# set the lower bound on the objective to be the optimal value
f = m.optimize().f
for objective_reaction, coefficient in m.objective.items():
    objective_reaction.lower_bound = coefficient * f

# now maximize and minimize every reaction to find its bounds
fva_result = {}
for r in m.reactions:
    m.change_objective(r)
    fva_result[r.id] = {
        "maximum": m.optimize(objective_sense="maximize").f,
        "minimum": m.optimize(objective_sense="minimize").f
    }
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
Instead, we could use the solver API to do this more efficiently. This is roughly how cobrapy implements FVA. It reuses the same LP object, repeatedly maximizing and minimizing it. This allows the solver to preserve the basis, and is much faster. The speed increase is even more noticeable the larger the model gets.
%%time
# create the LP object
lp = solver.create_problem(model)

# set the lower bound on the objective to be the optimal value
solver.solve_problem(lp)
f = solver.get_objective_value(lp)
for objective_reaction, coefficient in model.objective.items():
    objective_index = model.reactions.index(objective_reaction)
    # old objective is no longer the objective
    solver.change_variable_objective(lp, objective_index, 0.)
    solver.change_variable_bounds(
        lp, objective_index, f * coefficient,
        objective_reaction.upper_bound)

# now maximize and minimize every reaction to find its bounds
fva_result = {}
for index, r in enumerate(model.reactions):
    solver.change_variable_objective(lp, index, 1.)
    result = {}
    solver.solve_problem(lp, objective_sense="maximize")
    result["maximum"] = solver.get_objective_value(lp)
    solver.solve_problem(lp, objective_sense="minimize")
    result["minimum"] = solver.get_objective_value(lp)
    solver.change_variable_objective(lp, index, 0.)
    fva_result[r.id] = result
documentation_builder/solvers.ipynb
jeicher/cobrapy
lgpl-2.1
1. Background Information

1.1 Introduction to the Second Half of the Class

The remainder of this course will be divided into three two-week modules, each dealing with a different dataset. During the first week of each module, you will complete a (two-class) lab in which you are introduced to the dataset and the various techniques that you need to use to explore it. At the end of Week 1, you and your lab partner will write a brief (1 paragraph) proposal to Professor Follette detailing an investigation that you would like to complete using that dataset in Week 2. You and your partner will complete this investigation and write it up as your lab the following week. Detailed instructions for submitting your proposal are at the end of this lab. Detailed instructions for the lab writeups will be provided next week.

1.2 Introduction to the QuaRCS Dataset

The Quantitative Reasoning for College Science (QuaRCS) assessment is an assessment instrument that Professor Follette has been administering in general education science classes across the country since 2012. It consists of 25 quantitative questions involving "real world" mathematical skills plus 24 attitudinal and demographic questions. It has been administered to more than 5000 students at eleven institutions. You will be reading the published results of this study for class on Thursday, and exploring the data in class this week and next.

A description of all of the variables (pandas dataframe columns) in the QuaRCS dataset, and what each numerical answer choice "stands for", is in the file QuaRCS_descriptions.pdf.

2. Investigating Tabular Data with Pandas

2.1 Reading In and Cleaning Data
import numpy as np
import pandas as pd

# these set the pandas defaults so that it will print ALL values,
# even for very long lists and large dataframes
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
Read in the QuaRCS data as a pandas dataframe called "data".
data = pd.read_csv('AST200_data_anonymized.csv', encoding="ISO-8859-1")
# 999 encodes a blank answer; replace it with NaN
data = data.replace(999, np.nan)
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
Once a dataset has been read in as a pandas dataframe, several useful built-in pandas methods are made available to us. Recall that you call methods with data.method. Check out each of the following
# the * is a trick to print without the ...s for an ordinary python object
print(*data.columns)

data.dtypes
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
2.2 The describe() method

There are also a whole bunch of built-in functions that can operate on a pandas dataframe, which become available once you've defined it. To see a full list, type data. in an empty frame and then hit tab. An especially useful one is the dataframe.describe() method, which creates a summary table with some common statistics for all of the columns in the dataframe.

In our case here there are a number of NaNs in our table (cases where an answer was left blank), and the describe method ignores them for mean, standard deviation (std), min, and max. However, there is a known bug in the pandas module that causes NaNs to break the quartiles in the describe method, so these will always be NaN for any column that has a NaN anywhere in it, rendering them mostly useless here. Still, this is a nice quick way to get descriptive statistics for a table.
data.describe()
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
2.3 Computing Descriptive Statistics

You can also, of course, compute descriptive statistics for columns in a pandas dataframe individually. Examples of each one applied to a single column, student scores on the assessment (PRE_SCORE), are shown below.
np.mean(data["PRE_SCORE"])
# or data["PRE_SCORE"].mean()

np.nanmedian(data["PRE_SCORE"])
# or data["PRE_SCORE"].median()

data["PRE_SCORE"].max()
data["PRE_SCORE"].min()

# the first number is the index (should be zero unless the column has multiple dimensions)
# and the second number is the mode
data["PRE_SCORE"].mode()
# not super useful for continuous variables; for example, if you put in a continuous
# variable (like ZPR_1) it won't return anything, because there are no repeat values

# perhaps equally useful is the value_counts method, which will tell you how many
# times each value appears in the column
data["PRE_SCORE"].value_counts()

# and to count all of the non-null values
data["PRE_SCORE"].count()

# generally different from len(dataframe["column name"]) because len will count NaNs,
# but the score column has no NaNs, so swap ours with a column that does have NaNs
# to verify
len(data["PRE_SCORE"])

# standard deviation
data["PRE_SCORE"].std()

# variance
data["PRE_SCORE"].var()

# verify the relationship between variance and standard deviation
np.sqrt(data["PRE_SCORE"].var())

# quantiles
data["PRE_SCORE"].quantile(0.5)  # should return the median!
data["PRE_SCORE"].quantile(0.25)
data["PRE_SCORE"].quantile(0.75)

# interquartile range
data["PRE_SCORE"].quantile(0.75) - data["PRE_SCORE"].quantile(0.25)

data["PRE_SCORE"].skew()
data["PRE_SCORE"].kurtosis()
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
<div class=hw> ### Exercise 1 ------------------ Choose one categorical (answer to any demographic or attitudinal question) and one continuous variable (e.g. PRE_TIME, ZPR_1) and compute all of the statistics from the list above ***in one code cell*** (use print statements) for each variable. Write a paragraph describing all of the statistics that are informative for that variable in words. An example is given below for PRE_SCORE. Because score is numerical ***and*** discrete, all of the statistics above are informative. In your two cases, fewer statistics will be informative, so your explanations may be shorter, though you should challenge yourselves to go beyond merely reporting the statistcs, and should interpret them as well, as below. *QuaRCS score can take discrete integer values between 0 and 25. The minimum score for this dataset is 1 and the maximum is 25. There are 2,777 valid entries for score in this QuaRCS dataset, for which the mean is 13.9 and the median is 14 (both 56\% of the maximum score). These are very close together, suggesting a reasonably centrally-concentrated score distrubution, and the low skewness value of 0.1 supports this. The kurtosis of the distribution is negative (platykurtic), which tells us that the distribution of scores is flat rather than peaky. The most common score ("mode") is 10, with 197 (~7%) of participants getting this score, however all score values from 7-21 have counts of greater than 100, supporting the flat nature of the distribution suggested by the negative kurtosis. The interquartile range (25-75 percentiles) is 8 points, and the standard deviation is 5.3. These represent a large fraction (20 and 32\%) of the entire available score range, respectively, making the distribution quite wide. *Your description of categorical distribution here* *Your description of continuous distribution here*
#your code computing all descriptive statistics for your categorical variable here

#your code computing all descriptive statistics for your continuous variable here
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
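For reference, here is a minimal sketch of the one-cell, print-statement pattern the exercise asks for, using PRE_SCORE as a stand-in; swap in your own chosen categorical and continuous columns:

col = data["PRE_SCORE"]   # replace with your chosen variable
print("mean:", col.mean())
print("median:", col.median())
print("mode:", col.mode().values)
print("std:", col.std(), " variance:", col.var())
print("IQR:", col.quantile(0.75) - col.quantile(0.25))
print("skew:", col.skew(), " kurtosis:", col.kurtosis())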
2.4 Creating Statistical Graphics

<div class=hw>
### Exercise 2 - Summary plots for distributions

*Warning: Although you will be using QuaRCS data to investigate and experiment with each type of plot below, when you write up your descriptions, they should refer to the **general properties** of the plots, and not to the QuaRCS data specifically. In other words, your descriptions should be general descriptions of the plot types that could be applied to any dataset.*

### 2a - Histogram

The syntax for creating a histogram for a pandas dataframe column is:

dataframe["Column Name"].hist(bins=nbins)

Play around with the column name and bins and refer to the docstring as needed until you understand thoroughly what is being shown. Describe what this ***type of plot*** (not any individual plot that you've made) shows in words and describe when you think it might be useful. Play around with inputs (e.g. column name) until you find a case (dataframe column) where you think the histogram tells you something important and use it as an example to inform your answer. Inputs that do not produce informative histograms should also help to inform your answer. Save a couple of representative histograms (good and bad, use plt.savefig("figure name")) and integrate them into your written (markdown) explanation to support your argument.
#this cell is for playing around with histograms
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
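A minimal sketch of the histogram syntax described above, with an arbitrary bin count and filename (both are assumptions to play with):

import matplotlib.pyplot as plt  # assumed import, in case it hasn't been run yet

# 25 bins is a natural starting point for a 0-25 score range
data["PRE_SCORE"].hist(bins=25)
plt.savefig("score_histogram.png")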
Your explanation here, with figures

<div class=hw>
### 2b - Box Plot

The syntax for creating a box plot for a pair of pandas dataframe columns is:

dataframe.boxplot(column="column name 1", by="column name 2")

Play around with the column and by variables and refer to the docstring as needed until you understand thoroughly what is being shown. Describe what this ***type of plot*** (not any individual plot that you've made) shows in words and describe when you think it might be useful. Play around with inputs (e.g. column names) until you find a case that you think is well-described by a box and whisker plot and use it as an example to inform your answer. Inputs that do not produce informative box plots should also help to inform your answer. Save a couple of representative box plots (good and bad) and integrate them into your written explanation.
#your sample boxplot code here
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
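A sketch of the boxplot syntax, grouping scores by gender (an illustrative pairing, not a prescribed answer):

# one box-and-whisker of PRE_SCORE per PRE_GENDER value
data.boxplot(column="PRE_SCORE", by="PRE_GENDER")
plt.savefig("score_by_gender_boxplot.png")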
Your explanation here <div class=hw> ### 2c - Pie Chart The format for making the kind of pie chart that might be useful in this context is as follows: newdataframe = dataframe["column name"].value()counts newdataframe.plot.pie(figsize=(6,6)) Play around with the column and refer to the docstring as needed until you understand thoroughly what is being shown. Describe what this ***type of plot*** (not any individual plot that you've made) shows in words and describe when you think it might be useful. In your explanation here, focus on how a bar chart compares to a histogram, and when you think one or the other might be useful. Play around with inputs (e.g. column names) until you find a case that you think is well-described by a pie chart and use it as an example to inform your answer. Inputs that do not produce informative pie charts should also help to inform your answer. Save a couple of representative pie charts (good and bad) and integrate them into your written explanation.
#your sample pie chart code here
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
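A sketch of the pie chart recipe with the value_counts syntax spelled out (the column choice is illustrative):

# one wedge per answer choice, sized by how often it appears
gender_counts = data["PRE_GENDER"].value_counts()
gender_counts.plot.pie(figsize=(6, 6))
plt.savefig("gender_pie.png")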
Your explanation here <div class=hw> ### 2d - Scatter Plot The syntax for creating a scatter plot is: dataframe.plot.scatter(x='column name',y='column name') Play around with the column and refer to the docstring as needed until you understand thoroughly what is being shown. Describe what this ***type of plot*** (not any individual plot that you've made) shows in words and describe when you think it might be useful. Play around with inputs (e.g. column names) until you find a case that you think is well-described by a scatter plot and use it as an example to inform your answer. Inputs that do not produce informative scatter plots should also help to inform your answer. Save a couple of representative pie charts (good and bad) and integrate them into your written explanation.
#your sample scatter plot code here
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
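A sketch of the scatter syntax, pairing PRE_TIME with PRE_SCORE (any pair of numeric columns works the same way):

data.plot.scatter(x="PRE_TIME", y="PRE_SCORE")
plt.savefig("score_vs_time_scatter.png")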
Your explanation here

2.5 Selecting a Subset of Data

<div class=hw>
### Exercise 3
--------------

Write a function called "filter" that takes a dataframe, column name, and value for that column as input and returns a new dataframe containing only those rows where column name = value. For example, filter(data, "PRE_GENDER", 1) should return a dataframe about half the size of the original dataframe where all values in the PRE_GENDER column are 1.
#your function here #your tests here
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
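One minimal way to write the function (a sketch; note that naming it filter shadows Python's built-in filter for the rest of the notebook, which is what the later cells expect):

def filter(dataframe, column, value):
    # boolean indexing: keep only the rows where the given column equals the given value
    return dataframe[dataframe[column] == value]

# quick check: men should be roughly half of the full dataset
len(filter(data, "PRE_GENDER", 1)), len(data)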
If you get to this point during lab time on Tuesday, stop here

3. Testing Differences Between Datasets

3.1 Computing Confidence Intervals

Now that we have a mechanism for filtering the dataset, we can test differences between groups with confidence intervals. The syntax for computing the confidence interval on the mean of a given variable is as follows:

variable1 = st.t.interval(conf_level, n, loc=np.nanmean(variable2), scale=st.sem(variable2))

where conf_level is the confidence level you wish to calculate (e.g. 0.95 is 95% confidence, 0.98 is 98%, etc.) and n is the number of degrees of freedom, which should generally be set to the number of valid entries in variable2 minus 1. An example can be found below.
import scipy.stats as st   # assumed import; st is not defined elsewhere in this excerpt

# apply the filter to select only men from data, and pull the scores for this group into a variable
df2 = filter(data, 'PRE_GENDER', 1)
men_scores = df2['PRE_SCORE']

# compute 95% confidence intervals on the mean (low and high)
men_conf = st.t.interval(0.95, len(men_scores)-1, loc=np.mean(men_scores), scale=st.sem(men_scores))
men_conf
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
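The same recipe works for any other group by changing the filter call; a sketch for PRE_GENDER = 2 (assuming, per QuaRCS_descriptions.pdf, that 2 codes the other gender category), for comparison with men_conf:

df3 = filter(data, 'PRE_GENDER', 2)
women_scores = df3['PRE_SCORE'].dropna()   # dropna guards against NaNs breaking the mean

women_conf = st.t.interval(0.95, len(women_scores)-1, loc=np.mean(women_scores), scale=st.sem(women_scores))
women_conf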
<div class=hw> ### Exercise 4 ------------------ Choose a categorical variable (any demographic or attitudinal variable) that you find interesting and that has at least four possible values and calculate the condifence intervals on the mean score for each group. Then write a paragraph describing the results. Are the differences between the groups significant according to your data? Would they still be significant if you were to compute the 98% (3-sigma) confidence intervals?
#code to filter data and compute confidence intervals for each answer choice
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
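To avoid copy-pasting the recipe once per group, a loop over the answer choices works; a sketch assuming a hypothetical categorical variable whose valid codes run 1 through 5 (substitute your own choice and its codes from QuaRCS_descriptions.pdf):

var = "PRE_EFFORT"   # hypothetical choice of categorical variable
for choice in [1, 2, 3, 4, 5]:
    scores = filter(data, var, choice)['PRE_SCORE'].dropna()
    conf = st.t.interval(0.95, len(scores)-1, loc=np.mean(scores), scale=st.sem(scores))
    print(choice, conf)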
explanatory text

3.2 Visualizing Differences with Overlapping Plots

<div class=hw>
### Exercise 5
---------------

Make another dataframe consisting only of students who "devoted effort" to the assessment, meaning their answer for PRE_EFFORT was EITHER a 4 or a 5 (you may have to modify your filter function to accept more than one value for "value"). Make overlapping histograms showing (a) scores for the entire student population and (b) scores for this "high effort" subset. The "alpha" keyword inside the plot commands will set the transparency of your histogram so that you can see both. Play around with it until it looks good. Make sure your chart includes a legend, and describe what conclusions you can draw from the result in a paragraph below the final chart.
#modified filter function here #define your new high effort dataframe using the filter #plot two overlapping histograms
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
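A sketch of the overlapping-histogram idea: the multi-value filter shown is one possible modification (using isin), and the alpha and bin values are matters of taste:

def filter_multi(dataframe, column, values):
    # keep rows where the column matches ANY of the given values
    return dataframe[dataframe[column].isin(values)]

high_effort = filter_multi(data, "PRE_EFFORT", [4, 5])

data["PRE_SCORE"].hist(bins=25, alpha=0.5, label="all students")
high_effort["PRE_SCORE"].hist(bins=25, alpha=0.5, label="high effort (4 or 5)")
plt.legend()
plt.savefig("effort_histograms.png")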
explanatory text here

4. Data Investigation - Week 2 Instructions

Now that you are familiar with the QuaRCS dataset, you and your partner must come up with an investigation that you would like to complete using this data. For the next two modules, this will be more open, but for this first investigation, I will suggest the following three options, of which each group will need to pick one (we will divide in class):

Design visualizations that compare student attitudes pre and post-semester
Design visualizations that compare student skills (by topical area) pre and post-semester
Design visualizations that compare students' awareness of their own skills pre and post-semester

Before 5pm next Monday evening (3/27), you must send Professor Follette a brief e-mail (that you write together, one e-mail per group) describing a plan for how you will approach the problem you've been assigned. What do you need to know that you don't know already? What kinds of plots will you make, and what kinds of statistics will you compute? What is your first thought for what your final data representations will look like (histograms? box and whisker plots? overlapping plots or side by side?).
from IPython.core.display import HTML def css_styling(): styles = open("../custom.css", "r").read() return HTML(styles) css_styling()
Labs/Lab9/Lab 9.ipynb
kfollette/ASTR200-Spring2017
mit
from pysal.explore import spaghetti as spgh from pysal.lib import examples import geopandas as gpd import matplotlib.pyplot as plt import matplotlib.lines as mlines from shapely.geometry import Point, LineString %matplotlib inline __author__ = "James Gaboardi <[email protected]>"
notebooks/explore/spaghetti/Spaghetti_Pointpatterns_Empirical.ipynb
weikang9009/pysal
bsd-3-clause
1. Instantiating a pysal.spaghetti.Network

Instantiate the network from a .shp file
ntw = spgh.Network(in_data=examples.get_path('streets.shp'))
notebooks/explore/spaghetti/Spaghetti_Pointpatterns_Empirical.ipynb
weikang9009/pysal
bsd-3-clause
2. Allocating observations to a network

Snap point patterns to the network
# Crimes with attributes ntw.snapobservations(examples.get_path('crimes.shp'), 'crimes', attribute=True) # Schools without attributes ntw.snapobservations(examples.get_path('schools.shp'), 'schools', attribute=False)
notebooks/explore/spaghetti/Spaghetti_Pointpatterns_Empirical.ipynb
weikang9009/pysal
bsd-3-clause
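As a quick check (a sketch) that both patterns were registered, spaghetti stores snapped patterns in the network's pointpatterns dictionary, keyed by the names given above:

# number of observations snapped in for each point pattern
{name: pp.npoints for name, pp in ntw.pointpatterns.items()}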
3. Visualizing original and snapped locations

True and snapped school locations
schools_df = spgh.element_as_gdf(ntw, pp_name='schools', snapped=False) snapped_schools_df = spgh.element_as_gdf(ntw, pp_name='schools', snapped=True)
notebooks/explore/spaghetti/Spaghetti_Pointpatterns_Empirical.ipynb
weikang9009/pysal
bsd-3-clause
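A plotting sketch layering the true and snapped school locations over the network arcs (this assumes element_as_gdf accepts arcs=True in this version of spaghetti; colors, sizes, and zorders are arbitrary styling choices):

# network arcs as a GeoDataFrame to use as the basemap layer
arcs_df = spgh.element_as_gdf(ntw, arcs=True)
base = arcs_df.plot(color='k', alpha=0.25, figsize=(9, 9))

# true locations in blue, snapped locations in red
schools_df.plot(ax=base, color='b', markersize=50, zorder=2)
snapped_schools_df.plot(ax=base, color='r', markersize=30, zorder=3)

# hand-built legend entries using the mlines import above
true_pt = mlines.Line2D([], [], color='b', marker='o', linestyle='None', label='true locations')
snap_pt = mlines.Line2D([], [], color='r', marker='o', linestyle='None', label='snapped locations')
plt.legend(handles=[true_pt, snap_pt], fontsize=12)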